mirror of
https://github.com/didi/KnowStreaming.git
synced 2025-12-24 20:22:12 +08:00
Compare commits
1320 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e2692a6fc4 | ||
|
|
c18eeb6d55 | ||
|
|
6853862753 | ||
|
|
610af4a9e8 | ||
|
|
ac4ea13be9 | ||
|
|
b6ea4aec19 | ||
|
|
8346453aa3 | ||
|
|
a9eb4ae30e | ||
|
|
cceff91f81 | ||
|
|
009ffeb099 | ||
|
|
e8e05812d0 | ||
|
|
58a421c4b9 | ||
|
|
af916d5a71 | ||
|
|
8b30f78744 | ||
|
|
592dee884a | ||
|
|
715744ca15 | ||
|
|
8a95401364 | ||
|
|
e80f8086d4 | ||
|
|
af82c2e615 | ||
|
|
1369e7b9eb | ||
|
|
ab6afe6dbc | ||
|
|
e24a582067 | ||
|
|
65f8beef32 | ||
|
|
38366809f1 | ||
|
|
530219a317 | ||
|
|
c07e544c50 | ||
|
|
c9308ee4f2 | ||
|
|
95158813b9 | ||
|
|
59e8a416b5 | ||
|
|
f6becbdf2c | ||
|
|
07bd00d60c | ||
|
|
1adfa639ac | ||
|
|
3f817991aa | ||
|
|
3b72f732be | ||
|
|
e2ad3afe3d | ||
|
|
ae04ffdd71 | ||
|
|
cf9d5b6832 | ||
|
|
9c418d3b38 | ||
|
|
128b180c83 | ||
|
|
b60941abc8 | ||
|
|
1a42472fd8 | ||
|
|
18e00f043e | ||
|
|
6385889902 | ||
|
|
ea0c744677 | ||
|
|
d1417bef8c | ||
|
|
a7309612d5 | ||
|
|
6e56688a31 | ||
|
|
a6abfb3ea8 | ||
|
|
ca696dd6e1 | ||
|
|
db40a5cd0a | ||
|
|
55161e439a | ||
|
|
bdffc10ca6 | ||
|
|
b1892c21e2 | ||
|
|
90e5492060 | ||
|
|
42195c3180 | ||
|
|
94b1e508fd | ||
|
|
dd3dcd37e9 | ||
|
|
0a6e9b7633 | ||
|
|
470e471cad | ||
|
|
bd58b48bcb | ||
|
|
0cd071c5c6 | ||
|
|
abaadfb9a8 | ||
|
|
49e7fea6d3 | ||
|
|
d68a19679e | ||
|
|
75be94fbea | ||
|
|
c11aa4fd17 | ||
|
|
cb96fef1a5 | ||
|
|
e98cfbcf91 | ||
|
|
0140b2e898 | ||
|
|
b3b7ab9f6b | ||
|
|
b34edb9b64 | ||
|
|
c2bc0f788d | ||
|
|
3f518c9e63 | ||
|
|
7f7801a5f7 | ||
|
|
e1e02f7c2a | ||
|
|
c497e4cb2d | ||
|
|
e34e3f3e3d | ||
|
|
b3fd494398 | ||
|
|
ffc115cb76 | ||
|
|
7bfe787e39 | ||
|
|
2256e8bbdb | ||
|
|
e975932d41 | ||
|
|
db044caf8b | ||
|
|
82fbea4e5f | ||
|
|
6aaa4b34b8 | ||
|
|
3cb1f03668 | ||
|
|
e61c446410 | ||
|
|
9d0345c9cd | ||
|
|
62f870a342 | ||
|
|
13641c00ba | ||
|
|
769c2c0fbc | ||
|
|
c71865f623 | ||
|
|
258385dc9a | ||
|
|
65238231f0 | ||
|
|
cb22e02fbe | ||
|
|
aa0bec1206 | ||
|
|
793c780015 | ||
|
|
ec6f063450 | ||
|
|
f25c65b98b | ||
|
|
2d99aae779 | ||
|
|
a8847dc282 | ||
|
|
4852c01c88 | ||
|
|
3d6f405b69 | ||
|
|
18e3fbf41d | ||
|
|
ae8cc3092b | ||
|
|
5c26e8947b | ||
|
|
fbe6945d3b | ||
|
|
7dc8f2dc48 | ||
|
|
91c60ce72c | ||
|
|
687eea80c8 | ||
|
|
9bfe3fd1db | ||
|
|
03f81bc6de | ||
|
|
eed9571ffa | ||
|
|
e4651ef749 | ||
|
|
f715cf7a8d | ||
|
|
fad9ddb9a1 | ||
|
|
b6e4f50849 | ||
|
|
5c6911e398 | ||
|
|
a0371ab88b | ||
|
|
fa2abadc25 | ||
|
|
f03460f3cd | ||
|
|
b5683b73c2 | ||
|
|
c062586c7e | ||
|
|
98a5c7b776 | ||
|
|
e204023b1f | ||
|
|
4c5ffccc45 | ||
|
|
fbcf58e19c | ||
|
|
e5c6d00438 | ||
|
|
ab6a4d7099 | ||
|
|
78b2b8a45e | ||
|
|
add2af4f3f | ||
|
|
235c0ed30e | ||
|
|
5bd93aa478 | ||
|
|
f95be2c1b3 | ||
|
|
5110b30f62 | ||
|
|
861faa5df5 | ||
|
|
efdf624c67 | ||
|
|
caccf9cef5 | ||
|
|
6ba3dceb84 | ||
|
|
9b7c41e804 | ||
|
|
346aee8fe7 | ||
|
|
353d781bca | ||
|
|
3ce4bf231a | ||
|
|
d046cb8bf4 | ||
|
|
da95c63503 | ||
|
|
915e48de22 | ||
|
|
256f770971 | ||
|
|
16e251cbe8 | ||
|
|
67743b859a | ||
|
|
c275b42632 | ||
|
|
a02760417b | ||
|
|
0e50bfc5d4 | ||
|
|
eab988e18f | ||
|
|
dd6004b9d4 | ||
|
|
ac7c32acd5 | ||
|
|
f4a219ceef | ||
|
|
a8b56fb613 | ||
|
|
2925a20e8e | ||
|
|
6b3eb05735 | ||
|
|
17e0c39f83 | ||
|
|
4994639111 | ||
|
|
c187b5246f | ||
|
|
6ed6d5ec8a | ||
|
|
0735b332a8 | ||
|
|
344cec19fe | ||
|
|
6ef365e201 | ||
|
|
edfa6a9f71 | ||
|
|
860d0b92e2 | ||
|
|
5bceed7105 | ||
|
|
44a2fe0398 | ||
|
|
218459ad1b | ||
|
|
7db757bc12 | ||
|
|
896a943587 | ||
|
|
cd2c388e68 | ||
|
|
4543a339b7 | ||
|
|
1c4fbef9f2 | ||
|
|
b2f0f69365 | ||
|
|
c4fb18a73c | ||
|
|
5cad7b4106 | ||
|
|
f3c4133cd2 | ||
|
|
d9c59cb3d3 | ||
|
|
7a0db7161b | ||
|
|
6aefc16fa0 | ||
|
|
186dcd07e0 | ||
|
|
e8652d5db5 | ||
|
|
fb5964af84 | ||
|
|
249fe7c700 | ||
|
|
cc2a590b33 | ||
|
|
5b3f3e5575 | ||
|
|
36cf285397 | ||
|
|
4386563c2c | ||
|
|
0123ce4a5a | ||
|
|
c3d47d3093 | ||
|
|
9735c4f885 | ||
|
|
3a3141a361 | ||
|
|
ac30436324 | ||
|
|
7176e418f5 | ||
|
|
ca794f507e | ||
|
|
0f8be4fadc | ||
|
|
7066246e8f | ||
|
|
7d1bb48b59 | ||
|
|
dd0d519677 | ||
|
|
4293d05fca | ||
|
|
2c82baf9fc | ||
|
|
921161d6d0 | ||
|
|
e632c6c13f | ||
|
|
5833a8644c | ||
|
|
fab41e892f | ||
|
|
7a52cf67b0 | ||
|
|
175b8d643a | ||
|
|
6241eb052a | ||
|
|
c2fd0a8410 | ||
|
|
5127b600ec | ||
|
|
feb03aede6 | ||
|
|
47b6c5d86a | ||
|
|
c4a81613f4 | ||
|
|
daeb5c4cec | ||
|
|
38def45ad6 | ||
|
|
4b29a2fdfd | ||
|
|
a165ecaeef | ||
|
|
6637ba4ccc | ||
|
|
2f807eec2b | ||
|
|
636c2c6a83 | ||
|
|
898a55c703 | ||
|
|
8ffe7e7101 | ||
|
|
7661826ea5 | ||
|
|
e456be91ef | ||
|
|
da0a97cabf | ||
|
|
c1031a492a | ||
|
|
3c8aaf528c | ||
|
|
70ff20a2b0 | ||
|
|
6918f4babe | ||
|
|
805a704d34 | ||
|
|
c69c289bc4 | ||
|
|
dd5869e246 | ||
|
|
b51ffb81a3 | ||
|
|
ed0efd6bd2 | ||
|
|
39d2fe6195 | ||
|
|
7471d05c20 | ||
|
|
3492688733 | ||
|
|
a603783615 | ||
|
|
5c9096d564 | ||
|
|
c27786a257 | ||
|
|
81910d1958 | ||
|
|
55d5fc4bde | ||
|
|
f30586b150 | ||
|
|
37037c19f0 | ||
|
|
1a5e2c7309 | ||
|
|
941dd4fd65 | ||
|
|
5f6df3681c | ||
|
|
7d045dbf05 | ||
|
|
4ff4accdc3 | ||
|
|
bbe967c4a8 | ||
|
|
b101cec6fa | ||
|
|
e98ec562a2 | ||
|
|
0e71ecc587 | ||
|
|
0f11a65df8 | ||
|
|
da00c8c877 | ||
|
|
8b177877bb | ||
|
|
ea199dca8d | ||
|
|
88b5833f77 | ||
|
|
127b5be651 | ||
|
|
80f001cdd5 | ||
|
|
30d297cae1 | ||
|
|
a96853db90 | ||
|
|
c1502152c0 | ||
|
|
afda292796 | ||
|
|
163cab78ae | ||
|
|
8f4ff36c09 | ||
|
|
47b6b3577a | ||
|
|
f3eca3b214 | ||
|
|
62f7d3f72f | ||
|
|
26e60d8a64 | ||
|
|
df655a250c | ||
|
|
811fc9b400 | ||
|
|
83df02783c | ||
|
|
6a5efce874 | ||
|
|
fa0ae5e474 | ||
|
|
cafd665a2d | ||
|
|
e8f77a456b | ||
|
|
4510c62ebd | ||
|
|
79864955e1 | ||
|
|
ff26a8d46c | ||
|
|
cc226d552e | ||
|
|
962f89475b | ||
|
|
ec204a1605 | ||
|
|
58d7623938 | ||
|
|
8f4ecfcdc0 | ||
|
|
ef719cedbc | ||
|
|
b7856c892b | ||
|
|
7435a78883 | ||
|
|
f49206b316 | ||
|
|
7d500a0721 | ||
|
|
98a519f20b | ||
|
|
39b655bb43 | ||
|
|
78d56a49fe | ||
|
|
d2e9d1fa01 | ||
|
|
41ff914dc3 | ||
|
|
3ba447fac2 | ||
|
|
e9cc380a2e | ||
|
|
017cac9bbe | ||
|
|
9ad72694af | ||
|
|
e8f9821870 | ||
|
|
bb167b9f8d | ||
|
|
28fbb5e130 | ||
|
|
16101e81e8 | ||
|
|
aced504d2a | ||
|
|
abb064d9d1 | ||
|
|
dc1899a1cd | ||
|
|
442f34278c | ||
|
|
a6dcbcd35b | ||
|
|
2b600e96eb | ||
|
|
177bb80f31 | ||
|
|
63fbe728c4 | ||
|
|
b33020840b | ||
|
|
c5caf7c0d6 | ||
|
|
0f0473db4c | ||
|
|
beadde3e06 | ||
|
|
a423a20480 | ||
|
|
79f0a23813 | ||
|
|
780fdea2cc | ||
|
|
1c0fda1adf | ||
|
|
9cf13e9b30 | ||
|
|
87cd058fd8 | ||
|
|
81b1ec48c2 | ||
|
|
66dd82f4fd | ||
|
|
ce35b23911 | ||
|
|
e79342acf5 | ||
|
|
3fc9f39d24 | ||
|
|
0221fb3a4a | ||
|
|
f009f8b7ba | ||
|
|
b76959431a | ||
|
|
975370b593 | ||
|
|
7275030971 | ||
|
|
99b0be5a95 | ||
|
|
edd3f95fc4 | ||
|
|
479f983b09 | ||
|
|
7650332252 | ||
|
|
8f1a021851 | ||
|
|
ce4df4d5fd | ||
|
|
bd43ae1b5d | ||
|
|
8fa34116b9 | ||
|
|
7e92553017 | ||
|
|
b7e243a693 | ||
|
|
35d4888afb | ||
|
|
b3e8a4f0f6 | ||
|
|
321125caee | ||
|
|
e01427aa4f | ||
|
|
14652e7f7a | ||
|
|
7c05899dbd | ||
|
|
56726b703f | ||
|
|
6237b0182f | ||
|
|
be5b662f65 | ||
|
|
224698355c | ||
|
|
8f47138ecd | ||
|
|
d159746391 | ||
|
|
63df93ea5e | ||
|
|
38948c0daa | ||
|
|
6c610427b6 | ||
|
|
b4cc31c459 | ||
|
|
7d781712c9 | ||
|
|
dd61ce9b2a | ||
|
|
69a7212986 | ||
|
|
ff05a951fd | ||
|
|
89d5357b40 | ||
|
|
7ca3d65c42 | ||
|
|
7b5c2d800f | ||
|
|
f414b47a78 | ||
|
|
44f4e2f0f9 | ||
|
|
2361008bdf | ||
|
|
7377ef3ec5 | ||
|
|
a28d064b7a | ||
|
|
e2e57e8575 | ||
|
|
9d90bd2835 | ||
|
|
7445e68df4 | ||
|
|
ab42625ad2 | ||
|
|
18789a0a53 | ||
|
|
68a37bb56a | ||
|
|
3b33652c47 | ||
|
|
1e0c4c3904 | ||
|
|
04e223de16 | ||
|
|
c4a691aa8a | ||
|
|
ff9dde163a | ||
|
|
eb7efbd1a5 | ||
|
|
8c8c362c54 | ||
|
|
66e119ad5d | ||
|
|
6dedc04a05 | ||
|
|
0cf8bad0df | ||
|
|
95c9582d8b | ||
|
|
7815126ff5 | ||
|
|
a5fa9de54b | ||
|
|
95f1a2c630 | ||
|
|
1e256ae1fd | ||
|
|
9fc9c54fa1 | ||
|
|
1b362b1e02 | ||
|
|
04e3172cca | ||
|
|
1caab7f3f7 | ||
|
|
9d33c725ad | ||
|
|
6ed1d38106 | ||
|
|
0f07ddedaf | ||
|
|
289945b471 | ||
|
|
f331a6d144 | ||
|
|
0c8c12a651 | ||
|
|
028c3bb2fa | ||
|
|
d7a5a0d405 | ||
|
|
5ef5f6e531 | ||
|
|
1d205734b3 | ||
|
|
5edd43884f | ||
|
|
c1992373bc | ||
|
|
ed562f9c8a | ||
|
|
b4d44ef8c7 | ||
|
|
ad0c16a1b4 | ||
|
|
7eabe66853 | ||
|
|
3983d73695 | ||
|
|
161d4c4562 | ||
|
|
9a1e89564e | ||
|
|
0c18c5b4f6 | ||
|
|
3e12ba34f7 | ||
|
|
e71e29391b | ||
|
|
9b7b9a7af0 | ||
|
|
a23819c308 | ||
|
|
6cb1825d96 | ||
|
|
77b8c758dc | ||
|
|
e5a582cfad | ||
|
|
ec83db267e | ||
|
|
bfd026cae7 | ||
|
|
35f1dd8082 | ||
|
|
7ed0e7dd23 | ||
|
|
1a3cbf7a9d | ||
|
|
d9e4abc3de | ||
|
|
a4186085d3 | ||
|
|
26b1846bb4 | ||
|
|
1aa89527a6 | ||
|
|
eac76d7ad0 | ||
|
|
cea0cd56f6 | ||
|
|
c4b897f282 | ||
|
|
47389dbabb | ||
|
|
a2f8b1a851 | ||
|
|
feac0a058f | ||
|
|
27eeac9fd4 | ||
|
|
a14db4b194 | ||
|
|
54ee271a47 | ||
|
|
a3a9be4f7f | ||
|
|
d4f0a832f3 | ||
|
|
7dc533372c | ||
|
|
1737d87713 | ||
|
|
dbb98dea11 | ||
|
|
802b382b36 | ||
|
|
fc82999d45 | ||
|
|
08aa000c07 | ||
|
|
39015b5100 | ||
|
|
0d635ad419 | ||
|
|
9133205915 | ||
|
|
725ac10c3d | ||
|
|
2b76358c8f | ||
|
|
833c360698 | ||
|
|
7da1e67b01 | ||
|
|
7eb86a47dd | ||
|
|
d67e383c28 | ||
|
|
8749d3e1f5 | ||
|
|
30fba21c48 | ||
|
|
d83d35aee9 | ||
|
|
1d3caeea7d | ||
|
|
c8806dbb4d | ||
|
|
e5802c7f50 | ||
|
|
590f684d66 | ||
|
|
8e5a67f565 | ||
|
|
8d2fbce11e | ||
|
|
26916f6632 | ||
|
|
fbfa0d2d2a | ||
|
|
e626b99090 | ||
|
|
203859b71b | ||
|
|
9a25c22f3a | ||
|
|
0a03f41a7c | ||
|
|
56191939c8 | ||
|
|
beb754aaaa | ||
|
|
f234f740ca | ||
|
|
e14679694c | ||
|
|
e06712397e | ||
|
|
b6c6df7ffc | ||
|
|
375c6f56c9 | ||
|
|
0bf85c97b5 | ||
|
|
630e582321 | ||
|
|
a89fe23bdd | ||
|
|
a7a5fa9a31 | ||
|
|
c73a7eee2f | ||
|
|
121f8468d5 | ||
|
|
7b0b6936e0 | ||
|
|
597ea04a96 | ||
|
|
f7f90aeaaa | ||
|
|
227479f695 | ||
|
|
6477fb3fe0 | ||
|
|
4223f4f3c4 | ||
|
|
7288874d72 | ||
|
|
68f76f2daf | ||
|
|
fe6ddebc49 | ||
|
|
12b5acd073 | ||
|
|
a6f1fe07b3 | ||
|
|
85e3f2a946 | ||
|
|
d4f416de14 | ||
|
|
0d9a6702c1 | ||
|
|
d11285cdbf | ||
|
|
5f1f33d2b9 | ||
|
|
474daf752d | ||
|
|
27d1b92690 | ||
|
|
993afa4c19 | ||
|
|
028d891c32 | ||
|
|
0df55ec22d | ||
|
|
579f64774d | ||
|
|
792f8d939d | ||
|
|
e4fb02fcda | ||
|
|
0c14c641d0 | ||
|
|
dba671fd1e | ||
|
|
80d1693722 | ||
|
|
26014a11b2 | ||
|
|
848fddd55a | ||
|
|
97f5f05f1a | ||
|
|
25b82810f2 | ||
|
|
9b1e506fa7 | ||
|
|
7a42996e97 | ||
|
|
dbfcebcf67 | ||
|
|
37c3f69a28 | ||
|
|
5d412890b4 | ||
|
|
1e318a4c40 | ||
|
|
d4549176ec | ||
|
|
61efdf492f | ||
|
|
67ea4d44c8 | ||
|
|
fdae05a4aa | ||
|
|
5efb837ee8 | ||
|
|
584b626d93 | ||
|
|
de25a4ed8e | ||
|
|
2e852e5ca6 | ||
|
|
b11000715a | ||
|
|
b3f8b46f0f | ||
|
|
8d22a0664a | ||
|
|
20756a3453 | ||
|
|
c9b4d45a64 | ||
|
|
83f7f5468b | ||
|
|
59c042ad67 | ||
|
|
d550fc5068 | ||
|
|
6effba69a0 | ||
|
|
9b46956259 | ||
|
|
b5a4a732da | ||
|
|
487862367e | ||
|
|
5b63b9ce67 | ||
|
|
afbcd3e1df | ||
|
|
12b82c1395 | ||
|
|
863b765e0d | ||
|
|
731429c51c | ||
|
|
66f3bc61fe | ||
|
|
4efe35dd51 | ||
|
|
c92461ef93 | ||
|
|
405e6e0c1d | ||
|
|
0d227aef49 | ||
|
|
0e49002f42 | ||
|
|
2e016800e0 | ||
|
|
09f317b991 | ||
|
|
5a48cb1547 | ||
|
|
f632febf33 | ||
|
|
3c53467943 | ||
|
|
d358c0f4f7 | ||
|
|
de977a5b32 | ||
|
|
703d685d59 | ||
|
|
31a5f17408 | ||
|
|
c40ae3c455 | ||
|
|
b71a34279e | ||
|
|
8f8c0c4eda | ||
|
|
3a384f0e34 | ||
|
|
cf7bc11cbd | ||
|
|
be60ae8399 | ||
|
|
8e50d145d5 | ||
|
|
7a3d15525c | ||
|
|
64f32d8b24 | ||
|
|
949d6ba605 | ||
|
|
ceb8db09f4 | ||
|
|
ed05a0ebb8 | ||
|
|
a7cbb76655 | ||
|
|
93cbfa0b1f | ||
|
|
6120613a98 | ||
|
|
dbd00db159 | ||
|
|
befde952f5 | ||
|
|
1aa759e5be | ||
|
|
2de27719c1 | ||
|
|
21db57b537 | ||
|
|
dfe8d09477 | ||
|
|
90dfa22c64 | ||
|
|
0f35427645 | ||
|
|
7909f60ff8 | ||
|
|
9a1a8a4c30 | ||
|
|
fa7ad64140 | ||
|
|
8a0c23339d | ||
|
|
e7ab3aff16 | ||
|
|
d0948797b9 | ||
|
|
04a5e17451 | ||
|
|
47065c8042 | ||
|
|
488c778736 | ||
|
|
d10a7bcc75 | ||
|
|
afe44a2537 | ||
|
|
9eadafe850 | ||
|
|
dab3eefcc0 | ||
|
|
2b9a6b28d8 | ||
|
|
465f98ca2b | ||
|
|
a0312be4fd | ||
|
|
4a5161372b | ||
|
|
4c9921f752 | ||
|
|
6dd72d40ee | ||
|
|
db49c234bb | ||
|
|
4a9df0c4d9 | ||
|
|
461573c2ba | ||
|
|
291992753f | ||
|
|
fcefe7ac38 | ||
|
|
7da712fcff | ||
|
|
2fd8687624 | ||
|
|
639b1f8336 | ||
|
|
ab3b83e42a | ||
|
|
4818629c40 | ||
|
|
61784c860a | ||
|
|
d5667254f2 | ||
|
|
af2b93983f | ||
|
|
8281301cbd | ||
|
|
0043ab8371 | ||
|
|
500eaace82 | ||
|
|
28e8540c78 | ||
|
|
69adf682e2 | ||
|
|
69cd1ff6e1 | ||
|
|
415d67cc32 | ||
|
|
46a2fec79b | ||
|
|
560b322fca | ||
|
|
effe17ac85 | ||
|
|
7699acfc1b | ||
|
|
6e058240b3 | ||
|
|
f005c6bc44 | ||
|
|
7be462599f | ||
|
|
271ab432d9 | ||
|
|
4114777a4e | ||
|
|
9189a54442 | ||
|
|
b95ee762e3 | ||
|
|
9e3c4dc06b | ||
|
|
1891a3ac86 | ||
|
|
9ecdcac06d | ||
|
|
790cb6a2e1 | ||
|
|
4a98e5f025 | ||
|
|
507abc1d84 | ||
|
|
9b732fbbad | ||
|
|
220f1c6fc3 | ||
|
|
7a950c67b6 | ||
|
|
78f625dc8c | ||
|
|
211d26a3ed | ||
|
|
dce2bc6326 | ||
|
|
90e5d7f6f0 | ||
|
|
71d4e0f9e6 | ||
|
|
580b4534e0 | ||
|
|
fc835e09c6 | ||
|
|
c6e782a637 | ||
|
|
1ddfbfc833 | ||
|
|
dbf637fe0f | ||
|
|
110e129622 | ||
|
|
677e9d1b54 | ||
|
|
ad2adb905e | ||
|
|
5e9de7ac14 | ||
|
|
c63fb8380c | ||
|
|
2d39acc224 | ||
|
|
e68358e05f | ||
|
|
a96f10edf0 | ||
|
|
f03d94935b | ||
|
|
9c1320cd95 | ||
|
|
4f2ae588a5 | ||
|
|
eff51034b7 | ||
|
|
18832dc448 | ||
|
|
5262ae8907 | ||
|
|
7f251679fa | ||
|
|
5f5920b427 | ||
|
|
65a16d058a | ||
|
|
a73484d23a | ||
|
|
47887a20c6 | ||
|
|
9465c6f198 | ||
|
|
c09872c8c2 | ||
|
|
b0501cc80d | ||
|
|
f0792db6b3 | ||
|
|
e1514c901b | ||
|
|
e90c5003ae | ||
|
|
92a0d5d52c | ||
|
|
8912cb5323 | ||
|
|
d008c19149 | ||
|
|
e844b6444a | ||
|
|
02606cdce2 | ||
|
|
0081720f0e | ||
|
|
cca1e92868 | ||
|
|
69b774a074 | ||
|
|
5656b03fb4 | ||
|
|
02d0dcbb7f | ||
|
|
7b2e06df12 | ||
|
|
4259ae63d7 | ||
|
|
d7b11803bc | ||
|
|
fed298a6d4 | ||
|
|
51832385b1 | ||
|
|
462303fca0 | ||
|
|
4405703e42 | ||
|
|
23e398e121 | ||
|
|
b17bb89d04 | ||
|
|
5590cebf8f | ||
|
|
1fa043f09d | ||
|
|
3bd0af1451 | ||
|
|
1545962745 | ||
|
|
d032571681 | ||
|
|
33fb0acc7e | ||
|
|
1ec68a91e2 | ||
|
|
a23c113a46 | ||
|
|
371ae2c0a5 | ||
|
|
8f8f6ffa27 | ||
|
|
475fe0d91f | ||
|
|
3d74e60d03 | ||
|
|
83ac83bb28 | ||
|
|
8478fb857c | ||
|
|
7074bdaa9f | ||
|
|
58164294cc | ||
|
|
7c0e9df156 | ||
|
|
bd62212ecb | ||
|
|
2292039b42 | ||
|
|
73f8da8d5a | ||
|
|
e51dbe0ca7 | ||
|
|
482a375e31 | ||
|
|
689c5ce455 | ||
|
|
734a020ecc | ||
|
|
44d537f78c | ||
|
|
b4c60eb910 | ||
|
|
e120b32375 | ||
|
|
de54966d30 | ||
|
|
39a6302c18 | ||
|
|
05ceeea4b0 | ||
|
|
9f8e3373a8 | ||
|
|
42521cbae4 | ||
|
|
b23c35197e | ||
|
|
70f28d9ac4 | ||
|
|
912d73d98a | ||
|
|
2a720fce6f | ||
|
|
e4534c359f | ||
|
|
b91bec15f2 | ||
|
|
67ad5cacb7 | ||
|
|
b4a739476a | ||
|
|
a7bf2085db | ||
|
|
c3802cf48b | ||
|
|
54711c4491 | ||
|
|
fcb52a69c0 | ||
|
|
1b632f9754 | ||
|
|
73d7a0ecdc | ||
|
|
08943593b3 | ||
|
|
c949a88f20 | ||
|
|
a49c11f655 | ||
|
|
a66aed4a88 | ||
|
|
0045c953a0 | ||
|
|
fdce41b451 | ||
|
|
4d5e4d0f00 | ||
|
|
82c9b6481e | ||
|
|
053d4dcb18 | ||
|
|
e1b2c442aa | ||
|
|
0ed8ba8ca4 | ||
|
|
f195847c68 | ||
|
|
5beb13b17e | ||
|
|
7d9ec05062 | ||
|
|
fc604a9eaf | ||
|
|
4f3c1ad9b6 | ||
|
|
6d45ed586c | ||
|
|
1afb633b4f | ||
|
|
34d9f9174b | ||
|
|
3b0c208eff | ||
|
|
05022f8db4 | ||
|
|
3336de457a | ||
|
|
10a27bc29c | ||
|
|
542e5d3c2d | ||
|
|
7372617b14 | ||
|
|
89735a130b | ||
|
|
859cf74bd6 | ||
|
|
e2744ab399 | ||
|
|
16bd065098 | ||
|
|
71c52e6dd7 | ||
|
|
a7f8c3ced3 | ||
|
|
f3f0432c65 | ||
|
|
426ba2d150 | ||
|
|
2790099efa | ||
|
|
f6ba8bc95e | ||
|
|
d6181522c0 | ||
|
|
04cf071ca6 | ||
|
|
e4371b5d02 | ||
|
|
52c52b2a0d | ||
|
|
8f40f10575 | ||
|
|
fe0f6fcd0b | ||
|
|
31b1ad8bb4 | ||
|
|
373680d854 | ||
|
|
9e3bc80495 | ||
|
|
89405fe003 | ||
|
|
b9ea3865a5 | ||
|
|
b5bd643814 | ||
|
|
52ccaeffd5 | ||
|
|
18136c12fd | ||
|
|
dec3f9e75e | ||
|
|
ccc0ee4d18 | ||
|
|
69e9708080 | ||
|
|
5944ba099a | ||
|
|
ada2718b5e | ||
|
|
1f87bd63e7 | ||
|
|
c0f3259cf6 | ||
|
|
e1d5749a40 | ||
|
|
a8d7eb27d9 | ||
|
|
1eecdf3829 | ||
|
|
be8b345889 | ||
|
|
074da389b3 | ||
|
|
4df2dc09fe | ||
|
|
e8d42ba074 | ||
|
|
c036483680 | ||
|
|
2818584db6 | ||
|
|
37585f760d | ||
|
|
f5477a03a1 | ||
|
|
50388425b2 | ||
|
|
725c59eab0 | ||
|
|
7bf1de29a4 | ||
|
|
d90c3fc7dd | ||
|
|
80785ce072 | ||
|
|
44ea896de8 | ||
|
|
d30cb8a0f0 | ||
|
|
6c7b333b34 | ||
|
|
6d34a00e77 | ||
|
|
1f353e10ce | ||
|
|
4e10f8d1c5 | ||
|
|
a22cd853fc | ||
|
|
354e0d6a87 | ||
|
|
dfabe28645 | ||
|
|
fce230da48 | ||
|
|
055ba9bda6 | ||
|
|
ec19c3b4dd | ||
|
|
37aa526404 | ||
|
|
86c1faa40f | ||
|
|
8dcf15d0f9 | ||
|
|
6835e1e680 | ||
|
|
d8f89b8f67 | ||
|
|
ec28eba781 | ||
|
|
5ef8fff5bc | ||
|
|
4f317b76fa | ||
|
|
61672637dc | ||
|
|
ecf6e8f664 | ||
|
|
4115975320 | ||
|
|
21904a8609 | ||
|
|
10b0a3dabb | ||
|
|
b2091e9aed | ||
|
|
f2cb5bd77c | ||
|
|
19c61c52e6 | ||
|
|
b327359183 | ||
|
|
9e9bb72e17 | ||
|
|
a23907e009 | ||
|
|
ad131f5a2c | ||
|
|
dbeae4ca68 | ||
|
|
0fb0e94848 | ||
|
|
95d2a82d35 | ||
|
|
5bc6eb6774 | ||
|
|
3ba81e9aaa | ||
|
|
329a9b59c1 | ||
|
|
39cccd568e | ||
|
|
19b7f6ad8c | ||
|
|
41c000cf47 | ||
|
|
1b8ea61e87 | ||
|
|
22c26e24b1 | ||
|
|
396045177c | ||
|
|
4538593236 | ||
|
|
8086ef355b | ||
|
|
60d038fe46 | ||
|
|
ff0f4463be | ||
|
|
820571d993 | ||
|
|
e311d3767c | ||
|
|
24d7b80244 | ||
|
|
61f99e4d2e | ||
|
|
d5348bcf49 | ||
|
|
5d31d66365 | ||
|
|
29778a0154 | ||
|
|
165c0a5866 | ||
|
|
588323961e | ||
|
|
fd1c0b71c5 | ||
|
|
54fbdcadf9 | ||
|
|
69a30d0cf0 | ||
|
|
b8f9b44f38 | ||
|
|
cbf17d4eb5 | ||
|
|
327e025262 | ||
|
|
6b1e944bba | ||
|
|
668ed4d61b | ||
|
|
312c0584ed | ||
|
|
110d3acb58 | ||
|
|
ddbc60283b | ||
|
|
471bcecfd6 | ||
|
|
0245791b13 | ||
|
|
4794396ce8 | ||
|
|
c7088779d6 | ||
|
|
672905da12 | ||
|
|
47172b13be | ||
|
|
3668a10af6 | ||
|
|
a4e294c03f | ||
|
|
3fd6f4003f | ||
|
|
3eaf5cd530 | ||
|
|
c344fd8ca4 | ||
|
|
09639ca294 | ||
|
|
a81b6dca83 | ||
|
|
b74aefb08f | ||
|
|
fffc0c3add | ||
|
|
757f90aa7a | ||
|
|
022f9eb551 | ||
|
|
6e7b82cfcb | ||
|
|
b5fb24b360 | ||
|
|
b77345222c | ||
|
|
793e81406e | ||
|
|
cef1ec95d2 | ||
|
|
7e1b3c552b | ||
|
|
69736a63b6 | ||
|
|
fb4a9f9056 | ||
|
|
387d89d3af | ||
|
|
65d9ca9d39 | ||
|
|
8c842af4ba | ||
|
|
4faf9262c9 | ||
|
|
be7724c67d | ||
|
|
48d26347f7 | ||
|
|
bdb01ec8b5 | ||
|
|
9047815799 | ||
|
|
05bd94a2cc | ||
|
|
c9f7da84d0 | ||
|
|
bcc124e86a | ||
|
|
48d2733403 | ||
|
|
31fc6e4e56 | ||
|
|
fcdeef0146 | ||
|
|
1cd524c0cc | ||
|
|
0f746917a7 | ||
|
|
a2228d0169 | ||
|
|
e8a679d34b | ||
|
|
1912a42091 | ||
|
|
ca81f96635 | ||
|
|
eb3b8c4b31 | ||
|
|
6740d6d60b | ||
|
|
c46c35b248 | ||
|
|
0b2dcec4bc | ||
|
|
f8e2a4aff4 | ||
|
|
7256db8c4e | ||
|
|
b14d5d9bee | ||
|
|
12e15c3e4b | ||
|
|
51911bf272 | ||
|
|
6dc8061401 | ||
|
|
b8fa4f8797 | ||
|
|
cc0bea7f45 | ||
|
|
4e9124b244 | ||
|
|
f0eabef7b0 | ||
|
|
23e5557958 | ||
|
|
b1d02afa85 | ||
|
|
2edc380f47 | ||
|
|
cea8295c09 | ||
|
|
244bfc993a | ||
|
|
3a272a4493 | ||
|
|
a3300db770 | ||
|
|
b0394ce261 | ||
|
|
3123089790 | ||
|
|
f13cf66676 | ||
|
|
0c8c4d87fb | ||
|
|
066088fdeb | ||
|
|
cf641e41c7 | ||
|
|
5b48322e1b | ||
|
|
9d3f680d58 | ||
|
|
bed28d57e6 | ||
|
|
2538525103 | ||
|
|
6ed798db8c | ||
|
|
8e9d966829 | ||
|
|
be16640f92 | ||
|
|
0e1376dd2e | ||
|
|
0494575aa7 | ||
|
|
bed57534e0 | ||
|
|
1862d631d1 | ||
|
|
c977ce5690 | ||
|
|
84df377516 | ||
|
|
4d9a284f6e | ||
|
|
da7ad8b44a | ||
|
|
4164046323 | ||
|
|
72e743dfd1 | ||
|
|
7eb7edaf0a | ||
|
|
49368aaf76 | ||
|
|
b8c07a966f | ||
|
|
c6bcc0e3aa | ||
|
|
7719339f23 | ||
|
|
8ad64722ed | ||
|
|
611f8b8865 | ||
|
|
38bdc173e8 | ||
|
|
52244325d9 | ||
|
|
3fd3d99b8c | ||
|
|
d4ee5e91a2 | ||
|
|
c2ad2d7238 | ||
|
|
892e195f0e | ||
|
|
c5b1bed7dc | ||
|
|
0e388d7aa7 | ||
|
|
c3a0dbbe48 | ||
|
|
8b95b3ffc7 | ||
|
|
42b78461cd | ||
|
|
9190a41ca5 | ||
|
|
28a7251319 | ||
|
|
20565866ef | ||
|
|
246f10aee5 | ||
|
|
960017280d | ||
|
|
7218aaf52e | ||
|
|
62050cc7b6 | ||
|
|
f88a14ac0a | ||
|
|
9286761c30 | ||
|
|
07c3273247 | ||
|
|
eb8fe77582 | ||
|
|
b68ba0bff6 | ||
|
|
696657c09e | ||
|
|
12bea9b60a | ||
|
|
9334e9552f | ||
|
|
a43b04a98b | ||
|
|
f359ff995d | ||
|
|
9185d2646b | ||
|
|
33e61c762c | ||
|
|
e342e646ff | ||
|
|
ed163a80e0 | ||
|
|
b390df08b5 | ||
|
|
f0b3b9f7f4 | ||
|
|
a67d732507 | ||
|
|
ca0ebe0d75 | ||
|
|
94d113cbe0 | ||
|
|
25c3aeaa5f | ||
|
|
736d5a00b7 | ||
|
|
f1627b214c | ||
|
|
d9265ec7ea | ||
|
|
663e871bed | ||
|
|
5c5eaddef7 | ||
|
|
edaec4f1ae | ||
|
|
6d19acaa6c | ||
|
|
d29a619fbf | ||
|
|
b17808dd91 | ||
|
|
c5321a3667 | ||
|
|
8836691510 | ||
|
|
6568f6525d | ||
|
|
473fc27b49 | ||
|
|
74aeb55acb | ||
|
|
8efcf0529f | ||
|
|
06071c2f9c | ||
|
|
5eb4eca487 | ||
|
|
33f6153e12 | ||
|
|
df3283f526 | ||
|
|
b5901a2819 | ||
|
|
6d5f1402fe | ||
|
|
65e3782b2e | ||
|
|
135981dd30 | ||
|
|
fe5cf2d922 | ||
|
|
e15425cc2e | ||
|
|
c3cb0a4e33 | ||
|
|
cc32976bdd | ||
|
|
bc08318716 | ||
|
|
ee1ab30c2c | ||
|
|
7fa1a66f7e | ||
|
|
946bf37406 | ||
|
|
8706f6931a | ||
|
|
f551674860 | ||
|
|
d90fe0ef07 | ||
|
|
bf979fa3b3 | ||
|
|
b3b88891e9 | ||
|
|
01c5de60dc | ||
|
|
47b8fe5022 | ||
|
|
324b37b875 | ||
|
|
76e7e192d8 | ||
|
|
f9f3c4d923 | ||
|
|
a476476bd1 | ||
|
|
82a60a884a | ||
|
|
f17727de18 | ||
|
|
f1f33c79f4 | ||
|
|
d52eaafdbb | ||
|
|
e7a3e50ed1 | ||
|
|
2e09a87baa | ||
|
|
b92ae7e47e | ||
|
|
f98446e139 | ||
|
|
57a48dadaa | ||
|
|
c65ec68e46 | ||
|
|
d6559be3fc | ||
|
|
6fbf67f9a9 | ||
|
|
59df5b24fe | ||
|
|
3e1544294b | ||
|
|
a12c398816 | ||
|
|
0bd3e28348 | ||
|
|
ad4e39c088 | ||
|
|
2668d96e6a | ||
|
|
357c496aad | ||
|
|
22a513ba22 | ||
|
|
e6dd1119be | ||
|
|
2dbe454e04 | ||
|
|
e3a59b76eb | ||
|
|
01008acfcd | ||
|
|
b67a162d3f | ||
|
|
8bfde9fbaf | ||
|
|
1fdecf8def | ||
|
|
1141d4b833 | ||
|
|
cdac92ca7b | ||
|
|
2a57c260cc | ||
|
|
f41e29ab3a | ||
|
|
8f10624073 | ||
|
|
eb1f8be11e | ||
|
|
3333501ab9 | ||
|
|
0f40820315 | ||
|
|
5f1a839620 | ||
|
|
b9bb1c775d | ||
|
|
1059b7376b | ||
|
|
f38ab4a9ce | ||
|
|
9e7450c012 | ||
|
|
99a3e360fe | ||
|
|
d45f8f78d6 | ||
|
|
648af61116 | ||
|
|
eebf1b89b1 | ||
|
|
f8094bb624 | ||
|
|
ed13e0d2c2 | ||
|
|
aa830589b4 | ||
|
|
999a2bd929 | ||
|
|
d69ee98450 | ||
|
|
f6712c24ad | ||
|
|
89d2772194 | ||
|
|
03352142b6 | ||
|
|
73a51e0c00 | ||
|
|
2e26f8caa6 | ||
|
|
f9bcce9e43 | ||
|
|
2ecc877ba8 | ||
|
|
3f8a3c69e3 | ||
|
|
67c37a0984 | ||
|
|
a58a55d00d | ||
|
|
06d51dd0b8 | ||
|
|
d5db028f57 | ||
|
|
fcb85ff4be | ||
|
|
3695b4363d | ||
|
|
cb11e6437c | ||
|
|
5127bd11ce | ||
|
|
91f90aefa1 | ||
|
|
0a067bce36 | ||
|
|
f0aba433bf | ||
|
|
f06467a0e3 | ||
|
|
68bcd3c710 | ||
|
|
a645733cc5 | ||
|
|
49fe5baf94 | ||
|
|
411ee55653 | ||
|
|
e351ce7411 | ||
|
|
f33e585a71 | ||
|
|
77f3096e0d | ||
|
|
9a5b18c4e6 | ||
|
|
0c7112869a | ||
|
|
f66a4d71ea | ||
|
|
9b0ab878df | ||
|
|
d30b90dfd0 | ||
|
|
efd28f8c27 | ||
|
|
e05e722387 | ||
|
|
748e81956d | ||
|
|
c9a41febce | ||
|
|
18e244b756 | ||
|
|
47676139a3 | ||
|
|
1ed933b7ad | ||
|
|
f6a343ccd6 | ||
|
|
dd6cdc22e5 | ||
|
|
f70f4348b3 | ||
|
|
ec7f801929 | ||
|
|
0f8aca382e | ||
|
|
0270f77eaa | ||
|
|
dcba71ada4 | ||
|
|
6080f76a9c | ||
|
|
e7349161f3 | ||
|
|
2e2907ea09 | ||
|
|
25e84b2a6c | ||
|
|
5efd424172 | ||
|
|
2672502c07 | ||
|
|
83440cc3d9 | ||
|
|
8e5f93be1c | ||
|
|
c1afc07955 | ||
|
|
4a83e14878 | ||
|
|
832320abc6 | ||
|
|
70c237da72 | ||
|
|
edfcc5c023 | ||
|
|
0668debec6 | ||
|
|
02d6463faa | ||
|
|
1fdb85234c | ||
|
|
44b7dd1808 | ||
|
|
e983ee3101 | ||
|
|
75e7e81c05 | ||
|
|
31ce3b9c08 | ||
|
|
ed93c50fef | ||
|
|
4845660eb5 | ||
|
|
c7919210a2 | ||
|
|
9491418f3b | ||
|
|
e8de403286 | ||
|
|
dfb625377b | ||
|
|
2c0f2a8be6 | ||
|
|
787d3cb3e9 | ||
|
|
96ca17d26c | ||
|
|
3dd0f7f2c3 | ||
|
|
10ba0cf976 | ||
|
|
276c15cc23 | ||
|
|
2584b848ad | ||
|
|
6471efed5f | ||
|
|
5b7d7ad65d | ||
|
|
712851a8a5 | ||
|
|
63d291cb47 | ||
|
|
f825c92111 | ||
|
|
419eb2ea41 | ||
|
|
89b58dd64e | ||
|
|
6bc5f81440 | ||
|
|
424f4b7b5e | ||
|
|
9271a1caac | ||
|
|
0ee4df03f9 | ||
|
|
8ac713ce32 | ||
|
|
76b2489fe9 | ||
|
|
6786095154 | ||
|
|
2c5793ef37 | ||
|
|
d483f25b96 | ||
|
|
7118368979 | ||
|
|
59256c2e80 | ||
|
|
1fb8a0db1e | ||
|
|
07d0c8e8fa | ||
|
|
98452ead17 | ||
|
|
d8c9f40377 | ||
|
|
8148d5eec6 | ||
|
|
4c429ad604 | ||
|
|
a9c52de8d5 | ||
|
|
f648aa1f91 | ||
|
|
eaba388bdd | ||
|
|
73e6afcbc6 | ||
|
|
8c3b72adf2 | ||
|
|
ae18ff4262 | ||
|
|
1adc8af543 | ||
|
|
7413df6f1e | ||
|
|
bda8559190 | ||
|
|
b74612fa41 | ||
|
|
22e0c20dcd | ||
|
|
08f92e1100 | ||
|
|
bb12ece46e | ||
|
|
0065438305 | ||
|
|
7f115c1b3e | ||
|
|
4e0114ab0d | ||
|
|
0ef64fa4bd | ||
|
|
84dbc17c22 | ||
|
|
16e16e356d | ||
|
|
978ee885c4 | ||
|
|
850d43df63 | ||
|
|
fc109fd1b1 | ||
|
|
9aefc55534 | ||
|
|
2829947b93 | ||
|
|
0c2af89a1c | ||
|
|
14c2dc9624 | ||
|
|
4f35d710a6 | ||
|
|
fdb5e018e5 | ||
|
|
6001fde25c | ||
|
|
ae63c0adaf | ||
|
|
ad1539c8f6 | ||
|
|
634a0c8cd0 | ||
|
|
773f9a0c63 | ||
|
|
e4e320e9e3 | ||
|
|
3b4b400e6b | ||
|
|
a950be2d95 | ||
|
|
ba6f5ab984 | ||
|
|
f3a5e3f5ed | ||
|
|
e685e621f3 | ||
|
|
2cd2be9b67 | ||
|
|
e73d9e8a03 | ||
|
|
476f74a604 | ||
|
|
ab0d1d99e6 | ||
|
|
d5680ffd5d | ||
|
|
3c091a88d4 | ||
|
|
49b70b33de | ||
|
|
c5ff2716fb | ||
|
|
400fdf0896 | ||
|
|
cbb8c7323c | ||
|
|
60e79f8f77 | ||
|
|
0e829d739a | ||
|
|
62abb274e0 | ||
|
|
e4028785de | ||
|
|
2bb44bcb76 | ||
|
|
684599f81b | ||
|
|
b56d28f5df | ||
|
|
02b9ac04c8 | ||
|
|
2fc283990a | ||
|
|
abb652ebd5 | ||
|
|
55786cb7f7 | ||
|
|
447a575f4f | ||
|
|
49280a8617 | ||
|
|
ff78a9cc35 | ||
|
|
3fea5c9c8c | ||
|
|
aea63cad52 | ||
|
|
800abe9920 | ||
|
|
dd6069e41a | ||
|
|
90d31aeff0 | ||
|
|
4d9a327b1f | ||
|
|
06a97ef076 | ||
|
|
76c2477387 | ||
|
|
bc4dac9cad | ||
|
|
36e3d6c18a | ||
|
|
edfd84a8e3 | ||
|
|
fb20cf6069 | ||
|
|
abbe47f6b9 | ||
|
|
f84d250134 | ||
|
|
3ffb4b8990 | ||
|
|
f70cfabede | ||
|
|
3a81783d77 | ||
|
|
237a4a90ff | ||
|
|
99c7dfc98d | ||
|
|
48aba34370 | ||
|
|
29cca36f2c | ||
|
|
0f5819f5c2 | ||
|
|
373772de2d | ||
|
|
7f5bbe8b5f | ||
|
|
daee57167b | ||
|
|
03467196b9 | ||
|
|
d3f3531cdb | ||
|
|
883b694592 | ||
|
|
6c89d66af9 | ||
|
|
fb0a76b418 | ||
|
|
64f77fca5b | ||
|
|
b1fca2c5be | ||
|
|
108d705f09 | ||
|
|
a77242e66c | ||
|
|
8b153113ff | ||
|
|
6d0ec37135 | ||
|
|
603dadff35 | ||
|
|
1a4ef3d9c1 | ||
|
|
788468054a | ||
|
|
bdb44c6dce | ||
|
|
251086f9e9 | ||
|
|
b22aa62046 | ||
|
|
c6e4b60424 |
51
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
51
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
name: 报告Bug
|
||||
about: 报告KnowStreaming的相关Bug
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||
|
||||
你是否希望来认领这个Bug。
|
||||
|
||||
「 Y / N 」
|
||||
|
||||
### 环境信息
|
||||
|
||||
* KnowStreaming version : <font size=4 color =red> xxx </font>
|
||||
* Operating System version : <font size=4 color =red> xxx </font>
|
||||
* Java version : <font size=4 color =red> xxx </font>
|
||||
|
||||
|
||||
### 重现该问题的步骤
|
||||
|
||||
1. xxx
|
||||
|
||||
|
||||
|
||||
2. xxx
|
||||
|
||||
|
||||
3. xxx
|
||||
|
||||
|
||||
|
||||
### 预期结果
|
||||
|
||||
<!-- 写下应该出现的预期结果?-->
|
||||
|
||||
### 实际结果
|
||||
|
||||
<!-- 实际发生了什么? -->
|
||||
|
||||
|
||||
---
|
||||
|
||||
如果有异常,请附上异常Trace:
|
||||
|
||||
```
|
||||
Just put your stack trace here!
|
||||
```
|
||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: 讨论问题
|
||||
url: https://github.com/didi/KnowStreaming/discussions/new
|
||||
about: 发起问题、讨论 等等
|
||||
- name: KnowStreaming官网
|
||||
url: https://knowstreaming.com/
|
||||
about: KnowStreaming website
|
||||
26
.github/ISSUE_TEMPLATE/detail_optimizing.md
vendored
Normal file
26
.github/ISSUE_TEMPLATE/detail_optimizing.md
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: 优化建议
|
||||
about: 相关功能优化建议
|
||||
title: ''
|
||||
labels: Optimization Suggestions
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||
|
||||
你是否希望来认领这个优化建议。
|
||||
|
||||
「 Y / N 」
|
||||
|
||||
### 环境信息
|
||||
|
||||
* KnowStreaming version : <font size=4 color =red> xxx </font>
|
||||
* Operating System version : <font size=4 color =red> xxx </font>
|
||||
* Java version : <font size=4 color =red> xxx </font>
|
||||
|
||||
### 需要优化的功能点
|
||||
|
||||
|
||||
### 建议如何优化
|
||||
|
||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: 提议新功能/需求
|
||||
about: 给KnowStreaming提一个功能需求
|
||||
title: ''
|
||||
labels: feature
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我在 [issues](https://github.com/didi/KnowStreaming/issues) 中并未搜索到与此相关的功能需求。
|
||||
- [ ] 我在 [release note](https://github.com/didi/KnowStreaming/releases) 已经发布的版本中并没有搜到相关功能.
|
||||
|
||||
你是否希望来认领这个Feature。
|
||||
|
||||
「 Y / N 」
|
||||
|
||||
|
||||
## 这里描述需求
|
||||
<!--请尽可能的描述清楚您的需求 -->
|
||||
|
||||
12
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
12
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
name: 提个问题
|
||||
about: 问KnowStreaming相关问题
|
||||
title: ''
|
||||
labels: question
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||
|
||||
## 在这里提出你的问题
|
||||
23
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
23
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
请不要在没有先创建Issue的情况下创建Pull Request。
|
||||
|
||||
## 变更的目的是什么
|
||||
|
||||
XXXXX
|
||||
|
||||
## 简短的更新日志
|
||||
|
||||
XX
|
||||
|
||||
## 验证这一变化
|
||||
|
||||
XXXX
|
||||
|
||||
请遵循此清单,以帮助我们快速轻松地整合您的贡献:
|
||||
|
||||
* [ ] 一个 PR(Pull Request的简写)只解决一个问题,禁止一个 PR 解决多个问题;
|
||||
* [ ] 确保 PR 有对应的 Issue(通常在您开始处理之前创建),除非是书写错误之类的琐碎更改不需要 Issue ;
|
||||
* [ ] 格式化 PR 及 Commit-Log 的标题及内容,例如 #861 。PS:Commit-Log 需要在 Git Commit 代码时进行填写,在 GitHub 上修改不了;
|
||||
* [ ] 编写足够详细的 PR 描述,以了解 PR 的作用、方式和原因;
|
||||
* [ ] 编写必要的单元测试来验证您的逻辑更正。如果提交了新功能或重大更改,请记住在 test 模块中添加 integration-test;
|
||||
* [ ] 确保编译通过,集成测试通过;
|
||||
|
||||
43
.github/workflows/ci_build.yml
vendored
Normal file
43
.github/workflows/ci_build.yml
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
name: KnowStreaming Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up JDK 11
|
||||
uses: actions/setup-java@v3
|
||||
with:
|
||||
java-version: '11'
|
||||
distribution: 'temurin'
|
||||
cache: maven
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v1
|
||||
with:
|
||||
node-version: '12.22.12'
|
||||
|
||||
- name: Build With Maven
|
||||
run: mvn -Prelease-package -Dmaven.test.skip=true clean install -U
|
||||
|
||||
- name: Get KnowStreaming Version
|
||||
if: ${{ success() }}
|
||||
run: |
|
||||
version=`mvn -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive exec:exec -q`
|
||||
echo "VERSION=${version}" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload Binary Package
|
||||
if: ${{ success() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: KnowStreaming-${{ env.VERSION }}.tar.gz
|
||||
path: km-dist/target/KnowStreaming-${{ env.VERSION }}.tar.gz
|
||||
227
.gitignore
vendored
227
.gitignore
vendored
@@ -1,111 +1,116 @@
|
||||
### Intellij ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
|
||||
*.iml
|
||||
|
||||
## Directory-based project format:
|
||||
.idea/
|
||||
# if you remove the above rule, at least ignore the following:
|
||||
|
||||
# User-specific stuff:
|
||||
# .idea/workspace.xml
|
||||
# .idea/tasks.xml
|
||||
# .idea/dictionaries
|
||||
# .idea/shelf
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/dataSources.ids
|
||||
.idea/dataSources.xml
|
||||
.idea/sqlDataSources.xml
|
||||
.idea/dynamic.xml
|
||||
.idea/uiDesigner.xml
|
||||
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.ipr
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
|
||||
### Java ###
|
||||
*.class
|
||||
|
||||
# Mobile Tools for Java (J2ME)
|
||||
.mtj.tmp/
|
||||
|
||||
# Package Files #
|
||||
*.jar
|
||||
*.war
|
||||
*.ear
|
||||
|
||||
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
|
||||
hs_err_pid*
|
||||
|
||||
|
||||
### OSX ###
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
/target
|
||||
target/
|
||||
*.log
|
||||
*.log.*
|
||||
*.bak
|
||||
*.vscode
|
||||
*/.vscode/*
|
||||
*/.vscode
|
||||
*/velocity.log*
|
||||
*/*.log
|
||||
*/*.log.*
|
||||
web/node_modules/
|
||||
web/node_modules/*
|
||||
workspace.xml
|
||||
/output/*
|
||||
.gitversion
|
||||
*/node_modules/*
|
||||
web/src/main/resources/templates/*
|
||||
*/out/*
|
||||
*/dist/*
|
||||
.DS_Store
|
||||
### Intellij ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
|
||||
*.iml
|
||||
|
||||
## Directory-based project format:
|
||||
.idea/
|
||||
# if you remove the above rule, at least ignore the following:
|
||||
|
||||
# User-specific stuff:
|
||||
# .idea/workspace.xml
|
||||
# .idea/tasks.xml
|
||||
# .idea/dictionaries
|
||||
# .idea/shelf
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/dataSources.ids
|
||||
.idea/dataSources.xml
|
||||
.idea/sqlDataSources.xml
|
||||
.idea/dynamic.xml
|
||||
.idea/uiDesigner.xml
|
||||
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.ipr
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
|
||||
### Java ###
|
||||
*.class
|
||||
|
||||
# Mobile Tools for Java (J2ME)
|
||||
.mtj.tmp/
|
||||
|
||||
# Package Files #
|
||||
*.jar
|
||||
*.war
|
||||
*.ear
|
||||
*.tar.gz
|
||||
|
||||
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
|
||||
hs_err_pid*
|
||||
|
||||
|
||||
### OSX ###
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
/target
|
||||
target/
|
||||
*.log
|
||||
*.log.*
|
||||
*.bak
|
||||
*.vscode
|
||||
*/.vscode/*
|
||||
*/.vscode
|
||||
*/velocity.log*
|
||||
*/*.log
|
||||
*/*.log.*
|
||||
node_modules/
|
||||
node_modules/*
|
||||
workspace.xml
|
||||
/output/*
|
||||
.gitversion
|
||||
out/*
|
||||
dist/
|
||||
dist/*
|
||||
km-rest/src/main/resources/templates/
|
||||
*dependency-reduced-pom*
|
||||
#filter flattened xml
|
||||
*/.flattened-pom.xml
|
||||
.flattened-pom.xml
|
||||
*/*/.flattened-pom.xml
|
||||
74
CODE_OF_CONDUCT.md
Normal file
74
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,74 @@
|
||||
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project, and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
education, socio-economic status, nationality, personal appearance, race,
|
||||
religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at https://knowstreaming.com/support-center . All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
158
CONTRIBUTING.md
158
CONTRIBUTING.md
@@ -1,28 +1,150 @@
|
||||
# Contribution Guideline
|
||||
|
||||
Thanks for considering to contribute this project. All issues and pull requests are highly appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
Before sending pull request to this project, please read and follow guidelines below.
|
||||
# 为KnowStreaming做贡献
|
||||
|
||||
1. Branch: We only accept pull request on `dev` branch.
|
||||
2. Coding style: Follow the coding style used in kafka-manager.
|
||||
3. Commit message: Use English and be aware of your spell.
|
||||
4. Test: Make sure to test your code.
|
||||
|
||||
Add device mode, API version, related log, screenshots and other related information in your pull request if possible.
|
||||
欢迎👏🏻来到KnowStreaming!本文档是关于如何为KnowStreaming做出贡献的指南。
|
||||
|
||||
NOTE: We assume all your contribution can be licensed under the [Apache License 2.0](LICENSE).
|
||||
如果您发现不正确或遗漏的内容, 请留下意见/建议。
|
||||
|
||||
## Issues
|
||||
## 行为守则
|
||||
请务必阅读并遵守我们的 [行为准则](./CODE_OF_CONDUCT.md).
|
||||
|
||||
We love clearly described issues. :)
|
||||
|
||||
Following information can help us to resolve the issue faster.
|
||||
|
||||
* Device mode and hardware information.
|
||||
* API version.
|
||||
* Logs.
|
||||
* Screenshots.
|
||||
* Steps to reproduce the issue.
|
||||
## 贡献
|
||||
|
||||
**KnowStreaming** 欢迎任何角色的新参与者,包括 **User** 、**Contributor**、**Committer**、**PMC** 。
|
||||
|
||||
我们鼓励新人积极加入 **KnowStreaming** 项目,从User到Contributor、Committer ,甚至是 PMC 角色。
|
||||
|
||||
为了做到这一点,新人需要积极地为 **KnowStreaming** 项目做出贡献。以下介绍如何对 **KnowStreaming** 进行贡献。
|
||||
|
||||
|
||||
### 创建/打开 Issue
|
||||
|
||||
如果您在文档中发现拼写错误、在代码中**发现错误**或想要**新功能**或想要**提供建议**,您可以在 GitHub 上[创建一个Issue](https://github.com/didi/KnowStreaming/issues/new/choose) 进行报告。
|
||||
|
||||
|
||||
如果您想直接贡献, 您可以选择下面标签的问题。
|
||||
|
||||
- [contribution welcome](https://github.com/didi/KnowStreaming/labels/contribution%20welcome) : 非常需要解决/新增 的Issues
|
||||
- [good first issue](https://github.com/didi/KnowStreaming/labels/good%20first%20issue): 对新人比较友好, 新人可以拿这个Issue来练练手热热身。
|
||||
|
||||
<font color=red ><b> 请注意,任何 PR 都必须与有效issue相关联。否则,PR 将被拒绝。</b></font>
|
||||
|
||||
|
||||
|
||||
### 开始你的贡献
|
||||
|
||||
**分支介绍**
|
||||
|
||||
我们将 `dev`分支作为开发分支, 说明这是一个不稳定的分支。
|
||||
|
||||
此外,我们的分支模型符合 [https://nvie.com/posts/a-successful-git-branching-model/](https://nvie.com/posts/a-successful-git-branching-model/). 我们强烈建议新人在创建PR之前先阅读上述文章。
|
||||
|
||||
|
||||
|
||||
**贡献流程**
|
||||
|
||||
为方便描述,我们这里定义一下2个名词:
|
||||
|
||||
自己Fork出来的仓库是私人仓库, 我们这里称之为 :**分叉仓库**
|
||||
Fork的源项目,我们称之为:**源仓库**
|
||||
|
||||
|
||||
现在,如果您准备好创建PR, 以下是贡献者的工作流程:
|
||||
|
||||
1. Fork [KnowStreaming](https://github.com/didi/KnowStreaming) 项目到自己的仓库
|
||||
|
||||
2. 从源仓库的`dev`拉取并创建自己的本地分支,例如: `dev`
|
||||
3. 在本地分支上对代码进行修改
|
||||
4. Rebase 开发分支, 并解决冲突
|
||||
5. commit 并 push 您的更改到您自己的**分叉仓库**
|
||||
6. 创建一个 Pull Request 到**源仓库**的`dev`分支中。
|
||||
7. 等待回复。如果回复得慢,请无情地催促。
|
||||
|
||||
|
||||
更为详细的贡献流程请看:[贡献流程](./docs/contributer_guide/贡献流程.md)
|
||||
|
||||
创建Pull Request时:
|
||||
|
||||
1. 请遵循 PR的 [模板](./.github/PULL_REQUEST_TEMPLATE.md)
|
||||
2. 请确保 PR 有相应的issue。
|
||||
3. 如果您的 PR 包含较大的更改,例如组件重构或新组件,请编写有关其设计和使用的详细文档(在对应的issue中)。
|
||||
4. 注意单个 PR 不能太大。如果需要进行大量更改,最好将更改分成几个单独的 PR。
|
||||
5. 在合并PR之前,尽量的将最终的提交信息清晰简洁, 将多次修改的提交尽可能的合并为一次提交。
|
||||
6. 创建 PR 后,将为PR分配一个或多个reviewers。
|
||||
|
||||
|
||||
<font color=red><b>如果您的 PR 包含较大的更改,例如组件重构或新组件,请编写有关其设计和使用的详细文档。</b></font>
|
||||
|
||||
|
||||
# 代码审查指南
|
||||
|
||||
Committer将轮流review代码,以确保每个PR在合并前至少经过一名Committer的审查
|
||||
|
||||
一些原则:
|
||||
|
||||
- 可读性——重要的代码应该有详细的文档。API 应该有 Javadoc。代码风格应与现有风格保持一致。
|
||||
- 优雅:新的函数、类或组件应该设计得很好。
|
||||
- 可测试性——单元测试用例应该覆盖 80% 的新代码。
|
||||
- 可维护性 - 遵守我们的编码规范。
|
||||
|
||||
|
||||
# 开发者
|
||||
|
||||
## 成为Contributor
|
||||
|
||||
只要成功提交并合并PR , 则为Contributor
|
||||
|
||||
贡献者名单请看:[贡献者名单](./docs/contributer_guide/开发者名单.md)
|
||||
|
||||
## 尝试成为Committer
|
||||
|
||||
一般来说, 贡献8个重要的补丁并至少让三个不同的人来Review他们(您需要3个Committer的支持)。
|
||||
然后请人给你提名, 您需要展示您的
|
||||
|
||||
1. 至少8个重要的PR和项目的相关问题
|
||||
2. 与团队合作的能力
|
||||
3. 了解项目的代码库和编码风格
|
||||
4. 编写好代码的能力
|
||||
|
||||
当前的Committer可以通过在KnowStreaming中的Issue标签 `nomination`(提名)来提名您
|
||||
|
||||
1. 你的名字和姓氏
|
||||
2. 指向您的Git个人资料的链接
|
||||
3. 解释为什么你应该成为Commiter
|
||||
4. 详细说明提名人与您合作的3个PR以及相关问题,这些问题可以证明您的能力。
|
||||
|
||||
另外2个Committer需要支持您的**提名**,如果5个工作日内没有人反对,您就是提交者;如果有人反对或者想要更多的信息,Committer会讨论并通常达成共识(5个工作日内)。
|
||||
|
||||
|
||||
# 开源奖励计划
|
||||
|
||||
|
||||
我们非常欢迎开发者们为KnowStreaming开源项目贡献一份力量,相应也将给予贡献者激励以表认可与感谢。
|
||||
|
||||
|
||||
## 参与贡献
|
||||
|
||||
1. 积极参与 Issue 的讨论,如答疑解惑、提供想法或报告无法解决的错误(Issue)
|
||||
2. 撰写和改进项目的文档(Wiki)
|
||||
3. 提交补丁优化代码(Coding)
|
||||
|
||||
|
||||
## 你将获得
|
||||
|
||||
1. 加入KnowStreaming开源项目贡献者名单并展示
|
||||
2. KnowStreaming开源贡献者证书(纸质&电子版)
|
||||
3. KnowStreaming贡献者精美大礼包(KnowStreaming/滴滴 周边)
|
||||
|
||||
|
||||
## 相关规则
|
||||
|
||||
- Contributor和Committer都会有对应的证书和对应的礼包
|
||||
- 每季度由KnowStreaming项目团队评选出杰出贡献者,颁发相应证书。
|
||||
- 年末进行年度评选
|
||||
|
||||
贡献者名单请看:[贡献者名单](./docs/contributer_guide/开发者名单.md)
|
||||
@@ -1,7 +0,0 @@
|
||||
FROM fabric8/java-alpine-openjdk8-jdk
|
||||
MAINTAINER xuzhengxi
|
||||
ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
|
||||
ADD ./web/target/kafka-manager-web-1.1.0-SNAPSHOT.jar kafka-manager-web.jar
|
||||
ADD ./docker/kafka-manager/application-standalone.yml application.yml
|
||||
ENTRYPOINT ["java","-jar","/kafka-manager-web.jar","--spring.config.location=./application.yml"]
|
||||
EXPOSE 8080
|
||||
296
README.md
296
README.md
@@ -1,135 +1,161 @@
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||
|
||||
---
|
||||
|
||||
## 主要功能特性
|
||||
|
||||
|
||||
### 集群监控维度
|
||||
|
||||
- 多版本集群管控,支持从`0.10.2`到`2.4`版本;
|
||||
- 集群Topic、Broker等多维度历史与实时关键指标查看;
|
||||
|
||||
|
||||
### 集群管控维度
|
||||
|
||||
- 集群运维,包括逻辑Region方式管理集群;
|
||||
- Broker运维,包括优先副本选举;
|
||||
- Topic运维,包括创建、查询、扩容、修改属性、数据采样及迁移等;
|
||||
- 消费组运维,包括指定时间或指定偏移两种方式进行重置消费偏移;
|
||||
|
||||
|
||||
### 用户使用维度
|
||||
|
||||
- 管理员用户与普通用户视角区分;
|
||||
- 管理员用户与普通用户权限区分;
|
||||
|
||||
---
|
||||
|
||||
## kafka-manager架构图
|
||||
|
||||

|
||||
|
||||
|
||||
---
|
||||
|
||||
## 安装手册
|
||||
|
||||
### 环境依赖
|
||||
|
||||
- `Maven 3.5.0+`(后端打包依赖)
|
||||
- `node v8.12.0+`(前端打包依赖)
|
||||
- `Java 8+`(运行环境需要)
|
||||
- `MySQL` 或 `PostgreSQL`(数据存储)
|
||||
|
||||
---
|
||||
|
||||
### 环境初始化
|
||||
|
||||
**MySQL**
|
||||
|
||||
执行[create_mysql_table.sql](doc/create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`kafka_manager`。
|
||||
|
||||
```
|
||||
############# 示例:
|
||||
mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
|
||||
```
|
||||
|
||||
**PostgreSQL**
|
||||
|
||||
执行[create_postgresql_table.sql](doc/create_postgresql_table.sql)中的SQL命令,从而创建所需的PostgreSQL表。
|
||||
|
||||
```
|
||||
############# 示例:
|
||||
psql -h XXX.XXX.XXX.XXX -U XXXX -d kafka_manager -f ./create_postgresql_table.sql
|
||||
```
|
||||
|
||||
*PostgreSQL 用户、数据库创建方式*
|
||||
|
||||
```sql
|
||||
create user admin encrypted password 'admin';
|
||||
create database kafka_manager owner=admin template=template0 encoding='UTF-8' lc_collate='zh_CN.UTF-8' lc_ctype='zh_CN.UTF-8';
|
||||
```
|
||||
|
||||
***默认配置使用 MySQL 数据库,若要使用 PostgreSQL 数据库,使用 `-Dspring.profiles.active=pg` 指定 `application-pg.yml` 配置文件。***
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 打包
|
||||
|
||||
执行`mvn install`命令即可。
|
||||
|
||||
备注:每一次执行`mvn install`命令,都将在`web/src/main/resources/templates`下面生成最新的前端资源文件,如果`console`模块下的代码没有变更,可以修改`./pom.xml`文件,忽略对`console`模块的打包。
|
||||
|
||||
---
|
||||
|
||||
### 启动
|
||||
|
||||
```
|
||||
############# application.yml 是配置文件
|
||||
cp web/src/main/resources/application.yml web/target/
|
||||
cd web/target/
|
||||
nohup java -jar kafka-manager-web-1.1.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
|
||||
```
|
||||
|
||||
### 使用
|
||||
|
||||
本地启动的话,访问`http://localhost:8080`,输入帐号及密码进行登录。更多参考:[kafka-manager使用手册](doc/user_cn_guide.md)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 相关文档
|
||||
|
||||
- [kafka-manager使用手册](doc/user_cn_guide.md)
|
||||
|
||||
|
||||
## 钉钉交流群
|
||||
|
||||
搜索群号:`32821440` 或者扫码可入群交流. 备注:在钉钉搜索框搜索`32821440`,然后搜索结果中点击 "网络查找手机/邮箱/钉钉号" 即可看到我们的钉钉群:滴滴KafkaManager开源用户群。
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
## 项目成员
|
||||
|
||||
### 内部核心人员
|
||||
|
||||
`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`
|
||||
|
||||
|
||||
### 外部贡献者
|
||||
|
||||
`fangjunyu`、`zhoutaiyang`
|
||||
|
||||
|
||||
## 协议
|
||||
|
||||
`kafka-manager`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE)
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png" width = "256" div align=center />
|
||||
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://knowstreaming.com">产品官网</a> |
|
||||
<a href="https://github.com/didi/KnowStreaming/releases">下载地址</a> |
|
||||
<a href="https://doc.knowstreaming.com/product">文档资源</a> |
|
||||
<a href="https://demo.knowstreaming.com">体验环境</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<!--最近一次提交时间-->
|
||||
<a href="https://img.shields.io/github/last-commit/didi/KnowStreaming">
|
||||
<img src="https://img.shields.io/github/last-commit/didi/KnowStreaming" alt="LastCommit">
|
||||
</a>
|
||||
|
||||
<!--最新版本-->
|
||||
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
|
||||
<img src="https://img.shields.io/github/v/release/didi/KnowStreaming" alt="License">
|
||||
</a>
|
||||
|
||||
<!--License信息-->
|
||||
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/didi/KnowStreaming" alt="License">
|
||||
</a>
|
||||
|
||||
<!--Open-Issue-->
|
||||
<a href="https://github.com/didi/KnowStreaming/issues">
|
||||
<img src="https://img.shields.io/github/issues-raw/didi/KnowStreaming" alt="Issues">
|
||||
</a>
|
||||
|
||||
<!--知识星球-->
|
||||
<a href="https://z.didi.cn/5gSF9">
|
||||
<img src="https://img.shields.io/badge/join-%E7%9F%A5%E8%AF%86%E6%98%9F%E7%90%83-red" alt="Slack">
|
||||
</a>
|
||||
|
||||
</p>
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## `Know Streaming` 简介
|
||||
|
||||
`Know Streaming`是一套云原生的Kafka管控平台,脱胎于众多互联网内部多年的Kafka运营实践经验,专注于Kafka运维管控、监控告警、资源治理、多活容灾等核心场景。在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设,提供一系列特色的功能,极大地方便了用户和运维人员的日常使用,让普通运维人员都能成为Kafka专家。
|
||||
|
||||
我们现在正在收集 Know Streaming 用户信息,以帮助我们进一步改进 Know Streaming。
|
||||
请在 [issue#663](https://github.com/didi/KnowStreaming/issues/663) 上提供您的使用信息来支持我们:[谁在使用 Know Streaming](https://github.com/didi/KnowStreaming/issues/663)
|
||||
|
||||
|
||||
|
||||
整体具有以下特点:
|
||||
|
||||
- 👀 **零侵入、全覆盖**
|
||||
- 无需侵入改造 `Apache Kafka` ,一键便能纳管 `0.10.x` ~ `3.x.x` 众多版本的Kafka,包括 `ZK` 或 `Raft` 运行模式的版本,同时在兼容架构上具备良好的扩展性,帮助您提升集群管理水平;
|
||||
|
||||
- 🌪️ **零成本、界面化**
|
||||
- 提炼高频 CLI 能力,设计合理的产品路径,提供清新美观的 GUI 界面,支持 Cluster、Broker、Zookeeper、Topic、ConsumerGroup、Message、ACL、Connect 等组件 GUI 管理,普通用户5分钟即可上手;
|
||||
|
||||
- 👏 **云原生、插件化**
|
||||
- 基于云原生构建,具备水平扩展能力,只需要增加节点即可获取更强的采集及对外服务能力,提供众多可热插拔的企业级特性,覆盖可观测性生态整合、资源治理、多活容灾等核心场景;
|
||||
|
||||
- 🚀 **专业能力**
|
||||
- 集群管理:支持一键纳管,健康分析、核心组件观测 等功能;
|
||||
- 观测提升:多维度指标观测大盘、观测指标最佳实践 等功能;
|
||||
- 异常巡检:集群多维度健康巡检、集群多维度健康分 等功能;
|
||||
- 能力增强:集群负载均衡、Topic扩缩副本、Topic副本迁移 等功能;
|
||||
|
||||
|
||||
|
||||
**产品图**
|
||||
|
||||
<p align="center">
|
||||
|
||||
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_sPmS4SNLX9m1zlpmHaLJ" width = "768" height = "473" div align=center />
|
||||
|
||||
</p>
|
||||
|
||||
|
||||
|
||||
|
||||
## 文档资源
|
||||
|
||||
**`开发相关手册`**
|
||||
|
||||
- [打包编译手册](docs/install_guide/源码编译打包手册.md)
|
||||
- [单机部署手册](docs/install_guide/单机部署手册.md)
|
||||
- [版本升级手册](docs/install_guide/版本升级手册.md)
|
||||
- [本地源码启动手册](docs/dev_guide/本地源码启动手册.md)
|
||||
- [页面无数据排查手册](docs/dev_guide/页面无数据排查手册.md)
|
||||
|
||||
**`产品相关手册`**
|
||||
|
||||
- [产品使用指南](docs/user_guide/用户使用手册.md)
|
||||
- [2.x与3.x新旧对比手册](docs/user_guide/新旧对比手册.md)
|
||||
- [FAQ](docs/user_guide/faq.md)
|
||||
|
||||
|
||||
**点击 [这里](https://doc.knowstreaming.com/product),也可以从官网获取到更多文档**
|
||||
|
||||
**`产品网址`**
|
||||
- [产品官网:https://knowstreaming.com](https://knowstreaming.com)
|
||||
- [体验环境:https://demo.knowstreaming.com](https://demo.knowstreaming.com),登录账号:admin/admin
|
||||
|
||||
|
||||
|
||||
## 成为社区贡献者
|
||||
|
||||
1. [贡献源码](https://doc.knowstreaming.com/product/10-contribution) 了解如何成为 Know Streaming 的贡献者
|
||||
2. [具体贡献流程](https://doc.knowstreaming.com/product/10-contribution#102-贡献流程)
|
||||
3. [开源激励计划](https://doc.knowstreaming.com/product/10-contribution#105-开源激励计划)
|
||||
4. [贡献者名单](https://doc.knowstreaming.com/product/10-contribution#106-贡献者名单)
|
||||
|
||||
|
||||
获取KnowStreaming开源社区证书。
|
||||
|
||||
## 加入技术交流群
|
||||
|
||||
**`1、知识星球`**
|
||||
|
||||
<p align="left">
|
||||
<img src="https://user-images.githubusercontent.com/71620349/185357284-fdff1dad-c5e9-4ddf-9a82-0be1c970980d.JPG" height = "180" div align=left />
|
||||
</p>
|
||||
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
|
||||
👍 我们正在组建国内最大,最权威的 **[Kafka中文社区](https://z.didi.cn/5gSF9)**
|
||||
|
||||
在这里你可以结交各大互联网的 Kafka大佬 以及 4000+ Kafka爱好者,一起实现知识共享,实时掌控最新行业资讯,期待 👏 您的加入中~ https://z.didi.cn/5gSF9
|
||||
|
||||
有问必答~! 互动有礼~!
|
||||
|
||||
PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况~!如使用版本、操作步骤、报错/警告信息等,方便大V们快速解答~
|
||||
|
||||
|
||||
|
||||
**`2、微信群`**
|
||||
|
||||
微信加群:添加`PenceXie` 、`szzdzhp001`的微信号备注KnowStreaming加群。
|
||||
<br/>
|
||||
|
||||
加群之前有劳点一下 star,一个小小的 star 是对KnowStreaming作者们努力建设社区的动力。
|
||||
|
||||
感谢感谢!!!
|
||||
|
||||
<img width="116" alt="wx" src="https://user-images.githubusercontent.com/71620349/192257217-c4ebc16c-3ad9-485d-a914-5911d3a4f46b.png">
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#didi/KnowStreaming&Date)
|
||||
|
||||
|
||||
646
Releases_Notes.md
Normal file
646
Releases_Notes.md
Normal file
@@ -0,0 +1,646 @@
|
||||
|
||||
## v3.4.0
|
||||
|
||||
|
||||
|
||||
**问题修复**
|
||||
- [Bugfix]修复 Overview 指标文案错误的错误 ([#1190](https://github.com/didi/KnowStreaming/issues/1190))
|
||||
- [Bugfix]修复删除 Kafka 集群后,Connect 集群任务出现 NPE 问题 ([#1129](https://github.com/didi/KnowStreaming/issues/1129))
|
||||
- [Bugfix]修复在 Ldap 登录时,设置 auth-user-registration: false 会导致空指针的问题 ([#1117](https://github.com/didi/KnowStreaming/issues/1117))
|
||||
- [Bugfix]修复 Ldap 登录,调用 user.getId() 出现 NPE 的问题 ([#1108](https://github.com/didi/KnowStreaming/issues/1108))
|
||||
- [Bugfix]修复前端新增角色失败等问题 ([#1107](https://github.com/didi/KnowStreaming/issues/1107))
|
||||
- [Bugfix]修复 ZK 四字命令解析错误的问题
|
||||
- [Bugfix]修复 zk standalone 模式下,状态获取错误的问题
|
||||
- [Bugfix]修复 Broker 元信息解析方法未调用导致接入集群失败的问题 ([#993](https://github.com/didi/KnowStreaming/issues/993))
|
||||
- [Bugfix]修复 ConsumerAssignment 类型转换错误的问题
|
||||
- [Bugfix]修复对 Connect 集群的 clusterUrl 的动态更新导致配置不生效的问题 ([#1079](https://github.com/didi/KnowStreaming/issues/1079))
|
||||
- [Bugfix]修复消费组不支持重置到最旧 Offset 的问题 ([#1059](https://github.com/didi/KnowStreaming/issues/1059))
|
||||
- [Bugfix]后端增加查看 User 密码的权限点 ([#1095](https://github.com/didi/KnowStreaming/issues/1095))
|
||||
- [Bugfix]修复 Connect-JMX 端口维护信息错误的问题 ([#1146](https://github.com/didi/KnowStreaming/issues/1146))
|
||||
- [Bugfix]修复系统管理子应用无法正常启动的问题 ([#1167](https://github.com/didi/KnowStreaming/issues/1167))
|
||||
- [Bugfix]修复 Security 模块,权限点缺失问题 ([#1069](https://github.com/didi/KnowStreaming/issues/1069)), ([#1154](https://github.com/didi/KnowStreaming/issues/1154))
|
||||
- [Bugfix]修复 Connect-Worker Jmx 不生效的问题 ([#1067](https://github.com/didi/KnowStreaming/issues/1067))
|
||||
- [Bugfix]修复权限 ACL 管理中,消费组列表展示错误的问题 ([#1037](https://github.com/didi/KnowStreaming/issues/1037))
|
||||
- [Bugfix]修复 Connect 模块没有默认勾选指标的问题([#1022](https://github.com/didi/KnowStreaming/issues/1022))
|
||||
- [Bugfix]修复 es 索引 create/delete 死循环的问题 ([#1021](https://github.com/didi/KnowStreaming/issues/1021))
|
||||
- [Bugfix]修复 Connect-GroupDescription 解析失败的问题 ([#1015](https://github.com/didi/KnowStreaming/issues/1015))
|
||||
- [Bugfix]修复 Prometheus 开放接口中,Partition 指标 tag 缺失的问题 ([#1014](https://github.com/didi/KnowStreaming/issues/1014))
|
||||
- [Bugfix]修复 Topic 消息展示,offset 为 0 不显示的问题 ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
|
||||
- [Bugfix]修复重置offset接口调用过多问题
|
||||
- [Bugfix]Connect 提交任务变更为只保存用户修改的配置,并修复 JSON 模式下配置展示不全的问题 ([#1158](https://github.com/didi/KnowStreaming/issues/1158))
|
||||
- [Bugfix]修复消费组 Offset 重置后,提示重置成功,但是前端不刷新数据,Offset 无变化的问题 ([#1090](https://github.com/didi/KnowStreaming/issues/1090))
|
||||
- [Bugfix]修复未勾选系统管理查看权限,但是依然可以查看系统管理的问题 ([#1105](https://github.com/didi/KnowStreaming/issues/1105))
|
||||
|
||||
|
||||
**产品优化**
|
||||
- [Optimize]补充接入集群时,可选的 Kafka 版本列表 ([#1204](https://github.com/didi/KnowStreaming/issues/1204))
|
||||
- [Optimize]GroupTopic 信息修改为实时获取 ([#1196](https://github.com/didi/KnowStreaming/issues/1196))
|
||||
- [Optimize]增加 AdminClient 观测信息 ([#1111](https://github.com/didi/KnowStreaming/issues/1111))
|
||||
- [Optimize]增加 Connector 运行状态指标 ([#1110](https://github.com/didi/KnowStreaming/issues/1110))
|
||||
- [Optimize]统一 DB 元信息更新格式 ([#1127](https://github.com/didi/KnowStreaming/issues/1127)), ([#1125](https://github.com/didi/KnowStreaming/issues/1125)), ([#1006](https://github.com/didi/KnowStreaming/issues/1006))
|
||||
- [Optimize]日志输出增加支持 MDC,方便用户在 logback.xml 中 json 格式化日志 ([#1032](https://github.com/didi/KnowStreaming/issues/1032))
|
||||
- [Optimize]Jmx 相关日志优化 ([#1082](https://github.com/didi/KnowStreaming/issues/1082))
|
||||
- [Optimize]Topic-Partitions增加主动超时功能 ([#1076](https://github.com/didi/KnowStreaming/issues/1076))
|
||||
- [Optimize]Topic-Messages页面后端增加按照Partition和Offset纬度的排序 ([#1075](https://github.com/didi/KnowStreaming/issues/1075))
|
||||
- [Optimize]Connect-JSON模式下的JSON格式和官方API的格式不一致 ([#1080](https://github.com/didi/KnowStreaming/issues/1080)), ([#1153](https://github.com/didi/KnowStreaming/issues/1153)), ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
|
||||
- [Optimize]登录页面展示的 star 数量修改为最新的数量
|
||||
- [Optimize]Group 列表的 maxLag 指标调整为实时获取 ([#1074](https://github.com/didi/KnowStreaming/issues/1074))
|
||||
- [Optimize]Connector增加重启、编辑、删除等权限点 ([#1066](https://github.com/didi/KnowStreaming/issues/1066)), ([#1147](https://github.com/didi/KnowStreaming/issues/1147))
|
||||
- [Optimize]优化 pom.xml 中,KS版本的标签名
|
||||
- [Optimize]优化集群Brokers中, Controller显示存在延迟的问题 ([#1162](https://github.com/didi/KnowStreaming/issues/1162))
|
||||
- [Optimize]bump jackson version to 2.13.5
|
||||
- [Optimize]权限新增 ACL,自定义权限配置,资源 TransactionalId 优化 ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
|
||||
- [Optimize]Connect 样式优化
|
||||
- [Optimize]消费组详情控制数据实时刷新
|
||||
|
||||
|
||||
**功能新增**
|
||||
- [Feature]新增删除 Group 或 GroupOffset 功能 ([#1064](https://github.com/didi/KnowStreaming/issues/1064)), ([#1084](https://github.com/didi/KnowStreaming/issues/1084)), ([#1040](https://github.com/didi/KnowStreaming/issues/1040)), ([#1144](https://github.com/didi/KnowStreaming/issues/1144))
|
||||
- [Feature]增加 Truncate 数据功能 ([#1062](https://github.com/didi/KnowStreaming/issues/1062)), ([#1043](https://github.com/didi/KnowStreaming/issues/1043)), ([#1145](https://github.com/didi/KnowStreaming/issues/1145))
|
||||
- [Feature]支持指定 Server 的具体 Jmx 端口 ([#965](https://github.com/didi/KnowStreaming/issues/965))
|
||||
|
||||
|
||||
**文档更新**
|
||||
- [Doc]FAQ 补充 ES 8.x 版本使用说明 ([#1189](https://github.com/didi/KnowStreaming/issues/1189))
|
||||
- [Doc]补充启动失败的说明 ([#1126](https://github.com/didi/KnowStreaming/issues/1126))
|
||||
- [Doc]补充 ZK 无数据排查说明 ([#1004](https://github.com/didi/KnowStreaming/issues/1004))
|
||||
- [Doc]无数据排查文档,补充 ES 集群 Shard 满的异常日志
|
||||
- [Doc]README 补充页面无数据排查手册链接
|
||||
- [Doc]补充连接特定 Jmx 端口的说明 ([#965](https://github.com/didi/KnowStreaming/issues/965))
|
||||
- [Doc]补充 zk_properties 字段的使用说明 ([#1003](https://github.com/didi/KnowStreaming/issues/1003))
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.3.0
|
||||
|
||||
**问题修复**
|
||||
- 修复 Connect 的 JMX-Port 配置未生效问题;
|
||||
- 修复 不存在 Connector 时,OverView 页面的数据一直处于加载中的问题;
|
||||
- 修复 Group 分区信息,分页时展示不全的问题;
|
||||
- 修复采集副本指标时,参数传递错误的问题;
|
||||
- 修复用户信息修改后,用户列表会抛出空指针异常的问题;
|
||||
- 修复 Topic 详情页面,查看消息时,选择分区不生效问题;
|
||||
- 修复对 ZK 客户端进行配置后不生效的问题;
|
||||
- 修复 connect 模块,指标中缺少健康巡检项通过数的问题;
|
||||
- 修复 connect 模块,指标获取方法存在映射错误的问题;
|
||||
- 修复 connect 模块,max 纬度指标获取错误的问题;
|
||||
- 修复 Topic 指标大盘 TopN 指标显示信息错误的问题;
|
||||
- 修复 Broker Similar Config 显示错误的问题;
|
||||
- 修复解析 ZK 四字命令时,数据类型设置错误导致空指针的问题;
|
||||
- 修复新增 Topic 时,清理策略选项版本控制错误的问题;
|
||||
- 修复新接入集群时 Controller-Host 信息不显示的问题;
|
||||
- 修复 Connector 和 MM2 列表搜索不生效的问题;
|
||||
- 修复 Zookeeper 页面,Leader 显示存在异常的问题;
|
||||
- 修复前端打包失败的问题;
|
||||
|
||||
|
||||
**产品优化**
|
||||
- ZK Overview 页面补充默认展示的指标;
|
||||
- 统一初始化 ES 索引模版的脚本为 init_es_template.sh,同时新增缺失的 connect 索引模版初始化脚本,去除多余的 replica 和 zookeper 索引模版初始化脚本;
|
||||
- 指标大盘页面,优化指标筛选操作后,无指标数据的指标卡片由不显示改为显示,并增加无数据的兜底;
|
||||
- 删除从 ES 读写 replica 指标的相关代码;
|
||||
- 优化 Topic 健康巡检的日志,明确错误的原因;
|
||||
- 优化无 ZK 模块时,巡检详情忽略对 ZK 的展示;
|
||||
- 优化本地缓存大小为可配置;
|
||||
- Task 模块中的返回中,补充任务的分组信息;
|
||||
- FAQ 补充 Ldap 的配置说明;
|
||||
- FAQ 补充接入 Kerberos 认证的 Kafka 集群的配置说明;
|
||||
- ks_km_kafka_change_record 表增加时间纬度的索引,优化查询性能;
|
||||
- 优化 ZK 健康巡检的日志,便于问题的排查;
|
||||
|
||||
**功能新增**
|
||||
- 新增基于滴滴 Kafka 的 Topic 复制功能(需使用滴滴 Kafka 才可具备该能力);
|
||||
- Topic 指标大盘,新增 Topic 复制相关的指标;
|
||||
- 新增基于 TestContainers 的单测;
|
||||
|
||||
|
||||
**Kafka MM2 Beta版 (v3.3.0版本新增发布)**
|
||||
- MM2 任务的增删改查;
|
||||
- MM2 任务的指标大盘;
|
||||
- MM2 任务的健康状态;
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.2.0
|
||||
|
||||
**问题修复**
|
||||
- 修复健康巡检结果更新至 DB 时,出现死锁问题;
|
||||
- 修复 KafkaJMXClient 类中,logger错误的问题;
|
||||
- 后端修复 Topic 过期策略在 0.10.1.0 版本能多选的问题,实际应该只能二选一;
|
||||
- 修复接入集群时,不填写集群配置会报错的问题;
|
||||
- 升级 spring-context 至 5.3.19 版本,修复安全漏洞;
|
||||
- 修复 Broker & Topic 修改配置时,多版本兼容配置的版本信息错误的问题;
|
||||
- 修复 Topic 列表的健康分为健康状态;
|
||||
- 修复 Broker LogSize 指标存储名称错误导致查询不到的问题;
|
||||
- 修复 Prometheus 中,缺少 Group 部分指标的问题;
|
||||
- 修复因缺少健康状态指标导致集群数错误的问题;
|
||||
- 修复后台任务记录操作日志时,因缺少操作用户信息导致出现异常的问题;
|
||||
- 修复 Replica 指标查询时,DSL 错误的问题;
|
||||
- 关闭 errorLogger,修复错误日志重复输出的问题;
|
||||
- 修复系统管理更新用户信息失败的问题;
|
||||
- 修复因原AR信息丢失,导致迁移任务一直处于执行中的错误;
|
||||
- 修复集群 Topic 列表实时数据查询时,出现失败的问题;
|
||||
- 修复集群 Topic 列表,页面白屏问题;
|
||||
- 修复副本变更时,因AR数据异常,导致数组访问越界的问题;
|
||||
|
||||
|
||||
**产品优化**
|
||||
- 优化健康巡检为按照资源维度多线程并发处理;
|
||||
- 统一日志输出格式,并优化部分输出的日志;
|
||||
- 优化 ZK 四字命令结果解析过程中,容易引起误解的 WARN 日志;
|
||||
- 优化 Zookeeper 详情中,目录结构的搜索文案;
|
||||
- 优化线程池的名称,方便第三方系统进行相关问题的分析;
|
||||
- 去除 ESClient 的并发访问控制,降低 ESClient 创建数及提升利用率;
|
||||
- 优化 Topic Messages 抽屉文案;
|
||||
- 优化 ZK 健康巡检失败时的错误日志信息;
|
||||
- 提高 Offset 信息获取的超时时间,降低并发过高时出现请求超时的概率;
|
||||
- 优化 Topic & Partition 元信息的更新策略,降低对 DB 连接的占用;
|
||||
- 优化 Sonar 代码扫码问题;
|
||||
- 优化分区 Offset 指标的采集;
|
||||
- 优化前端图表相关组件逻辑;
|
||||
- 优化产品主题色;
|
||||
- Consumer 列表刷新按钮新增 hover 提示;
|
||||
- 优化配置 Topic 的消息大小时的测试弹框体验;
|
||||
- 优化 Overview 页面 TopN 查询的流程;
|
||||
|
||||
|
||||
**功能新增**
|
||||
- 新增页面无数据排查文档;
|
||||
- 增加 ES 索引删除的功能;
|
||||
- 支持拆分API服务和Job服务部署;
|
||||
|
||||
|
||||
**Kafka Connect Beta版 (v3.2.0版本新增发布)**
|
||||
- Connect 集群的纳管;
|
||||
- Connector 的增删改查;
|
||||
- Connect 集群 & Connector 的指标大盘;
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.1.0
|
||||
|
||||
**Bug修复**
|
||||
- 修复重置 Group Offset 的提示信息中,缺少Dead状态也可进行重置的描述;
|
||||
- 修复新建 Topic 后,立即查看 Topic Messages 信息时,会提示 Topic 不存在的问题;
|
||||
- 修复副本变更时,优先副本选举未被正常处罚执行的问题;
|
||||
- 修复 git 目录不存在时,打包不能正常进行的问题;
|
||||
- 修复 KRaft 模式的 Kafka 集群,JMX PORT 显示 -1 的问题;
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化Cluster、Broker、Topic、Group的健康分为健康状态;
|
||||
- 去除健康巡检配置中的权重信息;
|
||||
- 错误提示页面展示优化;
|
||||
- 前端打包编译依赖默认使用 taobao 镜像;
|
||||
- 重新设计优化导航栏的 icon ;
|
||||
|
||||
|
||||
**新增**
|
||||
- 个人头像下拉信息中,新增产品版本信息;
|
||||
- 多集群列表页面,新增集群健康状态分布信息;
|
||||
|
||||
|
||||
**Kafka ZK 部分 (v3.1.0版本正式发布)**
|
||||
- 新增 ZK 集群的指标大盘信息;
|
||||
- 新增 ZK 集群的服务状态概览信息;
|
||||
- 新增 ZK 集群的服务节点列表信息;
|
||||
- 新增 Kafka 在 ZK 的存储数据查看功能;
|
||||
- 新增 ZK 的健康巡检及健康状态计算;
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.0.1
|
||||
|
||||
**Bug修复**
|
||||
- 修复重置 Group Offset 时,提示信息中缺少 Dead 状态也可进行重置的信息;
|
||||
- 修复 Ldap 某个属性不存在时,会直接抛出空指针导致登陆失败的问题;
|
||||
- 修复集群 Topic 列表页,健康分详情信息中,检查时间展示错误的问题;
|
||||
- 修复更新健康检查结果时,出现死锁的问题;
|
||||
- 修复 Replica 索引模版错误的问题;
|
||||
- 修复 FAQ 文档中的错误链接;
|
||||
- 修复 Broker 的 TopN 指标不存在时,页面数据不展示的问题;
|
||||
- 修复 Group 详情页,图表时间范围选择不生效的问题;
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 集群 Group 列表按照 Group 维度进行展示;
|
||||
- 优化避免因 ES 中该指标不存在,导致日志中出现大量空指针的问题;
|
||||
- 优化全局 Message & Notification 展示效果;
|
||||
- 优化 Topic 扩分区名称 & 描述展示;
|
||||
|
||||
|
||||
**新增**
|
||||
- Broker 列表页面,新增 JMX 是否成功连接的信息;
|
||||
|
||||
|
||||
**ZK 部分(未完全发布)**
|
||||
- 后端补充 Kafka ZK 指标采集,Kafka ZK 信息获取相关功能;
|
||||
- 增加本地缓存,避免同一采集周期内 ZK 指标重复采集;
|
||||
- 增加 ZK 节点采集失败跳过策略,避免不断对存在问题的节点不断尝试;
|
||||
- 修复 zkAvgLatency 指标转 Long 时抛出异常问题;
|
||||
- 修复 ks_km_zookeeper 表中,role 字段类型错误问题;
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0
|
||||
|
||||
**Bug修复**
|
||||
- 修复 Group 指标防重复采集不生效问题
|
||||
- 修复自动创建 ES 索引模版失败问题
|
||||
- 修复 Group+Topic 列表中存在已删除Topic的问题
|
||||
- 修复使用 MySQL-8 ,因兼容问题, start_time 信息为 NULL 时,会导致创建任务失败的问题
|
||||
- 修复 Group 信息表更新时,出现死锁的问题
|
||||
- 修复图表补点逻辑与图表时间范围不适配的问题
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 按照资源类别,拆分健康巡检任务
|
||||
- 优化 Group 详情页的指标为实时获取
|
||||
- 图表拖拽排序支持用户级存储
|
||||
- 多集群列表 ZK 信息展示兼容无 ZK 情况
|
||||
- Topic 详情消息预览支持复制功能
|
||||
- 部分内容大数字支持千位分割符展示
|
||||
|
||||
|
||||
**新增**
|
||||
- 集群信息中,新增 Zookeeper 客户端配置字段
|
||||
- 集群信息中,新增 Kafka 集群运行模式字段
|
||||
- 新增 docker-compose 的部署方式
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.3
|
||||
|
||||
**文档**
|
||||
- FAQ 补充权限识别失败问题的说明
|
||||
- 同步更新文档,保持与官网一致
|
||||
|
||||
|
||||
**Bug修复**
|
||||
- Offset 信息获取时,过滤掉无 Leader 的分区
|
||||
- 升级 oshi-core 版本至 5.6.1 版本,修复 Windows 系统获取系统指标失败问题
|
||||
- 修复 JMX 连接被关闭后,未进行重建的问题
|
||||
- 修复因 DB 中 Broker 信息不存在导致 TotalLogSize 指标获取时抛空指针问题
|
||||
- 修复 dml-logi.sql 中,SQL 注释错误的问题
|
||||
- 修复 startup.sh 中,识别操作系统类型错误的问题
|
||||
- 修复配置管理页面删除配置失败的问题
|
||||
- 修复系统管理应用文件引用路径
|
||||
- 修复 Topic Messages 详情提示信息点击跳转 404 的问题
|
||||
- 修复扩副本时,当前副本数不显示问题
|
||||
|
||||
|
||||
**体验优化**
|
||||
- Topic-Messages 页面,增加返回数据的排序以及按照Earliest/Latest的获取方式
|
||||
- 优化 GroupOffsetResetEnum 类名为 OffsetTypeEnum,使得类名含义更准确
|
||||
- 移动 KafkaZKDAO 类,及 Kafka Znode 实体类的位置,使得 Kafka Zookeeper DAO 更加内聚及便于识别
|
||||
- 后端补充 Overview 页面指标排序的功能
|
||||
- 前端 Webpack 配置优化
|
||||
- Cluster Overview 图表取消放大展示功能
|
||||
- 列表页增加手动刷新功能
|
||||
- 接入/编辑集群,优化 JMX-PORT,Version 信息的回显,优化JMX信息的展示
|
||||
- 提高登录页面图片展示清晰度
|
||||
- 部分样式和文案优化
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.2
|
||||
|
||||
**文档**
|
||||
- 新增登录系统对接文档
|
||||
- 优化前端工程打包构建部分文档说明
|
||||
- FAQ补充KnowStreaming连接特定JMX IP的说明
|
||||
|
||||
|
||||
**Bug修复**
|
||||
- 修复logi_security_oplog表字段过短,导致删除Topic等操作无法记录的问题
|
||||
- 修复ES查询时,抛java.lang.NumberFormatException: For input string: "{"value":0,"relation":"eq"}" 问题
|
||||
- 修复LogStartOffset和LogEndOffset指标单位错误问题
|
||||
- 修复进行副本变更时,旧副本数为NULL的问题
|
||||
- 修复集群Group列表,在第二页搜索时,搜索时返回的分页信息错误问题
|
||||
- 修复重置Offset时,返回的错误信息提示不一致的问题
|
||||
- 修复集群查看,系统查看,LoadRebalance等页面权限点缺失问题
|
||||
- 修复查询不存在的Topic时,错误信息提示不明显的问题
|
||||
- 修复Windows用户打包前端工程报错的问题
|
||||
- package-lock.json锁定前端依赖版本号,修复因依赖自动升级导致打包失败等问题
|
||||
- 系统管理子应用,补充后端返回的Code码拦截,解决后端接口返回报错不展示的问题
|
||||
- 修复用户登出后,依旧可以访问系统的问题
|
||||
- 修复巡检任务配置时,数值显示错误的问题
|
||||
- 修复Broker/Topic Overview 图表和图表详情问题
|
||||
- 修复Job扩缩副本任务明细数据错误的问题
|
||||
- 修复重置Offset时,分区ID,Offset数值无限制问题
|
||||
- 修复扩缩/迁移副本时,无法选中Kafka系统Topic的问题
|
||||
- 修复Topic的Config页面,编辑表单时不能正确回显当前值的问题
|
||||
- 修复Broker Card返回数据后依旧展示加载态的问题
|
||||
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化默认用户密码为 admin/admin
|
||||
- 缩短新增集群后,集群信息加载的耗时
|
||||
- 集群Broker列表,增加Controller角色信息
|
||||
- 副本变更任务结束后,增加进行优先副本选举的操作
|
||||
- Task模块任务分为Metrics、Common、Metadata三类任务,每类任务配备独立线程池,减少对Job模块的线程池,以及不同类任务之间的相互影响
|
||||
- 删除代码中存在的多余无用文件
|
||||
- 自动新增ES索引模版及近7天索引,减少用户搭建时需要做的事项
|
||||
- 优化前端工程打包流程
|
||||
- 优化登录页文案,页面左侧栏内容,单集群详情样式,Topic列表趋势图等
|
||||
- 首次进入Broker/Topic图表详情时,进行预缓存数据从而优化体验
|
||||
- 优化Topic详情Partition Tab的展示
|
||||
- 多集群列表页增加编辑功能
|
||||
- 优化副本变更时,迁移时间支持分钟级别粒度
|
||||
- logi-security版本升级至2.10.13
|
||||
- logi-elasticsearch-client版本升级至1.0.24
|
||||
|
||||
|
||||
**能力提升**
|
||||
- 支持Ldap登录认证
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.1
|
||||
|
||||
**文档**
|
||||
- 新增Task模块说明文档
|
||||
- FAQ补充 `Specified key was too long; max key length is 767 bytes ` 错误说明
|
||||
- FAQ补充 `出现ESIndexNotFoundException报错` 错误说明
|
||||
|
||||
|
||||
**Bug修复**
|
||||
- 修复 Consumer 点击 Stop 未停止检索的问题
|
||||
- 修复创建/编辑角色权限报错问题
|
||||
- 修复多集群管理/单集群详情均衡卡片状态错误问题
|
||||
- 修复版本列表未排序问题
|
||||
- 修复Raft集群Controller信息不断记录问题
|
||||
- 修复部分版本消费组描述信息获取失败问题
|
||||
- 修复分区Offset获取失败的日志中,缺少Topic名称信息问题
|
||||
- 修复GitHub图地址错误,及图裂问题
|
||||
- 修复Broker默认使用的地址和注释不一致问题
|
||||
- 修复 Consumer 列表分页不生效问题
|
||||
- 修复操作记录表operation_methods字段缺少默认值问题
|
||||
- 修复集群均衡表中move_broker_list字段无效的问题
|
||||
- 修复KafkaUser、KafkaACL信息获取时,日志一直重复提示不支持问题
|
||||
- 修复指标缺失时,曲线出现掉底的问题
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化前端构建时间和打包体积,增加依赖打包的分包策略
|
||||
- 优化产品样式和文案展示
|
||||
- 优化ES客户端数为可配置
|
||||
- 优化日志中大量出现的MySQL Key冲突日志
|
||||
|
||||
|
||||
**能力提升**
|
||||
- 增加周期任务,用于主动创建缺少的ES模版及索引的能力,减少额外的脚本操作
|
||||
- 增加JMX连接的Broker地址可选择的能力
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.0
|
||||
|
||||
**1、多集群管理**
|
||||
|
||||
- 增加健康监测体系、关键组件&指标 GUI 展示
|
||||
- 增加 2.8.x 以上 Kafka 集群接入,覆盖 0.10.x-3.x
|
||||
- 删除逻辑集群、共享集群、Region 概念
|
||||
|
||||
**2、Cluster 管理**
|
||||
|
||||
- 增加集群概览信息、集群配置变更记录
|
||||
- 增加 Cluster 健康分,健康检查规则支持自定义配置
|
||||
- 增加 Cluster 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Cluster 层 I/O、Disk 的 Load Reblance 功能,支持定时均衡任务(企业版)
|
||||
- 删除限流、鉴权功能
|
||||
- 删除 APPID 概念
|
||||
|
||||
**3、Broker 管理**
|
||||
|
||||
- 增加 Broker 健康分
|
||||
- 增加 Broker 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Broker 参数配置功能,需重启生效
|
||||
- 增加 Controller 变更记录
|
||||
- 增加 Broker Datalogs 记录
|
||||
- 删除 Leader Rebalance 功能
|
||||
- 删除 Broker 优先副本选举
|
||||
|
||||
**4、Topic 管理**
|
||||
|
||||
- 增加 Topic 健康分
|
||||
- 增加 Topic 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Topic 参数配置功能,可实时生效
|
||||
- 增加 Topic 批量迁移、Topic 批量扩缩副本功能
|
||||
- 增加查看系统 Topic 功能
|
||||
- 优化 Partition 分布的 GUI 展示
|
||||
- 优化 Topic Message 数据采样
|
||||
- 删除 Topic 过期概念
|
||||
- 删除 Topic 申请配额功能
|
||||
|
||||
**5、Consumer 管理**
|
||||
|
||||
- 优化了 ConsumerGroup 展示形式,增加 Consumer Lag 的 GUI 展示
|
||||
|
||||
**6、ACL 管理**
|
||||
|
||||
- 增加原生 ACL GUI 配置功能,可配置生产、消费、自定义多种组合权限
|
||||
- 增加 KafkaUser 功能,可自定义新增 KafkaUser
|
||||
|
||||
**7、消息测试(企业版)**
|
||||
|
||||
- 增加生产者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
|
||||
- 增加消费者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
|
||||
|
||||
**8、Job**
|
||||
|
||||
- 优化 Job 模块,支持任务进度管理
|
||||
|
||||
**9、系统管理**
|
||||
|
||||
- 优化用户、角色管理体系,支持自定义角色配置页面及操作权限
|
||||
- 优化审计日志信息
|
||||
- 删除多租户体系
|
||||
- 删除工单流程
|
||||
|
||||
---
|
||||
|
||||
## v2.6.0
|
||||
|
||||
版本上线时间:2022-01-24
|
||||
|
||||
### 能力提升
|
||||
- 增加简单回退工具类
|
||||
|
||||
### 体验优化
|
||||
- 补充周期任务说明文档
|
||||
- 补充集群安装部署使用说明文档
|
||||
- 升级Swagger、SpringFramework、SpringBoot、EChats版本
|
||||
- 优化Task模块的日志输出
|
||||
- 优化corn表达式解析失败后退出无任何日志提示问题
|
||||
- Ldap用户接入时,增加部门及邮箱信息等
|
||||
- 对Jmx模块,增加连接失败后的回退机制及错误日志优化
|
||||
- 增加线程池、客户端池可配置
|
||||
- 删除无用的jmx_prometheus_javaagent-0.14.0.jar
|
||||
- 优化迁移任务名称
|
||||
- 优化创建Region时,Region容量信息不能立即被更新问题
|
||||
- 引入lombok
|
||||
- 更新视频教程
|
||||
- 优化kcm_script.sh脚本中的LogiKM地址为可通过程序传入
|
||||
- 第三方接口及网关接口,增加是否跳过登录的开关
|
||||
- extends模块相关配置调整为非必须在application.yml中配置
|
||||
|
||||
### bug修复
|
||||
- 修复批量往DB写入空指标数组时报SQL语法异常的问题
|
||||
- 修复网关增加配置及修改配置时,version不变化问题
|
||||
- 修复集群列表页,提示框遮挡问题
|
||||
- 修复对高版本Broker元信息协议解析失败的问题
|
||||
- 修复Dockerfile执行时提示缺少application.yml文件的问题
|
||||
- 修复逻辑集群更新时,会报空指针的问题
|
||||
|
||||
|
||||
## v2.5.0
|
||||
|
||||
版本上线时间:2021-07-10
|
||||
|
||||
### 体验优化
|
||||
- 更改产品名为LogiKM
|
||||
- 更新产品图标
|
||||
|
||||
|
||||
## v2.4.1+
|
||||
|
||||
版本上线时间:2021-05-21
|
||||
|
||||
### 能力提升
|
||||
- 增加直接增加权限和配额的接口(v2.4.1)
|
||||
- 增加接口调用可绕过登录的功能(v2.4.1)
|
||||
|
||||
### 体验优化
|
||||
- Tomcat 版本提升至8.5.66(v2.4.2)
|
||||
- op接口优化,拆分util接口为topic、leader两类接口(v2.4.1)
|
||||
- 简化Gateway配置的Key长度(v2.4.1)
|
||||
|
||||
### bug修复
|
||||
- 修复页面展示版本错误问题(v2.4.2)
|
||||
|
||||
|
||||
## v2.4.0
|
||||
|
||||
版本上线时间:2021-05-18
|
||||
|
||||
|
||||
### 能力提升
|
||||
|
||||
- 增加App与Topic自动化审批开关
|
||||
- Broker元信息中增加Rack信息
|
||||
- 升级MySQL 驱动,支持MySQL 8+
|
||||
- 增加操作记录查询界面
|
||||
|
||||
### 体验优化
|
||||
|
||||
- FAQ告警组说明优化
|
||||
- 用户手册共享及 独享集群概念优化
|
||||
- 用户管理界面,前端限制用户删除自己
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复op-util类中创建Topic失败的接口
|
||||
- 周期同步Topic到DB的任务修复,将Topic列表查询从缓存调整为直接查DB
|
||||
- 应用下线审批失败的功能修复,将权限为0(无权限)的数据进行过滤
|
||||
- 修复登录及权限绕过的漏洞
|
||||
- 修复研发角色展示接入集群、暂停监控等按钮的问题
|
||||
|
||||
|
||||
## v2.3.0
|
||||
|
||||
版本上线时间:2021-02-08
|
||||
|
||||
|
||||
### 能力提升
|
||||
|
||||
- 新增支持docker化部署
|
||||
- 可指定Broker作为候选controller
|
||||
- 可新增并管理网关配置
|
||||
- 可获取消费组状态
|
||||
- 增加集群的JMX认证
|
||||
|
||||
### 体验优化
|
||||
|
||||
- 优化编辑用户角色、修改密码的流程
|
||||
- 新增consumerID的搜索功能
|
||||
- 优化“Topic连接信息”、“消费组重置消费偏移”、“修改Topic保存时间”的文案提示
|
||||
- 在相应位置增加《资源申请文档》链接
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复Broker监控图表时间轴展示错误的问题
|
||||
- 修复创建夜莺监控告警规则时,使用的告警周期的单位不正确的问题
|
||||
|
||||
|
||||
|
||||
## v2.2.0
|
||||
|
||||
版本上线时间:2021-01-25
|
||||
|
||||
|
||||
|
||||
### 能力提升
|
||||
|
||||
- 优化工单批量操作流程
|
||||
- 增加获取Topic75分位/99分位的实时耗时数据
|
||||
- 增加定时任务,可将无主未落DB的Topic定期写入DB
|
||||
|
||||
### 体验优化
|
||||
|
||||
- 在相应位置增加《集群接入文档》链接
|
||||
- 优化物理集群、逻辑集群含义
|
||||
- 在Topic详情页、Topic扩分区操作弹窗增加展示Topic所属Region的信息
|
||||
- 优化Topic审批时,Topic数据保存时间的配置流程
|
||||
- 优化Topic/应用申请、审批时的错误提示文案
|
||||
- 优化Topic数据采样的操作项文案
|
||||
- 优化运维人员删除Topic时的提示文案
|
||||
- 优化运维人员删除Region的删除逻辑与提示文案
|
||||
- 优化运维人员删除逻辑集群的提示文案
|
||||
- 优化上传集群配置文件时的文件类型限制条件
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复填写应用名称时校验特殊字符出错的问题
|
||||
- 修复普通用户越权访问应用详情的问题
|
||||
- 修复由于Kafka版本升级,导致的数据压缩格式无法获取的问题
|
||||
- 修复删除逻辑集群或Topic之后,界面依旧展示的问题
|
||||
- 修复进行Leader rebalance操作时执行结果重复提示的问题
|
||||
|
||||
|
||||
## v2.1.0
|
||||
|
||||
版本上线时间:2020-12-19
|
||||
|
||||
|
||||
|
||||
### 体验优化
|
||||
|
||||
- 优化页面加载时的背景样式
|
||||
- 优化普通用户申请Topic权限的流程
|
||||
- 优化Topic申请配额、申请分区的权限限制
|
||||
- 优化取消Topic权限的文案提示
|
||||
- 优化申请配额表单的表单项名称
|
||||
- 优化重置消费偏移的操作流程
|
||||
- 优化创建Topic迁移任务的表单内容
|
||||
- 优化Topic扩分区操作的弹窗界面样式
|
||||
- 优化集群Broker监控可视化图表样式
|
||||
- 优化创建逻辑集群的表单内容
|
||||
- 优化集群安全协议的提示文案
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复偶发性重置消费偏移失败的问题
|
||||
|
||||
|
||||
|
||||
|
||||
1036
bin/init_es_template.sh
Normal file
1036
bin/init_es_template.sh
Normal file
File diff suppressed because it is too large
Load Diff
16
bin/shutdown.sh
Normal file
16
bin/shutdown.sh
Normal file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd `dirname $0`/../libs
|
||||
target_dir=`pwd`
|
||||
|
||||
pid=`ps ax | grep -i 'ks-km' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'`
|
||||
if [ -z "$pid" ] ; then
|
||||
echo "No ks-km running."
|
||||
exit -1;
|
||||
fi
|
||||
|
||||
echo "The ks-km (${pid}) is running..."
|
||||
|
||||
kill ${pid}
|
||||
|
||||
echo "Send shutdown request to ks-km (${pid}) OK"
|
||||
82
bin/startup.sh
Normal file
82
bin/startup.sh
Normal file
@@ -0,0 +1,82 @@
|
||||
error_exit ()
|
||||
{
|
||||
echo "ERROR: $1 !!"
|
||||
exit 1
|
||||
}
|
||||
|
||||
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
|
||||
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
|
||||
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
if [ "Darwin" = "$(uname -s)" ]; then
|
||||
|
||||
if [ -x '/usr/libexec/java_home' ] ; then
|
||||
export JAVA_HOME=`/usr/libexec/java_home`
|
||||
|
||||
elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
|
||||
export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
|
||||
fi
|
||||
else
|
||||
JAVA_PATH=`dirname $(readlink -f $(which javac))`
|
||||
if [ "x$JAVA_PATH" != "x" ]; then
|
||||
export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
|
||||
fi
|
||||
fi
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
|
||||
|
||||
export WEB_SERVER="ks-km"
|
||||
export JAVA_HOME
|
||||
export JAVA="$JAVA_HOME/bin/java"
|
||||
export BASE_DIR=`cd $(dirname $0)/..; pwd`
|
||||
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/
|
||||
|
||||
|
||||
#===========================================================================================
|
||||
# JVM Configuration
|
||||
#===========================================================================================
|
||||
|
||||
JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
|
||||
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"
|
||||
|
||||
## jdk版本高的情况 有些 参数废弃了
|
||||
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
|
||||
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
|
||||
JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400"
|
||||
else
|
||||
JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
|
||||
JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
|
||||
|
||||
fi
|
||||
|
||||
JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/libs/${WEB_SERVER}.jar"
|
||||
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
|
||||
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml"
|
||||
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"
|
||||
|
||||
|
||||
|
||||
if [ ! -d "${BASE_DIR}/logs" ]; then
|
||||
mkdir ${BASE_DIR}/logs
|
||||
fi
|
||||
|
||||
echo "$JAVA ${JAVA_OPT}"
|
||||
|
||||
# check the start.out log output file
|
||||
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
|
||||
touch "${BASE_DIR}/logs/start.out"
|
||||
fi
|
||||
|
||||
# start
|
||||
echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 &
|
||||
|
||||
|
||||
nohup $JAVA ${JAVA_OPT} >> ${BASE_DIR}/logs/start.out 2>&1 &
|
||||
|
||||
echo "${WEB_SERVER} is starting,you can check the ${BASE_DIR}/logs/start.out"
|
||||
@@ -1,50 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<artifactId>kafka-manager-common</artifactId>
|
||||
<version>1.1.0-SNAPSHOT</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<parent>
|
||||
<artifactId>kafka-manager</artifactId>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<version>1.1.0-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<properties>
|
||||
<kafka-manager.revision>1.0.0-SNAPSHOT</kafka-manager.revision>
|
||||
<maven.test.skip>true</maven.test.skip>
|
||||
<downloadSources>true</downloadSources>
|
||||
<java_source_version>1.8</java_source_version>
|
||||
<java_target_version>1.8</java_target_version>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<file_encoding>UTF-8</file_encoding>
|
||||
</properties>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>commons-beanutils</groupId>
|
||||
<artifactId>commons-beanutils</artifactId>
|
||||
<version>1.9.3</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-recipes</artifactId>
|
||||
<version>2.10.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.alibaba</groupId>
|
||||
<artifactId>fastjson</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.10</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
@@ -1,21 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/2/28
|
||||
*/
|
||||
public class Constant {
|
||||
public static final String KAFKA_MANAGER_INNER_ERROR = "kafka-manager inner error";
|
||||
|
||||
public final static Map<Integer, List<String>> BROKER_METRICS_TYPE_MBEAN_NAME_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
public final static Map<Integer, List<String>> TOPIC_METRICS_TYPE_MBEAN_NAME_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
public static final String COLLECTOR_METRICS_LOGGER = "COLLECTOR_METRICS_LOGGER";
|
||||
|
||||
public static final String API_METRICS_LOGGER = "API_METRICS_LOGGER";
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
public class MetricsType {
|
||||
/**
|
||||
* Broker流量详情
|
||||
*/
|
||||
public static final int BROKER_FLOW_DETAIL = 0;
|
||||
public static final int BROKER_TO_DB_METRICS = 1; // Broker入DB的Metrics指标
|
||||
public static final int BROKER_REAL_TIME_METRICS = 2; // Broker入DB的Metrics指标
|
||||
public static final int BROKER_OVER_VIEW_METRICS = 3; // Broker状态概览的指标
|
||||
public static final int BROKER_OVER_ALL_METRICS = 4; // Broker状态总揽的指标
|
||||
public static final int BROKER_ANALYSIS_METRICS = 5; // Broker分析的指标
|
||||
public static final int BROKER_TOPIC_ANALYSIS_METRICS = 6; // Broker分析的指标
|
||||
|
||||
/**
|
||||
* Topic流量详情
|
||||
*/
|
||||
public static final int TOPIC_FLOW_DETAIL = 100;
|
||||
public static final int TOPIC_FLOW_OVERVIEW = 101;
|
||||
public static final int TOPIC_METRICS_TO_DB = 102;
|
||||
|
||||
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
|
||||
* @author limeng
|
||||
* @date 2017/11/21
|
||||
*/
|
||||
public enum OffsetStoreLocation {
|
||||
|
||||
ZOOKEEPER("zookeeper"),
|
||||
|
||||
BROKER("broker");
|
||||
|
||||
private final String location;
|
||||
|
||||
OffsetStoreLocation(String location) {
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
public String getLocation() {
|
||||
return location;
|
||||
}
|
||||
|
||||
public static OffsetStoreLocation getOffsetStoreLocation(String location) {
|
||||
if (location == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (OffsetStoreLocation offsetStoreLocation: OffsetStoreLocation.values()) {
|
||||
if (offsetStoreLocation.location.equals(location)) {
|
||||
return offsetStoreLocation;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
public class StatusCode {
|
||||
/*
|
||||
* kafka-manager status code: 17000 ~ 17999
|
||||
*
|
||||
* 正常 - 0
|
||||
* 参数错误 - 10000
|
||||
* 资源未就绪 - 10001
|
||||
*/
|
||||
|
||||
/*
|
||||
* 已约定的状态码
|
||||
*/
|
||||
public static final Integer SUCCESS = 0;
|
||||
public static final Integer PARAM_ERROR = 10000; //参数错误
|
||||
public static final Integer RES_UNREADY = 10001; //资源未就绪
|
||||
|
||||
public static final Integer MY_SQL_SELECT_ERROR = 17210; // MySQL 查询数据异常
|
||||
public static final Integer MY_SQL_INSERT_ERROR = 17211; // MySQL 插入数据异常
|
||||
public static final Integer MY_SQL_DELETE_ERROR = 17212; // MySQL 删除数据异常
|
||||
public static final Integer MY_SQL_UPDATE_ERROR = 17213; // MySQL 更新数据异常
|
||||
public static final Integer MY_SQL_REPLACE_ERROR = 17214; // MySQL 替换数据异常
|
||||
|
||||
public static final Integer OPERATION_ERROR = 17300; // 请求操作异常
|
||||
|
||||
|
||||
/**
|
||||
* Topic相关的异常
|
||||
*/
|
||||
public static final Integer TOPIC_EXISTED = 17400; //Topic已经存在了
|
||||
|
||||
public static final Integer PARTIAL_SUCESS = 17700; //操作部分成功
|
||||
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant.monitor;
|
||||
|
||||
import java.util.AbstractMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 条件类型
|
||||
* @author zengqiao
|
||||
* @date 19/5/12
|
||||
*/
|
||||
public enum MonitorConditionType {
|
||||
BIGGER(">", "大于"),
|
||||
EQUAL("=", "等于"),
|
||||
LESS("<", "小于"),
|
||||
NOT_EQUAL("!=", "不等于");
|
||||
|
||||
private String name;
|
||||
|
||||
private String message;
|
||||
|
||||
MonitorConditionType(String name, String message) {
|
||||
this.name = name;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public static boolean legal(String name) {
|
||||
for (MonitorConditionType elem: MonitorConditionType.values()) {
|
||||
if (elem.name.equals(name)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConditionType{" +
|
||||
"name='" + name + '\'' +
|
||||
", message='" + message + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static List<AbstractMap.SimpleEntry<String, String>> toList() {
|
||||
List<AbstractMap.SimpleEntry<String, String>> conditionTypeList = new ArrayList<>();
|
||||
for (MonitorConditionType elem: MonitorConditionType.values()) {
|
||||
conditionTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
|
||||
}
|
||||
return conditionTypeList;
|
||||
}
|
||||
|
||||
/**
|
||||
* 计算 operation(data1, data2) 是否为true
|
||||
* @param data1
|
||||
* @param data2
|
||||
* @param operation
|
||||
* @author zengqiao
|
||||
* @date 19/5/12
|
||||
* @return boolean
|
||||
*/
|
||||
public static boolean matchCondition(Double data1, Double data2, String operation) {
|
||||
switch (operation) {
|
||||
case ">": return data1 > data2;
|
||||
case "<": return data1 < data2;
|
||||
case "=": return data1.equals(data2);
|
||||
case "!=": return !data1.equals(data2);
|
||||
default:
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant.monitor;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/3/18
|
||||
*/
|
||||
public enum MonitorMatchStatus {
|
||||
UNKNOWN(0),
|
||||
|
||||
YES(1),
|
||||
|
||||
NO(2);
|
||||
|
||||
public Integer status;
|
||||
|
||||
MonitorMatchStatus(Integer status) {
|
||||
this.status = status;
|
||||
}
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant.monitor;
|
||||
|
||||
import java.util.AbstractMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 指标类型
|
||||
* @author zengqiao
|
||||
* @date 19/5/12
|
||||
*/
|
||||
public enum MonitorMetricsType {
|
||||
BYTES_IN("BytesIn", "流入流量"),
|
||||
BYTES_OUT("BytesOut", "流出流量"),
|
||||
LAG("Lag", "消费组Lag");
|
||||
|
||||
private String name;
|
||||
|
||||
private String message;
|
||||
|
||||
MonitorMetricsType(String name, String message) {
|
||||
this.name = name;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public static boolean legal(String name) {
|
||||
for (MonitorMetricsType elem: MonitorMetricsType.values()) {
|
||||
if (elem.name.equals(name)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "MetricType{" +
|
||||
"name='" + name + '\'' +
|
||||
", message='" + message + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static List<AbstractMap.SimpleEntry<String, String>> toList() {
|
||||
List<AbstractMap.SimpleEntry<String, String>> metricTypeList = new ArrayList<>();
|
||||
for (MonitorMetricsType elem: MonitorMetricsType.values()) {
|
||||
metricTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
|
||||
}
|
||||
return metricTypeList;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant.monitor;
|
||||
|
||||
import java.util.AbstractMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 通知类型
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019-05-06
|
||||
*/
|
||||
public enum MonitorNotifyType {
|
||||
KAFKA_MESSAGE("KAFKA", "告警发送到KAFKA");
|
||||
|
||||
String name;
|
||||
|
||||
String message;
|
||||
|
||||
MonitorNotifyType(String name, String message){
|
||||
this.name = name;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
|
||||
public static boolean legal(String name) {
|
||||
for (MonitorNotifyType elem: MonitorNotifyType.values()) {
|
||||
if (elem.name.equals(name)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NotifyType{" +
|
||||
"name='" + name + '\'' +
|
||||
", message='" + message + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static List<AbstractMap.SimpleEntry<String, String>> toList() {
|
||||
List<AbstractMap.SimpleEntry<String, String>> notifyTypeList = new ArrayList<>();
|
||||
for (MonitorNotifyType elem: MonitorNotifyType.values()) {
|
||||
notifyTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
|
||||
}
|
||||
return notifyTypeList;
|
||||
}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import kafka.admin.AdminClient;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/5/14
|
||||
*/
|
||||
public class ConsumerMetadata {
|
||||
private Set<String> consumerGroupSet = new HashSet<>();
|
||||
|
||||
private Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
|
||||
|
||||
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
|
||||
|
||||
public ConsumerMetadata(Set<String> consumerGroupSet,
|
||||
Map<String, Set<String>> topicNameConsumerGroupMap,
|
||||
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
|
||||
this.consumerGroupSet = consumerGroupSet;
|
||||
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
|
||||
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
|
||||
}
|
||||
|
||||
public Set<String> getConsumerGroupSet() {
|
||||
return consumerGroupSet;
|
||||
}
|
||||
|
||||
public Map<String, Set<String>> getTopicNameConsumerGroupMap() {
|
||||
return topicNameConsumerGroupMap;
|
||||
}
|
||||
|
||||
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
|
||||
return consumerGroupSummaryMap;
|
||||
}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
/**
|
||||
* ConsumerMetrics
|
||||
* @author tukun
|
||||
* @date 2015/11/12
|
||||
*/
|
||||
public class ConsumerMetrics {
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private String consumerGroup;
|
||||
|
||||
private String location;
|
||||
|
||||
private Long sumLag;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public String getConsumerGroup() {
|
||||
return consumerGroup;
|
||||
}
|
||||
|
||||
public void setConsumerGroup(String consumerGroup) {
|
||||
this.consumerGroup = consumerGroup;
|
||||
}
|
||||
|
||||
public String getLocation() {
|
||||
return location;
|
||||
}
|
||||
|
||||
public void setLocation(String location) {
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
public Long getSumLag() {
|
||||
return sumLag;
|
||||
}
|
||||
|
||||
public void setSumLag(Long sumLag) {
|
||||
this.sumLag = sumLag;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConsumerMetrics{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", consumerGroup='" + consumerGroup + '\'' +
|
||||
", location='" + location + '\'' +
|
||||
", sumLag=" + sumLag +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019-07-08
|
||||
*/
|
||||
public class Result<T> implements Serializable {
|
||||
private static final long serialVersionUID = -2772975319944108658L;
|
||||
|
||||
private T data;
|
||||
private String message;
|
||||
private Integer code;
|
||||
|
||||
public Result(T data) {
|
||||
this.data = data;
|
||||
this.code = StatusCode.SUCCESS;
|
||||
this.message = "成功";
|
||||
}
|
||||
|
||||
public Result() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
public Result(Integer code, String message) {
|
||||
this.message = message;
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
public Result(Integer code, T data, String message) {
|
||||
this.data = data;
|
||||
this.message = message;
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
|
||||
public T getData()
|
||||
{
|
||||
return (T)this.data;
|
||||
}
|
||||
|
||||
public void setData(T data)
|
||||
{
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
public String getMessage()
|
||||
{
|
||||
return this.message;
|
||||
}
|
||||
|
||||
public void setMessage(String message)
|
||||
{
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Integer getCode()
|
||||
{
|
||||
return this.code;
|
||||
}
|
||||
|
||||
public void setCode(Integer code)
|
||||
{
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
return JSON.toJSONString(this);
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.annotations;
|
||||
|
||||
import java.lang.annotation.Documented;
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
import static java.lang.annotation.RetentionPolicy.RUNTIME;
|
||||
|
||||
/**
|
||||
* FieldSelector
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019-06-19
|
||||
*/
|
||||
@Target(ElementType.FIELD)
|
||||
@Retention(RUNTIME)
|
||||
@Documented
|
||||
public @interface FieldSelector {
|
||||
//注解的属性
|
||||
String name() default "";
|
||||
|
||||
int[] types() default {};
|
||||
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* 用户角色
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/15
|
||||
*/
|
||||
public enum AccountRoleEnum {
|
||||
UNKNOWN(-1),
|
||||
|
||||
NORMAL(0),
|
||||
|
||||
SRE(1),
|
||||
|
||||
ADMIN(2);
|
||||
|
||||
private Integer role;
|
||||
|
||||
AccountRoleEnum(Integer role) {
|
||||
this.role = role;
|
||||
}
|
||||
|
||||
public Integer getRole() {
|
||||
return role;
|
||||
}
|
||||
|
||||
public static AccountRoleEnum getUserRoleEnum(Integer role) {
|
||||
for (AccountRoleEnum elem: AccountRoleEnum.values()) {
|
||||
if (elem.getRole().equals(role)) {
|
||||
return elem;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* 操作Topic的状态
|
||||
* @author zengqiao
|
||||
* @date 19/11/26
|
||||
*/
|
||||
public enum AdminTopicStatusEnum {
|
||||
SUCCESS(0, "成功"),
|
||||
REPLACE_DB_FAILED(1, "更新DB失败"),
|
||||
PARAM_NULL_POINTER(2, "参数错误"),
|
||||
PARTITION_NUM_ILLEGAL(3, "分区数错误"),
|
||||
BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
|
||||
TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
|
||||
TOPIC_EXISTED(6, "Topic已存在"),
|
||||
UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
|
||||
TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
|
||||
TOPIC_IN_DELETING(9, "Topic正在删除"),
|
||||
UNKNOWN_ERROR(10, "未知错误");
|
||||
|
||||
private Integer code;
|
||||
|
||||
private String message;
|
||||
|
||||
AdminTopicStatusEnum(Integer code, String message) {
|
||||
this.code = code;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Integer getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* DBStatus状态含义
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/15
|
||||
*/
|
||||
public enum DBStatusEnum {
|
||||
/**
|
||||
* 逻辑删除
|
||||
*/
|
||||
DELETED(-1),
|
||||
|
||||
/**
|
||||
* 普通
|
||||
*/
|
||||
NORMAL(0),
|
||||
|
||||
/**
|
||||
* 已完成并通过
|
||||
*/
|
||||
PASSED(1);
|
||||
|
||||
private Integer status;
|
||||
|
||||
DBStatusEnum(Integer status) {
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public Integer getStatus() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public static DBStatusEnum getDBStatusEnum(Integer status) {
|
||||
for (DBStatusEnum elem: DBStatusEnum.values()) {
|
||||
if (elem.getStatus().equals(status)) {
|
||||
return elem;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* 操作类型
|
||||
* @author zengqiao
|
||||
* @date 19/11/21
|
||||
*/
|
||||
public enum OperationEnum {
|
||||
CREATE_TOPIC("create_topic"),
|
||||
DELETE_TOPIC("delete_topic"),
|
||||
MODIFY_TOPIC_CONFIG("modify_topic_config"),
|
||||
EXPAND_TOPIC_PARTITION("expand_topic_partition");
|
||||
|
||||
public String message;
|
||||
|
||||
OperationEnum(String message) {
|
||||
this.message = message;
|
||||
}
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
public enum OrderStatusEnum {
|
||||
WAIT_DEAL(0, "待处理"),
|
||||
|
||||
PASSED(1, "通过"),
|
||||
|
||||
REFUSED(2, "拒绝"),
|
||||
|
||||
CANCELLED(3, "取消");
|
||||
|
||||
private Integer code;
|
||||
|
||||
private String message;
|
||||
|
||||
OrderStatusEnum(Integer code, String message) {
|
||||
this.code = code;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Integer getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* 工单类型
|
||||
* @author zengqiao
|
||||
* @date 19/6/23
|
||||
*/
|
||||
public enum OrderTypeEnum {
|
||||
UNKNOWN(-1),
|
||||
|
||||
APPLY_TOPIC(0),
|
||||
|
||||
APPLY_PARTITION(1);
|
||||
|
||||
private Integer code;
|
||||
|
||||
OrderTypeEnum(Integer code) {
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
public Integer getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public static OrderTypeEnum getOrderTypeEnum(Integer code) {
|
||||
for (OrderTypeEnum elem: OrderTypeEnum.values()) {
|
||||
if (elem.getCode().equals(code)) {
|
||||
return elem;
|
||||
}
|
||||
}
|
||||
return OrderTypeEnum.UNKNOWN;
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* 优先副本选举状态
|
||||
* @author zengqiao
|
||||
* @date 2017/6/29.
|
||||
*/
|
||||
public enum PreferredReplicaElectEnum {
|
||||
SUCCESS(0, "成功[创建成功|执行成功]"),
|
||||
RUNNING(1, "正在执行"),
|
||||
ALREADY_EXIST(2, "任务已存在"),
|
||||
PARAM_ILLEGAL(3, "参数错误"),
|
||||
UNKNOWN(4, "进度未知");
|
||||
|
||||
private Integer code;
|
||||
|
||||
private String message;
|
||||
|
||||
PreferredReplicaElectEnum(Integer code, String message) {
|
||||
this.code = code;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Integer getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
|
||||
|
||||
/**
|
||||
* 迁移状态
|
||||
* @author zengqiao
|
||||
* @date 19/12/29
|
||||
*/
|
||||
public enum ReassignmentStatusEnum {
|
||||
WAITING(0, "等待执行"),
|
||||
RUNNING(1, "正在执行"),
|
||||
SUCCESS(2, "迁移成功"),
|
||||
FAILED(3, "迁移失败"),
|
||||
CANCELED(4, "取消任务");
|
||||
|
||||
private Integer code;
|
||||
|
||||
private String message;
|
||||
|
||||
ReassignmentStatusEnum(Integer code, String message) {
|
||||
this.code = code;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Integer getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
|
||||
public static boolean triggerTask(Integer status) {
|
||||
if (WAITING.code.equals(status) || RUNNING.code.equals(status)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public static boolean cancelTask(Integer status) {
|
||||
if (WAITING.code.equals(status)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
/**
|
||||
* Broker基本信息
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/8
|
||||
*/
|
||||
public class BrokerBasicDTO {
|
||||
private String host;
|
||||
|
||||
private Integer port;
|
||||
|
||||
private Integer jmxPort;
|
||||
|
||||
private Integer topicNum;
|
||||
|
||||
private Integer partitionCount;
|
||||
|
||||
private Long startTime;
|
||||
|
||||
private Integer leaderCount;
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Integer getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(Integer port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public Integer getJmxPort() {
|
||||
return jmxPort;
|
||||
}
|
||||
|
||||
public void setJmxPort(Integer jmxPort) {
|
||||
this.jmxPort = jmxPort;
|
||||
}
|
||||
|
||||
public Integer getTopicNum() {
|
||||
return topicNum;
|
||||
}
|
||||
|
||||
public void setTopicNum(Integer topicNum) {
|
||||
this.topicNum = topicNum;
|
||||
}
|
||||
|
||||
public Integer getPartitionCount() {
|
||||
return partitionCount;
|
||||
}
|
||||
|
||||
public void setPartitionCount(Integer partitionCount) {
|
||||
this.partitionCount = partitionCount;
|
||||
}
|
||||
|
||||
public Long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
public void setStartTime(Long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
public Integer getLeaderCount() {
|
||||
return leaderCount;
|
||||
}
|
||||
|
||||
public void setLeaderCount(Integer leaderCount) {
|
||||
this.leaderCount = leaderCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BrokerBasicInfoDTO{" +
|
||||
"host='" + host + '\'' +
|
||||
", port=" + port +
|
||||
", jmxPort=" + jmxPort +
|
||||
", topicNum=" + topicNum +
|
||||
", partitionCount=" + partitionCount +
|
||||
", startTime=" + startTime +
|
||||
", leaderCount=" + leaderCount +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/21
|
||||
*/
|
||||
public class BrokerOverallDTO {
|
||||
private Integer brokerId;
|
||||
|
||||
private String host;
|
||||
|
||||
private Integer port;
|
||||
|
||||
private Integer jmxPort;
|
||||
|
||||
private Long startTime;
|
||||
|
||||
private Integer partitionCount;
|
||||
|
||||
private Integer underReplicatedPartitions;
|
||||
|
||||
private Integer leaderCount;
|
||||
|
||||
private Double bytesInPerSec;
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Integer getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(Integer port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public Integer getJmxPort() {
|
||||
return jmxPort;
|
||||
}
|
||||
|
||||
public void setJmxPort(Integer jmxPort) {
|
||||
this.jmxPort = jmxPort;
|
||||
}
|
||||
|
||||
public Long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
public void setStartTime(Long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
public Integer getPartitionCount() {
|
||||
return partitionCount;
|
||||
}
|
||||
|
||||
public void setPartitionCount(Integer partitionCount) {
|
||||
this.partitionCount = partitionCount;
|
||||
}
|
||||
|
||||
public Integer getUnderReplicatedPartitions() {
|
||||
return underReplicatedPartitions;
|
||||
}
|
||||
|
||||
public void setUnderReplicatedPartitions(Integer underReplicatedPartitions) {
|
||||
this.underReplicatedPartitions = underReplicatedPartitions;
|
||||
}
|
||||
|
||||
public Integer getLeaderCount() {
|
||||
return leaderCount;
|
||||
}
|
||||
|
||||
public void setLeaderCount(Integer leaderCount) {
|
||||
this.leaderCount = leaderCount;
|
||||
}
|
||||
|
||||
public Double getBytesInPerSec() {
|
||||
return bytesInPerSec;
|
||||
}
|
||||
|
||||
public void setBytesInPerSec(Double bytesInPerSec) {
|
||||
this.bytesInPerSec = bytesInPerSec;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BrokerOverallDTO{" +
|
||||
"brokerId=" + brokerId +
|
||||
", host='" + host + '\'' +
|
||||
", port=" + port +
|
||||
", jmxPort=" + jmxPort +
|
||||
", startTime=" + startTime +
|
||||
", partitionCount=" + partitionCount +
|
||||
", underReplicatedPartitions=" + underReplicatedPartitions +
|
||||
", leaderCount=" + leaderCount +
|
||||
", bytesInPerSec=" + bytesInPerSec +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static BrokerOverallDTO newInstance(BrokerMetadata brokerMetadata, BrokerMetrics brokerMetrics) {
|
||||
BrokerOverallDTO brokerOverallDTO = new BrokerOverallDTO();
|
||||
brokerOverallDTO.setBrokerId(brokerMetadata.getBrokerId());
|
||||
brokerOverallDTO.setHost(brokerMetadata.getHost());
|
||||
brokerOverallDTO.setPort(brokerMetadata.getPort());
|
||||
brokerOverallDTO.setJmxPort(brokerMetadata.getJmxPort());
|
||||
brokerOverallDTO.setStartTime(brokerMetadata.getTimestamp());
|
||||
if (brokerMetrics == null) {
|
||||
return brokerOverallDTO;
|
||||
}
|
||||
brokerOverallDTO.setPartitionCount(brokerMetrics.getPartitionCount());
|
||||
brokerOverallDTO.setLeaderCount(brokerMetrics.getLeaderCount());
|
||||
brokerOverallDTO.setBytesInPerSec(brokerMetrics.getBytesInPerSec());
|
||||
brokerOverallDTO.setUnderReplicatedPartitions(brokerMetrics.getUnderReplicatedPartitions());
|
||||
return brokerOverallDTO;
|
||||
}
|
||||
}
|
||||
@@ -1,121 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
|
||||
|
||||
/**
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/21
|
||||
*/
|
||||
public class BrokerOverviewDTO {
|
||||
private Integer brokerId;
|
||||
|
||||
private String host;
|
||||
|
||||
private Integer port;
|
||||
|
||||
private Integer jmxPort;
|
||||
|
||||
private Long startTime;
|
||||
|
||||
private Double byteIn;
|
||||
|
||||
private Double byteOut;
|
||||
|
||||
private Integer status;
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Integer getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(Integer port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public Integer getJmxPort() {
|
||||
return jmxPort;
|
||||
}
|
||||
|
||||
public void setJmxPort(Integer jmxPort) {
|
||||
this.jmxPort = jmxPort;
|
||||
}
|
||||
|
||||
public Long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
public void setStartTime(Long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
public Double getByteIn() {
|
||||
return byteIn;
|
||||
}
|
||||
|
||||
public void setByteIn(Double byteIn) {
|
||||
this.byteIn = byteIn;
|
||||
}
|
||||
|
||||
public Double getByteOut() {
|
||||
return byteOut;
|
||||
}
|
||||
|
||||
public void setByteOut(Double byteOut) {
|
||||
this.byteOut = byteOut;
|
||||
}
|
||||
|
||||
public Integer getStatus() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public void setStatus(Integer status) {
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BrokerInfoDTO{" +
|
||||
"brokerId=" + brokerId +
|
||||
", host='" + host + '\'' +
|
||||
", port=" + port +
|
||||
", jmxPort=" + jmxPort +
|
||||
", startTime=" + startTime +
|
||||
", byteIn=" + byteIn +
|
||||
", byteOut=" + byteOut +
|
||||
", status=" + status +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static BrokerOverviewDTO newInstance(BrokerMetadata brokerMetadata, BrokerMetrics brokerMetrics) {
|
||||
BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO();
|
||||
brokerOverviewDTO.setBrokerId(brokerMetadata.getBrokerId());
|
||||
brokerOverviewDTO.setHost(brokerMetadata.getHost());
|
||||
brokerOverviewDTO.setPort(brokerMetadata.getPort());
|
||||
brokerOverviewDTO.setJmxPort(brokerMetadata.getJmxPort());
|
||||
brokerOverviewDTO.setStartTime(brokerMetadata.getTimestamp());
|
||||
brokerOverviewDTO.setStatus(DBStatusEnum.NORMAL.getStatus());
|
||||
if (brokerMetrics == null) {
|
||||
return brokerOverviewDTO;
|
||||
}
|
||||
brokerOverviewDTO.setByteIn(brokerMetrics.getBytesInPerSec());
|
||||
brokerOverviewDTO.setByteOut(brokerMetrics.getBytesOutPerSec());
|
||||
return brokerOverviewDTO;
|
||||
}
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/22
|
||||
*/
|
||||
public class ControllerDTO {
|
||||
private String clusterName;
|
||||
|
||||
private Integer brokerId;
|
||||
|
||||
private String host;
|
||||
|
||||
private Integer controllerVersion;
|
||||
|
||||
private Date controllerTimestamp;
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public void setClusterName(String clusterName) {
|
||||
this.clusterName = clusterName;
|
||||
}
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Integer getControllerVersion() {
|
||||
return controllerVersion;
|
||||
}
|
||||
|
||||
public void setControllerVersion(Integer controllerVersion) {
|
||||
this.controllerVersion = controllerVersion;
|
||||
}
|
||||
|
||||
public Date getControllerTimestamp() {
|
||||
return controllerTimestamp;
|
||||
}
|
||||
|
||||
public void setControllerTimestamp(Date controllerTimestamp) {
|
||||
this.controllerTimestamp = controllerTimestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ControllerInfoDTO{" +
|
||||
"clusterName='" + clusterName + '\'' +
|
||||
", brokerId=" + brokerId +
|
||||
", host='" + host + '\'' +
|
||||
", controllerVersion=" + controllerVersion +
|
||||
", controllerTimestamp=" + controllerTimestamp +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
/**
|
||||
* Topic Offset
|
||||
* @author zengqiao
|
||||
* @date 19/6/2
|
||||
*/
|
||||
public class PartitionOffsetDTO {
|
||||
private Integer partitionId;
|
||||
|
||||
private Long offset;
|
||||
|
||||
private Long timestamp;
|
||||
|
||||
public PartitionOffsetDTO() {
|
||||
}
|
||||
|
||||
public PartitionOffsetDTO(Integer partitionId, Long offset) {
|
||||
this.partitionId = partitionId;
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public PartitionOffsetDTO(Integer partitionId, Long offset, Long timestamp) {
|
||||
this.partitionId = partitionId;
|
||||
this.offset = offset;
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
public Integer getPartitionId() {
|
||||
return partitionId;
|
||||
}
|
||||
|
||||
public void setPartitionId(Integer partitionId) {
|
||||
this.partitionId = partitionId;
|
||||
}
|
||||
|
||||
public Long getOffset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
public void setOffset(Long offset) {
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public Long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public void setTimestamp(Long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicOffsetDTO{" +
|
||||
", partitionId=" + partitionId +
|
||||
", offset=" + offset +
|
||||
", timestamp=" + timestamp +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2018/09/03
|
||||
*/
|
||||
public class TopicBasicDTO {
|
||||
private String topicName;
|
||||
|
||||
private Integer partitionNum;
|
||||
|
||||
private Integer replicaNum;
|
||||
|
||||
private Integer brokerNum;
|
||||
|
||||
private String remark;
|
||||
|
||||
private Long modifyTime;
|
||||
|
||||
private Long createTime;
|
||||
|
||||
private String region;
|
||||
|
||||
private Long retentionTime;
|
||||
|
||||
private String principal;
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public Integer getPartitionNum() {
|
||||
return partitionNum;
|
||||
}
|
||||
|
||||
public void setPartitionNum(Integer partitionNum) {
|
||||
this.partitionNum = partitionNum;
|
||||
}
|
||||
|
||||
public Integer getReplicaNum() {
|
||||
return replicaNum;
|
||||
}
|
||||
|
||||
public void setReplicaNum(Integer replicaNum) {
|
||||
this.replicaNum = replicaNum;
|
||||
}
|
||||
|
||||
public Integer getBrokerNum() {
|
||||
return brokerNum;
|
||||
}
|
||||
|
||||
public void setBrokerNum(Integer brokerNum) {
|
||||
this.brokerNum = brokerNum;
|
||||
}
|
||||
|
||||
public String getRemark() {
|
||||
return remark;
|
||||
}
|
||||
|
||||
public void setRemark(String remark) {
|
||||
this.remark = remark;
|
||||
}
|
||||
|
||||
public String getRegion() {
|
||||
return region;
|
||||
}
|
||||
|
||||
public void setRegion(String region) {
|
||||
this.region = region;
|
||||
}
|
||||
|
||||
public Long getRetentionTime() {
|
||||
return retentionTime;
|
||||
}
|
||||
|
||||
public void setRetentionTime(Long retentionTime) {
|
||||
this.retentionTime = retentionTime;
|
||||
}
|
||||
|
||||
public Long getModifyTime() {
|
||||
return modifyTime;
|
||||
}
|
||||
|
||||
public void setModifyTime(Long modifyTime) {
|
||||
this.modifyTime = modifyTime;
|
||||
}
|
||||
|
||||
public Long getCreateTime() {
|
||||
return createTime;
|
||||
}
|
||||
|
||||
public void setCreateTime(Long createTime) {
|
||||
this.createTime = createTime;
|
||||
}
|
||||
|
||||
public String getPrincipal() {
|
||||
return principal;
|
||||
}
|
||||
|
||||
public void setPrincipal(String principal) {
|
||||
this.principal = principal;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicBasicInfoDTO{" +
|
||||
"topicName='" + topicName + '\'' +
|
||||
", partitionNum=" + partitionNum +
|
||||
", replicaNum=" + replicaNum +
|
||||
", brokerNum=" + brokerNum +
|
||||
", remark='" + remark + '\'' +
|
||||
", modifyTime=" + modifyTime +
|
||||
", createTime=" + createTime +
|
||||
", region='" + region + '\'' +
|
||||
", retentionTime=" + retentionTime +
|
||||
", principal='" + principal + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
public class TopicOverviewDTO {
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private Integer replicaNum;
|
||||
|
||||
private Integer partitionNum;
|
||||
|
||||
private Double bytesInPerSec;
|
||||
|
||||
private Double produceRequestPerSec;
|
||||
|
||||
private Long updateTime;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public Integer getReplicaNum() {
|
||||
return replicaNum;
|
||||
}
|
||||
|
||||
public void setReplicaNum(Integer replicaNum) {
|
||||
this.replicaNum = replicaNum;
|
||||
}
|
||||
|
||||
public Integer getPartitionNum() {
|
||||
return partitionNum;
|
||||
}
|
||||
|
||||
public void setPartitionNum(Integer partitionNum) {
|
||||
this.partitionNum = partitionNum;
|
||||
}
|
||||
|
||||
public Double getBytesInPerSec() {
|
||||
return bytesInPerSec;
|
||||
}
|
||||
|
||||
public void setBytesInPerSec(Double bytesInPerSec) {
|
||||
this.bytesInPerSec = bytesInPerSec;
|
||||
}
|
||||
|
||||
public Double getProduceRequestPerSec() {
|
||||
return produceRequestPerSec;
|
||||
}
|
||||
|
||||
public void setProduceRequestPerSec(Double produceRequestPerSec) {
|
||||
this.produceRequestPerSec = produceRequestPerSec;
|
||||
}
|
||||
|
||||
public Long getUpdateTime() {
|
||||
return updateTime;
|
||||
}
|
||||
|
||||
public void setUpdateTime(Long updateTime) {
|
||||
this.updateTime = updateTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicOverviewDTO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", replicaNum=" + replicaNum +
|
||||
", partitionNum=" + partitionNum +
|
||||
", bytesInPerSec=" + bytesInPerSec +
|
||||
", produceRequestPerSec=" + produceRequestPerSec +
|
||||
", updateTime=" + updateTime +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,105 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2017/6/6.
|
||||
*/
|
||||
public class TopicPartitionDTO implements Serializable {
|
||||
|
||||
private Integer partitionId;
|
||||
|
||||
private Long offset;
|
||||
|
||||
private Integer leaderBrokerId;
|
||||
|
||||
private Integer preferredBrokerId;
|
||||
|
||||
private Integer leaderEpoch;
|
||||
|
||||
private List<Integer> replicasBroker;
|
||||
|
||||
private List<Integer> isr;
|
||||
|
||||
private Boolean underReplicated;
|
||||
|
||||
public Integer getPartitionId() {
|
||||
return partitionId;
|
||||
}
|
||||
|
||||
public void setPartitionId(Integer partitionId) {
|
||||
this.partitionId = partitionId;
|
||||
}
|
||||
|
||||
public Long getOffset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
public void setOffset(Long offset) {
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public Integer getLeaderBrokerId() {
|
||||
return leaderBrokerId;
|
||||
}
|
||||
|
||||
public void setLeaderBrokerId(Integer leaderBrokerId) {
|
||||
this.leaderBrokerId = leaderBrokerId;
|
||||
}
|
||||
|
||||
public Integer getPreferredBrokerId() {
|
||||
return preferredBrokerId;
|
||||
}
|
||||
|
||||
public void setPreferredBrokerId(Integer preferredBrokerId) {
|
||||
this.preferredBrokerId = preferredBrokerId;
|
||||
}
|
||||
|
||||
public Integer getLeaderEpoch() {
|
||||
return leaderEpoch;
|
||||
}
|
||||
|
||||
public void setLeaderEpoch(Integer leaderEpoch) {
|
||||
this.leaderEpoch = leaderEpoch;
|
||||
}
|
||||
|
||||
public List<Integer> getReplicasBroker() {
|
||||
return replicasBroker;
|
||||
}
|
||||
|
||||
public void setReplicasBroker(List<Integer> replicasBroker) {
|
||||
this.replicasBroker = replicasBroker;
|
||||
}
|
||||
|
||||
public List<Integer> getIsr() {
|
||||
return isr;
|
||||
}
|
||||
|
||||
public void setIsr(List<Integer> isr) {
|
||||
this.isr = isr;
|
||||
}
|
||||
|
||||
public boolean isUnderReplicated() {
|
||||
return underReplicated;
|
||||
}
|
||||
|
||||
public void setUnderReplicated(boolean underReplicated) {
|
||||
this.underReplicated = underReplicated;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicPartitionDTO{" +
|
||||
"partitionId=" + partitionId +
|
||||
", offset=" + offset +
|
||||
", leaderBrokerId=" + leaderBrokerId +
|
||||
", preferredBrokerId=" + preferredBrokerId +
|
||||
", leaderEpoch=" + leaderEpoch +
|
||||
", replicasBroker=" + replicasBroker +
|
||||
", isr=" + isr +
|
||||
", underReplicated=" + underReplicated +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
|
||||
|
||||
/**
|
||||
* 告警通知
|
||||
* @author zengqiao
|
||||
* @date 2020-02-14
|
||||
*/
|
||||
public class AlarmNotifyDTO {
|
||||
private Long alarmRuleId;
|
||||
|
||||
private String actionTag;
|
||||
|
||||
private String message;
|
||||
|
||||
public Long getAlarmRuleId() {
|
||||
return alarmRuleId;
|
||||
}
|
||||
|
||||
public void setAlarmRuleId(Long alarmRuleId) {
|
||||
this.alarmRuleId = alarmRuleId;
|
||||
}
|
||||
|
||||
public String getActionTag() {
|
||||
return actionTag;
|
||||
}
|
||||
|
||||
public void setActionTag(String actionTag) {
|
||||
this.actionTag = actionTag;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
|
||||
public void setMessage(String message) {
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlarmNotifyDTO{" +
|
||||
"alarmRuleId=" + alarmRuleId +
|
||||
", actionTag='" + actionTag + '\'' +
|
||||
", message='" + message + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/16
|
||||
*/
|
||||
public class AlarmRuleDTO {
|
||||
/**
|
||||
* 告警ID
|
||||
*/
|
||||
private Long id;
|
||||
|
||||
/**
|
||||
* 告警名称
|
||||
*/
|
||||
private String name;
|
||||
|
||||
/**
|
||||
* 已持续次数
|
||||
*/
|
||||
private Integer duration;
|
||||
|
||||
/**
|
||||
* 集群ID, 过滤条件中必有的, 单独拿出来
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* 告警策略表达式
|
||||
*/
|
||||
private AlarmStrategyExpressionDTO strategyExpression;
|
||||
|
||||
/**
|
||||
* 告警策略过滤条件
|
||||
*/
|
||||
private Map<String, String> strategyFilterMap;
|
||||
|
||||
/**
|
||||
* 告警策略Action方式
|
||||
*/
|
||||
private Map<String, AlarmStrategyActionDTO> strategyActionMap;
|
||||
|
||||
/**
|
||||
* 修改时间
|
||||
*/
|
||||
private Long gmtModify;
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Long id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public Integer getDuration() {
|
||||
return duration;
|
||||
}
|
||||
|
||||
public void setDuration(Integer duration) {
|
||||
this.duration = duration;
|
||||
}
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public AlarmStrategyExpressionDTO getStrategyExpression() {
|
||||
return strategyExpression;
|
||||
}
|
||||
|
||||
public void setStrategyExpression(AlarmStrategyExpressionDTO strategyExpression) {
|
||||
this.strategyExpression = strategyExpression;
|
||||
}
|
||||
|
||||
public Map<String, String> getStrategyFilterMap() {
|
||||
return strategyFilterMap;
|
||||
}
|
||||
|
||||
public void setStrategyFilterMap(Map<String, String> strategyFilterMap) {
|
||||
this.strategyFilterMap = strategyFilterMap;
|
||||
}
|
||||
|
||||
public Map<String, AlarmStrategyActionDTO> getStrategyActionMap() {
|
||||
return strategyActionMap;
|
||||
}
|
||||
|
||||
public void setStrategyActionMap(Map<String, AlarmStrategyActionDTO> strategyActionMap) {
|
||||
this.strategyActionMap = strategyActionMap;
|
||||
}
|
||||
|
||||
public Long getGmtModify() {
|
||||
return gmtModify;
|
||||
}
|
||||
|
||||
public void setGmtModify(Long gmtModify) {
|
||||
this.gmtModify = gmtModify;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlarmRuleDTO{" +
|
||||
"id=" + id +
|
||||
", name='" + name + '\'' +
|
||||
", duration=" + duration +
|
||||
", clusterId=" + clusterId +
|
||||
", strategyExpression=" + strategyExpression +
|
||||
", strategyFilterMap=" + strategyFilterMap +
|
||||
", strategyActionMap=" + strategyActionMap +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/16
|
||||
*/
|
||||
public class AlarmStrategyActionDTO {
|
||||
private String actionWay; // 告知方式: kafka
|
||||
|
||||
private String actionTag;
|
||||
|
||||
public String getActionWay() {
|
||||
return actionWay;
|
||||
}
|
||||
|
||||
public void setActionWay(String actionWay) {
|
||||
this.actionWay = actionWay;
|
||||
}
|
||||
|
||||
public String getActionTag() {
|
||||
return actionTag;
|
||||
}
|
||||
|
||||
public void setActionTag(String actionTag) {
|
||||
this.actionTag = actionTag;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlarmStrategyActionDTO{" +
|
||||
"actionWay='" + actionWay + '\'' +
|
||||
", actionTag='" + actionTag + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public boolean legal() {
|
||||
if (actionWay == null
|
||||
|| actionTag == null) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
|
||||
|
||||
/**
|
||||
* 策略表达式
|
||||
* @author zengqiao
|
||||
* @date 19/12/16
|
||||
*/
|
||||
public class AlarmStrategyExpressionDTO {
|
||||
private String metric;
|
||||
|
||||
private String opt;
|
||||
|
||||
private Long threshold;
|
||||
|
||||
private Integer duration;
|
||||
|
||||
public String getMetric() {
|
||||
return metric;
|
||||
}
|
||||
|
||||
public void setMetric(String metric) {
|
||||
this.metric = metric;
|
||||
}
|
||||
|
||||
public String getOpt() {
|
||||
return opt;
|
||||
}
|
||||
|
||||
public void setOpt(String opt) {
|
||||
this.opt = opt;
|
||||
}
|
||||
|
||||
public Long getThreshold() {
|
||||
return threshold;
|
||||
}
|
||||
|
||||
public void setThreshold(Long threshold) {
|
||||
this.threshold = threshold;
|
||||
}
|
||||
|
||||
public Integer getDuration() {
|
||||
return duration;
|
||||
}
|
||||
|
||||
public void setDuration(Integer duration) {
|
||||
this.duration = duration;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlarmStrategyExpressionModel{" +
|
||||
"metric='" + metric + '\'' +
|
||||
", opt='" + opt + '\'' +
|
||||
", threshold=" + threshold +
|
||||
", duration=" + duration +
|
||||
'}';
|
||||
}
|
||||
|
||||
public boolean legal() {
|
||||
if (metric == null
|
||||
|| opt == null
|
||||
|| threshold == null
|
||||
|| duration == null || duration <= 0) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
|
||||
|
||||
/**
|
||||
* 告警过滤条件
|
||||
* @author zengqiao
|
||||
* @date 19/12/16
|
||||
*/
|
||||
public class AlarmStrategyFilterDTO {
|
||||
private String key;
|
||||
|
||||
private String value;
|
||||
|
||||
public String getKey() {
|
||||
return key;
|
||||
}
|
||||
|
||||
public void setKey(String key) {
|
||||
this.key = key;
|
||||
}
|
||||
|
||||
public String getValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public void setValue(String value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlarmStrategyFilterModel{" +
|
||||
"key='" + key + '\'' +
|
||||
", value='" + value + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public boolean legal() {
|
||||
if (key == null
|
||||
|| value == null) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.analysis;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/29
|
||||
*/
|
||||
public class AnalysisBrokerDTO {
|
||||
private Long clusterId;
|
||||
|
||||
private Integer brokerId;
|
||||
|
||||
private Long baseTime;
|
||||
|
||||
private Double bytesIn;
|
||||
|
||||
private Double bytesOut;
|
||||
|
||||
private Double messagesIn;
|
||||
|
||||
private Double totalFetchRequests;
|
||||
|
||||
private Double totalProduceRequests;
|
||||
|
||||
List<AnalysisTopicDTO> topicAnalysisVOList;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public Long getBaseTime() {
|
||||
return baseTime;
|
||||
}
|
||||
|
||||
public void setBaseTime(Long baseTime) {
|
||||
this.baseTime = baseTime;
|
||||
}
|
||||
|
||||
public Double getBytesIn() {
|
||||
return bytesIn;
|
||||
}
|
||||
|
||||
public void setBytesIn(Double bytesIn) {
|
||||
this.bytesIn = bytesIn;
|
||||
}
|
||||
|
||||
public Double getBytesOut() {
|
||||
return bytesOut;
|
||||
}
|
||||
|
||||
public void setBytesOut(Double bytesOut) {
|
||||
this.bytesOut = bytesOut;
|
||||
}
|
||||
|
||||
public Double getMessagesIn() {
|
||||
return messagesIn;
|
||||
}
|
||||
|
||||
public void setMessagesIn(Double messagesIn) {
|
||||
this.messagesIn = messagesIn;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequests() {
|
||||
return totalFetchRequests;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequests(Double totalFetchRequests) {
|
||||
this.totalFetchRequests = totalFetchRequests;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequests() {
|
||||
return totalProduceRequests;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequests(Double totalProduceRequests) {
|
||||
this.totalProduceRequests = totalProduceRequests;
|
||||
}
|
||||
|
||||
public List<AnalysisTopicDTO> getTopicAnalysisVOList() {
|
||||
return topicAnalysisVOList;
|
||||
}
|
||||
|
||||
public void setTopicAnalysisVOList(List<AnalysisTopicDTO> topicAnalysisVOList) {
|
||||
this.topicAnalysisVOList = topicAnalysisVOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AnalysisBrokerDTO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", brokerId=" + brokerId +
|
||||
", baseTime=" + baseTime +
|
||||
", bytesIn=" + bytesIn +
|
||||
", bytesOut=" + bytesOut +
|
||||
", messagesIn=" + messagesIn +
|
||||
", totalFetchRequests=" + totalFetchRequests +
|
||||
", totalProduceRequests=" + totalProduceRequests +
|
||||
", topicAnalysisVOList=" + topicAnalysisVOList +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,134 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.analysis;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/29
|
||||
*/
|
||||
public class AnalysisTopicDTO {
|
||||
private String topicName;
|
||||
|
||||
private Double bytesIn;
|
||||
|
||||
private Double bytesInRate;
|
||||
|
||||
private Double bytesOut;
|
||||
|
||||
private Double bytesOutRate;
|
||||
|
||||
private Double messagesIn;
|
||||
|
||||
private Double messagesInRate;
|
||||
|
||||
private Double totalFetchRequests;
|
||||
|
||||
private Double totalFetchRequestsRate;
|
||||
|
||||
private Double totalProduceRequests;
|
||||
|
||||
private Double totalProduceRequestsRate;
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public Double getBytesIn() {
|
||||
return bytesIn;
|
||||
}
|
||||
|
||||
public void setBytesIn(Double bytesIn) {
|
||||
this.bytesIn = bytesIn;
|
||||
}
|
||||
|
||||
public Double getBytesInRate() {
|
||||
return bytesInRate;
|
||||
}
|
||||
|
||||
public void setBytesInRate(Double bytesInRate) {
|
||||
this.bytesInRate = bytesInRate;
|
||||
}
|
||||
|
||||
public Double getBytesOut() {
|
||||
return bytesOut;
|
||||
}
|
||||
|
||||
public void setBytesOut(Double bytesOut) {
|
||||
this.bytesOut = bytesOut;
|
||||
}
|
||||
|
||||
public Double getBytesOutRate() {
|
||||
return bytesOutRate;
|
||||
}
|
||||
|
||||
public void setBytesOutRate(Double bytesOutRate) {
|
||||
this.bytesOutRate = bytesOutRate;
|
||||
}
|
||||
|
||||
public Double getMessagesIn() {
|
||||
return messagesIn;
|
||||
}
|
||||
|
||||
public void setMessagesIn(Double messagesIn) {
|
||||
this.messagesIn = messagesIn;
|
||||
}
|
||||
|
||||
public Double getMessagesInRate() {
|
||||
return messagesInRate;
|
||||
}
|
||||
|
||||
public void setMessagesInRate(Double messagesInRate) {
|
||||
this.messagesInRate = messagesInRate;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequests() {
|
||||
return totalFetchRequests;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequests(Double totalFetchRequests) {
|
||||
this.totalFetchRequests = totalFetchRequests;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequestsRate() {
|
||||
return totalFetchRequestsRate;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequestsRate(Double totalFetchRequestsRate) {
|
||||
this.totalFetchRequestsRate = totalFetchRequestsRate;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequests() {
|
||||
return totalProduceRequests;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequests(Double totalProduceRequests) {
|
||||
this.totalProduceRequests = totalProduceRequests;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequestsRate() {
|
||||
return totalProduceRequestsRate;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequestsRate(Double totalProduceRequestsRate) {
|
||||
this.totalProduceRequestsRate = totalProduceRequestsRate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AnalysisTopicDTO{" +
|
||||
"topicName='" + topicName + '\'' +
|
||||
", bytesIn=" + bytesIn +
|
||||
", bytesInRate=" + bytesInRate +
|
||||
", bytesOut=" + bytesOut +
|
||||
", bytesOutRate=" + bytesOutRate +
|
||||
", messagesIn=" + messagesIn +
|
||||
", messagesInRate=" + messagesInRate +
|
||||
", totalFetchRequests=" + totalFetchRequests +
|
||||
", totalFetchRequestsRate=" + totalFetchRequestsRate +
|
||||
", totalProduceRequests=" + totalProduceRequests +
|
||||
", totalProduceRequestsRate=" + totalProduceRequestsRate +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/1/9
|
||||
*/
|
||||
public class ConsumeDetailDTO {
|
||||
private Integer partitionId;
|
||||
|
||||
private Long offset;
|
||||
|
||||
private Long consumeOffset;
|
||||
|
||||
private String consumerId;
|
||||
|
||||
public Integer getPartitionId() {
|
||||
return partitionId;
|
||||
}
|
||||
|
||||
public void setPartitionId(Integer partitionId) {
|
||||
this.partitionId = partitionId;
|
||||
}
|
||||
|
||||
public Long getOffset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
public void setOffset(Long offset) {
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public Long getConsumeOffset() {
|
||||
return consumeOffset;
|
||||
}
|
||||
|
||||
public void setConsumeOffset(Long consumeOffset) {
|
||||
this.consumeOffset = consumeOffset;
|
||||
}
|
||||
|
||||
public String getConsumerId() {
|
||||
return consumerId;
|
||||
}
|
||||
|
||||
public void setConsumerId(String consumerId) {
|
||||
this.consumerId = consumerId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConsumeDetailDTO{" +
|
||||
"partitionId=" + partitionId +
|
||||
", offset=" + offset +
|
||||
", consumeOffset=" + consumeOffset +
|
||||
", consumerId='" + consumerId + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Consumer实体类
|
||||
* @author tukun
|
||||
* @date 2015/11/12
|
||||
*/
|
||||
public class ConsumerDTO {
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private String consumerGroup;
|
||||
|
||||
private String location;
|
||||
|
||||
private Map<Integer, Long> partitionOffsetMap;
|
||||
|
||||
private Map<Integer, Long> consumerOffsetMap;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public String getConsumerGroup() {
|
||||
return consumerGroup;
|
||||
}
|
||||
|
||||
public void setConsumerGroup(String consumerGroup) {
|
||||
this.consumerGroup = consumerGroup;
|
||||
}
|
||||
|
||||
public String getLocation() {
|
||||
return location;
|
||||
}
|
||||
|
||||
public void setLocation(String location) {
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
public Map<Integer, Long> getPartitionOffsetMap() {
|
||||
return partitionOffsetMap;
|
||||
}
|
||||
|
||||
public void setPartitionOffsetMap(Map<Integer, Long> partitionOffsetMap) {
|
||||
this.partitionOffsetMap = partitionOffsetMap;
|
||||
}
|
||||
|
||||
public Map<Integer, Long> getConsumerOffsetMap() {
|
||||
return consumerOffsetMap;
|
||||
}
|
||||
|
||||
public void setConsumerOffsetMap(Map<Integer, Long> consumerOffsetMap) {
|
||||
this.consumerOffsetMap = consumerOffsetMap;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConsumerDTO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", consumerGroup='" + consumerGroup + '\'' +
|
||||
", location='" + location + '\'' +
|
||||
", partitionOffsetMap=" + partitionOffsetMap +
|
||||
", consumerOffsetMap=" + consumerOffsetMap +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.OffsetStoreLocation;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* 消费组信息
|
||||
* @author zengqiao
|
||||
* @date 19/4/18
|
||||
*/
|
||||
public class ConsumerGroupDTO {
|
||||
private Long clusterId;
|
||||
|
||||
private String consumerGroup;
|
||||
|
||||
private OffsetStoreLocation offsetStoreLocation;
|
||||
|
||||
public ConsumerGroupDTO(Long clusterId, String consumerGroup, OffsetStoreLocation offsetStoreLocation) {
|
||||
this.clusterId = clusterId;
|
||||
this.consumerGroup = consumerGroup;
|
||||
this.offsetStoreLocation = offsetStoreLocation;
|
||||
}
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getConsumerGroup() {
|
||||
return consumerGroup;
|
||||
}
|
||||
|
||||
public void setConsumerGroup(String consumerGroup) {
|
||||
this.consumerGroup = consumerGroup;
|
||||
}
|
||||
|
||||
public OffsetStoreLocation getOffsetStoreLocation() {
|
||||
return offsetStoreLocation;
|
||||
}
|
||||
|
||||
public void setOffsetStoreLocation(OffsetStoreLocation offsetStoreLocation) {
|
||||
this.offsetStoreLocation = offsetStoreLocation;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
|
||||
return clusterId.equals(that.clusterId)
|
||||
&& consumerGroup.equals(that.consumerGroup)
|
||||
&& offsetStoreLocation == that.offsetStoreLocation;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(clusterId, consumerGroup, offsetStoreLocation);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConsumerGroupDTO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", consumerGroup='" + consumerGroup + '\'' +
|
||||
", offsetStoreLocation=" + offsetStoreLocation +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,394 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.metrics;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.BaseEntryDO;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/11/25
|
||||
*/
|
||||
public class BaseMetrics extends BaseEntryDO {
|
||||
/**
|
||||
* 每秒流入的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS,
|
||||
MetricsType.BROKER_OVER_VIEW_METRICS,
|
||||
MetricsType.BROKER_ANALYSIS_METRICS,
|
||||
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL,
|
||||
MetricsType.TOPIC_FLOW_OVERVIEW,
|
||||
MetricsType.TOPIC_METRICS_TO_DB
|
||||
})
|
||||
protected Double bytesInPerSec = 0.0;
|
||||
protected Double bytesInPerSecMeanRate = 0.0;
|
||||
protected Double bytesInPerSecFiveMinuteRate = 0.0;
|
||||
protected Double bytesInPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒流出的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS,
|
||||
MetricsType.BROKER_OVER_VIEW_METRICS,
|
||||
MetricsType.BROKER_ANALYSIS_METRICS,
|
||||
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL,
|
||||
MetricsType.TOPIC_METRICS_TO_DB
|
||||
})
|
||||
protected Double bytesOutPerSec = 0.0;
|
||||
protected Double bytesOutPerSecMeanRate = 0.0;
|
||||
protected Double bytesOutPerSecFiveMinuteRate = 0.0;
|
||||
protected Double bytesOutPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒流入的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS,
|
||||
MetricsType.BROKER_ANALYSIS_METRICS,
|
||||
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL,
|
||||
MetricsType.TOPIC_METRICS_TO_DB
|
||||
})
|
||||
protected Double messagesInPerSec = 0.0;
|
||||
protected Double messagesInPerSecMeanRate = 0.0;
|
||||
protected Double messagesInPerSecFiveMinuteRate = 0.0;
|
||||
protected Double messagesInPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒拒绝的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL,
|
||||
MetricsType.TOPIC_METRICS_TO_DB
|
||||
})
|
||||
protected Double bytesRejectedPerSec = 0.0;
|
||||
protected Double bytesRejectedPerSecMeanRate = 0.0;
|
||||
protected Double bytesRejectedPerSecFiveMinuteRate = 0.0;
|
||||
protected Double bytesRejectedPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒失败的Produce请求数的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL
|
||||
})
|
||||
protected Double failProduceRequestPerSec = 0.0;
|
||||
protected Double failProduceRequestPerSecMeanRate = 0.0;
|
||||
protected Double failProduceRequestPerSecFiveMinuteRate = 0.0;
|
||||
protected Double failProduceRequestPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒失败的Fetch请求数的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL
|
||||
})
|
||||
protected Double failFetchRequestPerSec = 0.0;
|
||||
protected Double failFetchRequestPerSecMeanRate = 0.0;
|
||||
protected Double failFetchRequestPerSecFiveMinuteRate = 0.0;
|
||||
protected Double failFetchRequestPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒总Produce请求数的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_ANALYSIS_METRICS,
|
||||
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL,
|
||||
MetricsType.TOPIC_METRICS_TO_DB,
|
||||
MetricsType.TOPIC_FLOW_OVERVIEW
|
||||
})
|
||||
protected Double totalProduceRequestsPerSec = 0.0;
|
||||
protected Double totalProduceRequestsPerSecMeanRate = 0.0;
|
||||
protected Double totalProduceRequestsPerSecFiveMinuteRate = 0.0;
|
||||
protected Double totalProduceRequestsPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒总Fetch请求数的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_ANALYSIS_METRICS,
|
||||
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
|
||||
MetricsType.TOPIC_FLOW_DETAIL
|
||||
})
|
||||
protected Double totalFetchRequestsPerSec = 0.0;
|
||||
protected Double totalFetchRequestsPerSecMeanRate = 0.0;
|
||||
protected Double totalFetchRequestsPerSecFiveMinuteRate = 0.0;
|
||||
protected Double totalFetchRequestsPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
public Double getBytesInPerSec() {
|
||||
return bytesInPerSec;
|
||||
}
|
||||
|
||||
public void setBytesInPerSec(Double bytesInPerSec) {
|
||||
this.bytesInPerSec = bytesInPerSec;
|
||||
}
|
||||
|
||||
public Double getBytesInPerSecMeanRate() {
|
||||
return bytesInPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setBytesInPerSecMeanRate(Double bytesInPerSecMeanRate) {
|
||||
this.bytesInPerSecMeanRate = bytesInPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getBytesInPerSecFiveMinuteRate() {
|
||||
return bytesInPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setBytesInPerSecFiveMinuteRate(Double bytesInPerSecFiveMinuteRate) {
|
||||
this.bytesInPerSecFiveMinuteRate = bytesInPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getBytesInPerSecFifteenMinuteRate() {
|
||||
return bytesInPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setBytesInPerSecFifteenMinuteRate(Double bytesInPerSecFifteenMinuteRate) {
|
||||
this.bytesInPerSecFifteenMinuteRate = bytesInPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getBytesOutPerSec() {
|
||||
return bytesOutPerSec;
|
||||
}
|
||||
|
||||
public void setBytesOutPerSec(Double bytesOutPerSec) {
|
||||
this.bytesOutPerSec = bytesOutPerSec;
|
||||
}
|
||||
|
||||
public Double getBytesOutPerSecMeanRate() {
|
||||
return bytesOutPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setBytesOutPerSecMeanRate(Double bytesOutPerSecMeanRate) {
|
||||
this.bytesOutPerSecMeanRate = bytesOutPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getBytesOutPerSecFiveMinuteRate() {
|
||||
return bytesOutPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setBytesOutPerSecFiveMinuteRate(Double bytesOutPerSecFiveMinuteRate) {
|
||||
this.bytesOutPerSecFiveMinuteRate = bytesOutPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getBytesOutPerSecFifteenMinuteRate() {
|
||||
return bytesOutPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setBytesOutPerSecFifteenMinuteRate(Double bytesOutPerSecFifteenMinuteRate) {
|
||||
this.bytesOutPerSecFifteenMinuteRate = bytesOutPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getMessagesInPerSec() {
|
||||
return messagesInPerSec;
|
||||
}
|
||||
|
||||
public void setMessagesInPerSec(Double messagesInPerSec) {
|
||||
this.messagesInPerSec = messagesInPerSec;
|
||||
}
|
||||
|
||||
public Double getMessagesInPerSecMeanRate() {
|
||||
return messagesInPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setMessagesInPerSecMeanRate(Double messagesInPerSecMeanRate) {
|
||||
this.messagesInPerSecMeanRate = messagesInPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getMessagesInPerSecFiveMinuteRate() {
|
||||
return messagesInPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setMessagesInPerSecFiveMinuteRate(Double messagesInPerSecFiveMinuteRate) {
|
||||
this.messagesInPerSecFiveMinuteRate = messagesInPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getMessagesInPerSecFifteenMinuteRate() {
|
||||
return messagesInPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setMessagesInPerSecFifteenMinuteRate(Double messagesInPerSecFifteenMinuteRate) {
|
||||
this.messagesInPerSecFifteenMinuteRate = messagesInPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getBytesRejectedPerSec() {
|
||||
return bytesRejectedPerSec;
|
||||
}
|
||||
|
||||
public void setBytesRejectedPerSec(Double bytesRejectedPerSec) {
|
||||
this.bytesRejectedPerSec = bytesRejectedPerSec;
|
||||
}
|
||||
|
||||
public Double getBytesRejectedPerSecMeanRate() {
|
||||
return bytesRejectedPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setBytesRejectedPerSecMeanRate(Double bytesRejectedPerSecMeanRate) {
|
||||
this.bytesRejectedPerSecMeanRate = bytesRejectedPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getBytesRejectedPerSecFiveMinuteRate() {
|
||||
return bytesRejectedPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setBytesRejectedPerSecFiveMinuteRate(Double bytesRejectedPerSecFiveMinuteRate) {
|
||||
this.bytesRejectedPerSecFiveMinuteRate = bytesRejectedPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getBytesRejectedPerSecFifteenMinuteRate() {
|
||||
return bytesRejectedPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setBytesRejectedPerSecFifteenMinuteRate(Double bytesRejectedPerSecFifteenMinuteRate) {
|
||||
this.bytesRejectedPerSecFifteenMinuteRate = bytesRejectedPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getFailProduceRequestPerSec() {
|
||||
return failProduceRequestPerSec;
|
||||
}
|
||||
|
||||
public void setFailProduceRequestPerSec(Double failProduceRequestPerSec) {
|
||||
this.failProduceRequestPerSec = failProduceRequestPerSec;
|
||||
}
|
||||
|
||||
public Double getFailProduceRequestPerSecMeanRate() {
|
||||
return failProduceRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setFailProduceRequestPerSecMeanRate(Double failProduceRequestPerSecMeanRate) {
|
||||
this.failProduceRequestPerSecMeanRate = failProduceRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getFailProduceRequestPerSecFiveMinuteRate() {
|
||||
return failProduceRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setFailProduceRequestPerSecFiveMinuteRate(Double failProduceRequestPerSecFiveMinuteRate) {
|
||||
this.failProduceRequestPerSecFiveMinuteRate = failProduceRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getFailProduceRequestPerSecFifteenMinuteRate() {
|
||||
return failProduceRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setFailProduceRequestPerSecFifteenMinuteRate(Double failProduceRequestPerSecFifteenMinuteRate) {
|
||||
this.failProduceRequestPerSecFifteenMinuteRate = failProduceRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getFailFetchRequestPerSec() {
|
||||
return failFetchRequestPerSec;
|
||||
}
|
||||
|
||||
public void setFailFetchRequestPerSec(Double failFetchRequestPerSec) {
|
||||
this.failFetchRequestPerSec = failFetchRequestPerSec;
|
||||
}
|
||||
|
||||
public Double getFailFetchRequestPerSecMeanRate() {
|
||||
return failFetchRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setFailFetchRequestPerSecMeanRate(Double failFetchRequestPerSecMeanRate) {
|
||||
this.failFetchRequestPerSecMeanRate = failFetchRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getFailFetchRequestPerSecFiveMinuteRate() {
|
||||
return failFetchRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setFailFetchRequestPerSecFiveMinuteRate(Double failFetchRequestPerSecFiveMinuteRate) {
|
||||
this.failFetchRequestPerSecFiveMinuteRate = failFetchRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getFailFetchRequestPerSecFifteenMinuteRate() {
|
||||
return failFetchRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setFailFetchRequestPerSecFifteenMinuteRate(Double failFetchRequestPerSecFifteenMinuteRate) {
|
||||
this.failFetchRequestPerSecFifteenMinuteRate = failFetchRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequestsPerSec() {
|
||||
return totalProduceRequestsPerSec;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequestsPerSec(Double totalProduceRequestsPerSec) {
|
||||
this.totalProduceRequestsPerSec = totalProduceRequestsPerSec;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequestsPerSecMeanRate() {
|
||||
return totalProduceRequestsPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequestsPerSecMeanRate(Double totalProduceRequestsPerSecMeanRate) {
|
||||
this.totalProduceRequestsPerSecMeanRate = totalProduceRequestsPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequestsPerSecFiveMinuteRate() {
|
||||
return totalProduceRequestsPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequestsPerSecFiveMinuteRate(Double totalProduceRequestsPerSecFiveMinuteRate) {
|
||||
this.totalProduceRequestsPerSecFiveMinuteRate = totalProduceRequestsPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getTotalProduceRequestsPerSecFifteenMinuteRate() {
|
||||
return totalProduceRequestsPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setTotalProduceRequestsPerSecFifteenMinuteRate(Double totalProduceRequestsPerSecFifteenMinuteRate) {
|
||||
this.totalProduceRequestsPerSecFifteenMinuteRate = totalProduceRequestsPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequestsPerSec() {
|
||||
return totalFetchRequestsPerSec;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequestsPerSec(Double totalFetchRequestsPerSec) {
|
||||
this.totalFetchRequestsPerSec = totalFetchRequestsPerSec;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequestsPerSecMeanRate() {
|
||||
return totalFetchRequestsPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequestsPerSecMeanRate(Double totalFetchRequestsPerSecMeanRate) {
|
||||
this.totalFetchRequestsPerSecMeanRate = totalFetchRequestsPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequestsPerSecFiveMinuteRate() {
|
||||
return totalFetchRequestsPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequestsPerSecFiveMinuteRate(Double totalFetchRequestsPerSecFiveMinuteRate) {
|
||||
this.totalFetchRequestsPerSecFiveMinuteRate = totalFetchRequestsPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getTotalFetchRequestsPerSecFifteenMinuteRate() {
|
||||
return totalFetchRequestsPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setTotalFetchRequestsPerSecFifteenMinuteRate(Double totalFetchRequestsPerSecFifteenMinuteRate) {
|
||||
this.totalFetchRequestsPerSecFifteenMinuteRate = totalFetchRequestsPerSecFifteenMinuteRate;
|
||||
}
|
||||
}
|
||||
@@ -1,331 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.metrics;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 需要定时拉取的broker数据
|
||||
* @author tukun
|
||||
* @date 2015/11/6.
|
||||
*/
|
||||
public class BrokerMetrics extends BaseMetrics {
|
||||
/**
|
||||
* 集群ID
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* Topic名称
|
||||
*/
|
||||
private Integer brokerId;
|
||||
|
||||
/**
|
||||
* 每秒Produce请求数的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS
|
||||
})
|
||||
private Double produceRequestPerSec = 0.0;
|
||||
private Double produceRequestPerSecMeanRate = 0.0;
|
||||
private Double produceRequestPerSecFiveMinuteRate = 0.0;
|
||||
private Double produceRequestPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* 每秒Fetch请求数的近一分钟的均值、平均字节数、近五分钟均值、近十五分钟均值
|
||||
*/
|
||||
@FieldSelector(types = {
|
||||
MetricsType.BROKER_FLOW_DETAIL,
|
||||
MetricsType.BROKER_TO_DB_METRICS,
|
||||
MetricsType.BROKER_REAL_TIME_METRICS
|
||||
})
|
||||
private Double fetchConsumerRequestPerSec = 0.0;
|
||||
private Double fetchConsumerRequestPerSecMeanRate = 0.0;
|
||||
private Double fetchConsumerRequestPerSecFiveMinuteRate = 0.0;
|
||||
private Double fetchConsumerRequestPerSecFifteenMinuteRate = 0.0;
|
||||
|
||||
/**
|
||||
* Broker分区数量
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS, 5})
|
||||
private int partitionCount;
|
||||
|
||||
/**
|
||||
* Broker已同步分区数量
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS})
|
||||
private int underReplicatedPartitions;
|
||||
|
||||
/**
|
||||
* Broker Leader的数量
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS, 5})
|
||||
private int leaderCount;
|
||||
|
||||
/**
|
||||
* Broker请求处理器空闲百分比
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double requestHandlerAvgIdlePercent = 0.0;
|
||||
|
||||
/**
|
||||
* 网络处理器空闲百分比
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double networkProcessorAvgIdlePercent = 0.0;
|
||||
|
||||
/**
|
||||
* 请求列表大小
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Integer requestQueueSize = 0;
|
||||
|
||||
/**
|
||||
* 响应列表大小
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Integer responseQueueSize = 0;
|
||||
|
||||
/**
|
||||
* 刷日志时间
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double logFlushRateAndTimeMs = 0.0;
|
||||
|
||||
/**
|
||||
* produce请求总时间-平均值
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double totalTimeProduceMean = 0.0;
|
||||
|
||||
/**
|
||||
* produce请求总时间-99th
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double totalTimeProduce99Th = 0.0;
|
||||
|
||||
/**
|
||||
* fetch consumer请求总时间-平均值
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double totalTimeFetchConsumerMean = 0.0;
|
||||
|
||||
/**
|
||||
* fetch consumer请求总时间-99th
|
||||
*/
|
||||
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
|
||||
private Double totalTimeFetchConsumer99Th = 0.0;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public Double getProduceRequestPerSec() {
|
||||
return produceRequestPerSec;
|
||||
}
|
||||
|
||||
public void setProduceRequestPerSec(Double produceRequestPerSec) {
|
||||
this.produceRequestPerSec = produceRequestPerSec;
|
||||
}
|
||||
|
||||
public Double getProduceRequestPerSecMeanRate() {
|
||||
return produceRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setProduceRequestPerSecMeanRate(Double produceRequestPerSecMeanRate) {
|
||||
this.produceRequestPerSecMeanRate = produceRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getProduceRequestPerSecFiveMinuteRate() {
|
||||
return produceRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setProduceRequestPerSecFiveMinuteRate(Double produceRequestPerSecFiveMinuteRate) {
|
||||
this.produceRequestPerSecFiveMinuteRate = produceRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getProduceRequestPerSecFifteenMinuteRate() {
|
||||
return produceRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setProduceRequestPerSecFifteenMinuteRate(Double produceRequestPerSecFifteenMinuteRate) {
|
||||
this.produceRequestPerSecFifteenMinuteRate = produceRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public Double getFetchConsumerRequestPerSec() {
|
||||
return fetchConsumerRequestPerSec;
|
||||
}
|
||||
|
||||
public void setFetchConsumerRequestPerSec(Double fetchConsumerRequestPerSec) {
|
||||
this.fetchConsumerRequestPerSec = fetchConsumerRequestPerSec;
|
||||
}
|
||||
|
||||
public Double getFetchConsumerRequestPerSecMeanRate() {
|
||||
return fetchConsumerRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public void setFetchConsumerRequestPerSecMeanRate(Double fetchConsumerRequestPerSecMeanRate) {
|
||||
this.fetchConsumerRequestPerSecMeanRate = fetchConsumerRequestPerSecMeanRate;
|
||||
}
|
||||
|
||||
public Double getFetchConsumerRequestPerSecFiveMinuteRate() {
|
||||
return fetchConsumerRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public void setFetchConsumerRequestPerSecFiveMinuteRate(Double fetchConsumerRequestPerSecFiveMinuteRate) {
|
||||
this.fetchConsumerRequestPerSecFiveMinuteRate = fetchConsumerRequestPerSecFiveMinuteRate;
|
||||
}
|
||||
|
||||
public Double getFetchConsumerRequestPerSecFifteenMinuteRate() {
|
||||
return fetchConsumerRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public void setFetchConsumerRequestPerSecFifteenMinuteRate(Double fetchConsumerRequestPerSecFifteenMinuteRate) {
|
||||
this.fetchConsumerRequestPerSecFifteenMinuteRate = fetchConsumerRequestPerSecFifteenMinuteRate;
|
||||
}
|
||||
|
||||
public int getPartitionCount() {
|
||||
return partitionCount;
|
||||
}
|
||||
|
||||
public void setPartitionCount(int partitionCount) {
|
||||
this.partitionCount = partitionCount;
|
||||
}
|
||||
|
||||
public int getUnderReplicatedPartitions() {
|
||||
return underReplicatedPartitions;
|
||||
}
|
||||
|
||||
public void setUnderReplicatedPartitions(int underReplicatedPartitions) {
|
||||
this.underReplicatedPartitions = underReplicatedPartitions;
|
||||
}
|
||||
|
||||
public int getLeaderCount() {
|
||||
return leaderCount;
|
||||
}
|
||||
|
||||
public void setLeaderCount(int leaderCount) {
|
||||
this.leaderCount = leaderCount;
|
||||
}
|
||||
|
||||
public Double getRequestHandlerAvgIdlePercent() {
|
||||
return requestHandlerAvgIdlePercent;
|
||||
}
|
||||
|
||||
public void setRequestHandlerAvgIdlePercent(Double requestHandlerAvgIdlePercent) {
|
||||
this.requestHandlerAvgIdlePercent = requestHandlerAvgIdlePercent;
|
||||
}
|
||||
|
||||
public Double getNetworkProcessorAvgIdlePercent() {
|
||||
return networkProcessorAvgIdlePercent;
|
||||
}
|
||||
|
||||
public void setNetworkProcessorAvgIdlePercent(Double networkProcessorAvgIdlePercent) {
|
||||
this.networkProcessorAvgIdlePercent = networkProcessorAvgIdlePercent;
|
||||
}
|
||||
|
||||
public Integer getRequestQueueSize() {
|
||||
return requestQueueSize;
|
||||
}
|
||||
|
||||
public void setRequestQueueSize(Integer requestQueueSize) {
|
||||
this.requestQueueSize = requestQueueSize;
|
||||
}
|
||||
|
||||
public Integer getResponseQueueSize() {
|
||||
return responseQueueSize;
|
||||
}
|
||||
|
||||
public void setResponseQueueSize(Integer responseQueueSize) {
|
||||
this.responseQueueSize = responseQueueSize;
|
||||
}
|
||||
|
||||
public Double getLogFlushRateAndTimeMs() {
|
||||
return logFlushRateAndTimeMs;
|
||||
}
|
||||
|
||||
public void setLogFlushRateAndTimeMs(Double logFlushRateAndTimeMs) {
|
||||
this.logFlushRateAndTimeMs = logFlushRateAndTimeMs;
|
||||
}
|
||||
|
||||
public Double getTotalTimeProduceMean() {
|
||||
return totalTimeProduceMean;
|
||||
}
|
||||
|
||||
public void setTotalTimeProduceMean(Double totalTimeProduceMean) {
|
||||
this.totalTimeProduceMean = totalTimeProduceMean;
|
||||
}
|
||||
|
||||
public Double getTotalTimeProduce99Th() {
|
||||
return totalTimeProduce99Th;
|
||||
}
|
||||
|
||||
public void setTotalTimeProduce99Th(Double totalTimeProduce99Th) {
|
||||
this.totalTimeProduce99Th = totalTimeProduce99Th;
|
||||
}
|
||||
|
||||
public Double getTotalTimeFetchConsumerMean() {
|
||||
return totalTimeFetchConsumerMean;
|
||||
}
|
||||
|
||||
public void setTotalTimeFetchConsumerMean(Double totalTimeFetchConsumerMean) {
|
||||
this.totalTimeFetchConsumerMean = totalTimeFetchConsumerMean;
|
||||
}
|
||||
|
||||
public Double getTotalTimeFetchConsumer99Th() {
|
||||
return totalTimeFetchConsumer99Th;
|
||||
}
|
||||
|
||||
public void setTotalTimeFetchConsumer99Th(Double totalTimeFetchConsumer99Th) {
|
||||
this.totalTimeFetchConsumer99Th = totalTimeFetchConsumer99Th;
|
||||
}
|
||||
|
||||
private static void initialization(Field[] fields){
|
||||
for(Field field : fields){
|
||||
FieldSelector annotation = field.getAnnotation(FieldSelector.class);
|
||||
if(annotation ==null){
|
||||
continue;
|
||||
}
|
||||
|
||||
String fieldName;
|
||||
if("".equals(annotation.name())) {
|
||||
fieldName = field.getName().substring(0,1).toUpperCase() + field.getName().substring(1);
|
||||
} else{
|
||||
fieldName = annotation.name();
|
||||
}
|
||||
for(int type: annotation.types()){
|
||||
List<String> list = Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
|
||||
list.add(fieldName);
|
||||
Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.put(type, list);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static List<String> getFieldNameList(int metricsType){
|
||||
synchronized (BrokerMetrics.class) {
|
||||
if (Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.isEmpty()) {
|
||||
initialization(BrokerMetrics.class.getDeclaredFields());
|
||||
initialization(BaseMetrics.class.getDeclaredFields());
|
||||
}
|
||||
}
|
||||
return Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(metricsType, new ArrayList<>());
|
||||
}
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.metrics;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
public class TopicMetrics extends BaseMetrics {
|
||||
/**
|
||||
* 集群ID
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* Topic名称
|
||||
*/
|
||||
private String topicName;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
private static void initialization(Field[] fields){
|
||||
for(Field field : fields){
|
||||
FieldSelector annotation = field.getAnnotation(FieldSelector.class);
|
||||
if(annotation ==null){
|
||||
continue;
|
||||
}
|
||||
String fieldName;
|
||||
if("".equals(annotation.name())){
|
||||
String name = field.getName();
|
||||
fieldName = name.substring(0,1).toUpperCase()+name.substring(1);
|
||||
}else{
|
||||
fieldName = annotation.name();
|
||||
}
|
||||
for(int type: annotation.types()){
|
||||
List<String> list = Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
|
||||
list.add(fieldName);
|
||||
Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.put(type, list);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static List<String> getFieldNameList(int type){
|
||||
synchronized (TopicMetrics.class) {
|
||||
if (Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.isEmpty()) {
|
||||
initialization(TopicMetrics.class.getDeclaredFields());
|
||||
initialization(BaseMetrics.class.getDeclaredFields());
|
||||
}
|
||||
}
|
||||
return Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.get(type);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/5/3
|
||||
*/
|
||||
public class AccountDO extends BaseDO {
|
||||
private String username;
|
||||
|
||||
private String password;
|
||||
|
||||
private Integer role;
|
||||
|
||||
public String getUsername() {
|
||||
return username;
|
||||
}
|
||||
|
||||
public void setUsername(String username) {
|
||||
this.username = username;
|
||||
}
|
||||
|
||||
public String getPassword() {
|
||||
return password;
|
||||
}
|
||||
|
||||
public void setPassword(String password) {
|
||||
this.password = password;
|
||||
}
|
||||
|
||||
public Integer getRole() {
|
||||
return role;
|
||||
}
|
||||
|
||||
public void setRole(Integer role) {
|
||||
this.role = role;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AccountDO{" +
|
||||
"username='" + username + '\'' +
|
||||
", password='" + password + '\'' +
|
||||
", role=" + role +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class AlarmRuleDO extends BaseDO {
|
||||
private String alarmName;
|
||||
|
||||
private String strategyExpressions;
|
||||
|
||||
private String strategyFilters;
|
||||
|
||||
private String strategyActions;
|
||||
|
||||
private String principals;
|
||||
|
||||
public String getAlarmName() {
|
||||
return alarmName;
|
||||
}
|
||||
|
||||
public void setAlarmName(String alarmName) {
|
||||
this.alarmName = alarmName;
|
||||
}
|
||||
|
||||
public String getStrategyExpressions() {
|
||||
return strategyExpressions;
|
||||
}
|
||||
|
||||
public void setStrategyExpressions(String strategyExpressions) {
|
||||
this.strategyExpressions = strategyExpressions;
|
||||
}
|
||||
|
||||
public String getStrategyFilters() {
|
||||
return strategyFilters;
|
||||
}
|
||||
|
||||
public void setStrategyFilters(String strategyFilters) {
|
||||
this.strategyFilters = strategyFilters;
|
||||
}
|
||||
|
||||
public String getStrategyActions() {
|
||||
return strategyActions;
|
||||
}
|
||||
|
||||
public void setStrategyActions(String strategyActions) {
|
||||
this.strategyActions = strategyActions;
|
||||
}
|
||||
|
||||
public String getPrincipals() {
|
||||
return principals;
|
||||
}
|
||||
|
||||
public void setPrincipals(String principals) {
|
||||
this.principals = principals;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlarmRuleDO{" +
|
||||
"alarmName='" + alarmName + '\'' +
|
||||
", strategyExpressions='" + strategyExpressions + '\'' +
|
||||
", strategyFilters='" + strategyFilters + '\'' +
|
||||
", strategyActions='" + strategyActions + '\'' +
|
||||
", principals='" + principals + '\'' +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2017/7/25.
|
||||
*/
|
||||
public class BaseDO {
|
||||
protected Long id;
|
||||
|
||||
protected Integer status;
|
||||
|
||||
protected Date gmtCreate;
|
||||
|
||||
protected Date gmtModify;
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Long id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public Integer getStatus() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public void setStatus(Integer status) {
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public Date getGmtCreate() {
|
||||
return gmtCreate;
|
||||
}
|
||||
|
||||
public void setGmtCreate(Date gmtCreate) {
|
||||
this.gmtCreate = gmtCreate;
|
||||
}
|
||||
|
||||
public Date getGmtModify() {
|
||||
return gmtModify;
|
||||
}
|
||||
|
||||
public void setGmtModify(Date gmtModify) {
|
||||
this.gmtModify = gmtModify;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BaseDO{" +
|
||||
"id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/11/25
|
||||
*/
|
||||
public abstract class BaseEntryDO {
|
||||
protected Long id;
|
||||
|
||||
protected Date gmtCreate;
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Long id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public Date getGmtCreate() {
|
||||
return gmtCreate;
|
||||
}
|
||||
|
||||
public void setGmtCreate(Date gmtCreate) {
|
||||
this.gmtCreate = gmtCreate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BaseEntryDO{" +
|
||||
"id=" + id +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
public class BrokerDO extends BaseDO {
|
||||
private Long clusterId;
|
||||
|
||||
private Integer brokerId;
|
||||
|
||||
private String host;
|
||||
|
||||
private Integer port;
|
||||
|
||||
private Long timestamp;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Integer getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(Integer port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public Long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public void setTimestamp(Long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BrokerDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", brokerId=" + brokerId +
|
||||
", host='" + host + '\'' +
|
||||
", port=" + port +
|
||||
", timestamp=" + timestamp +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
public class ClusterDO extends BaseDO{
|
||||
private String clusterName;
|
||||
|
||||
private String zookeeper;
|
||||
|
||||
private String bootstrapServers;
|
||||
|
||||
private String kafkaVersion;
|
||||
|
||||
private Integer alarmFlag;
|
||||
|
||||
private String securityProtocol;
|
||||
|
||||
private String saslMechanism;
|
||||
|
||||
private String saslJaasConfig;
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public void setClusterName(String clusterName) {
|
||||
this.clusterName = clusterName;
|
||||
}
|
||||
|
||||
public String getZookeeper() {
|
||||
return zookeeper;
|
||||
}
|
||||
|
||||
public void setZookeeper(String zookeeper) {
|
||||
this.zookeeper = zookeeper;
|
||||
}
|
||||
|
||||
public String getBootstrapServers() {
|
||||
return bootstrapServers;
|
||||
}
|
||||
|
||||
public void setBootstrapServers(String bootstrapServers) {
|
||||
this.bootstrapServers = bootstrapServers;
|
||||
}
|
||||
|
||||
public String getKafkaVersion() {
|
||||
return kafkaVersion;
|
||||
}
|
||||
|
||||
public void setKafkaVersion(String kafkaVersion) {
|
||||
this.kafkaVersion = kafkaVersion;
|
||||
}
|
||||
|
||||
public Integer getAlarmFlag() {
|
||||
return alarmFlag;
|
||||
}
|
||||
|
||||
public void setAlarmFlag(Integer alarmFlag) {
|
||||
this.alarmFlag = alarmFlag;
|
||||
}
|
||||
|
||||
public String getSecurityProtocol() {
|
||||
return securityProtocol;
|
||||
}
|
||||
|
||||
public void setSecurityProtocol(String securityProtocol) {
|
||||
this.securityProtocol = securityProtocol;
|
||||
}
|
||||
|
||||
public String getSaslMechanism() {
|
||||
return saslMechanism;
|
||||
}
|
||||
|
||||
public void setSaslMechanism(String saslMechanism) {
|
||||
this.saslMechanism = saslMechanism;
|
||||
}
|
||||
|
||||
public String getSaslJaasConfig() {
|
||||
return saslJaasConfig;
|
||||
}
|
||||
|
||||
public void setSaslJaasConfig(String saslJaasConfig) {
|
||||
this.saslJaasConfig = saslJaasConfig;
|
||||
}
|
||||
|
||||
public Integer getStatus() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public void setStatus(Integer status) {
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public Date getGmtCreate() {
|
||||
return gmtCreate;
|
||||
}
|
||||
|
||||
public void setGmtCreate(Date gmtCreate) {
|
||||
this.gmtCreate = gmtCreate;
|
||||
}
|
||||
|
||||
public Date getGmtModify() {
|
||||
return gmtModify;
|
||||
}
|
||||
|
||||
public void setGmtModify(Date gmtModify) {
|
||||
this.gmtModify = gmtModify;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ClusterDO{" +
|
||||
"clusterName='" + clusterName + '\'' +
|
||||
", zookeeper='" + zookeeper + '\'' +
|
||||
", bootstrapServers='" + bootstrapServers + '\'' +
|
||||
", kafkaVersion='" + kafkaVersion + '\'' +
|
||||
", alarmFlag=" + alarmFlag +
|
||||
", securityProtocol='" + securityProtocol + '\'' +
|
||||
", saslMechanism='" + saslMechanism + '\'' +
|
||||
", saslJaasConfig='" + saslJaasConfig + '\'' +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
|
||||
public class ClusterMetricsDO extends BaseEntryDO {
|
||||
private Long clusterId;
|
||||
|
||||
private Integer topicNum = 0;
|
||||
|
||||
private Integer partitionNum = 0;
|
||||
|
||||
private Integer brokerNum = 0;
|
||||
|
||||
private Double bytesInPerSec = 0.0;
|
||||
|
||||
private Double bytesOutPerSec = 0.0;
|
||||
|
||||
private Double bytesRejectedPerSec = 0.0;
|
||||
|
||||
private Double messagesInPerSec = 0.0;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public Integer getTopicNum() {
|
||||
return topicNum;
|
||||
}
|
||||
|
||||
public void setTopicNum(Integer topicNum) {
|
||||
this.topicNum = topicNum;
|
||||
}
|
||||
|
||||
public Integer getPartitionNum() {
|
||||
return partitionNum;
|
||||
}
|
||||
|
||||
public void setPartitionNum(Integer partitionNum) {
|
||||
this.partitionNum = partitionNum;
|
||||
}
|
||||
|
||||
public Integer getBrokerNum() {
|
||||
return brokerNum;
|
||||
}
|
||||
|
||||
public void setBrokerNum(Integer brokerNum) {
|
||||
this.brokerNum = brokerNum;
|
||||
}
|
||||
|
||||
public Double getBytesInPerSec() {
|
||||
return bytesInPerSec;
|
||||
}
|
||||
|
||||
public void setBytesInPerSec(Double bytesInPerSec) {
|
||||
this.bytesInPerSec = bytesInPerSec;
|
||||
}
|
||||
|
||||
public Double getBytesOutPerSec() {
|
||||
return bytesOutPerSec;
|
||||
}
|
||||
|
||||
public void setBytesOutPerSec(Double bytesOutPerSec) {
|
||||
this.bytesOutPerSec = bytesOutPerSec;
|
||||
}
|
||||
|
||||
public Double getBytesRejectedPerSec() {
|
||||
return bytesRejectedPerSec;
|
||||
}
|
||||
|
||||
public void setBytesRejectedPerSec(Double bytesRejectedPerSec) {
|
||||
this.bytesRejectedPerSec = bytesRejectedPerSec;
|
||||
}
|
||||
|
||||
public Double getMessagesInPerSec() {
|
||||
return messagesInPerSec;
|
||||
}
|
||||
|
||||
public void setMessagesInPerSec(Double messagesInPerSec) {
|
||||
this.messagesInPerSec = messagesInPerSec;
|
||||
}
|
||||
|
||||
public void addBrokerMetrics(BrokerMetrics brokerMetrics) {
|
||||
this.clusterId = brokerMetrics.getClusterId();
|
||||
this.brokerNum += 1;
|
||||
this.bytesInPerSec += brokerMetrics.getBytesInPerSec();
|
||||
this.bytesOutPerSec += brokerMetrics.getBytesOutPerSec();
|
||||
this.bytesRejectedPerSec += brokerMetrics.getBytesRejectedPerSec();
|
||||
this.messagesInPerSec += brokerMetrics.getMessagesInPerSec();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ClusterMetricsDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicNum=" + topicNum +
|
||||
", partitionNum=" + partitionNum +
|
||||
", brokerNum=" + brokerNum +
|
||||
", bytesInPerSec=" + bytesInPerSec +
|
||||
", bytesOutPerSec=" + bytesOutPerSec +
|
||||
", bytesRejectedPerSec=" + bytesRejectedPerSec +
|
||||
", messagesInPerSec=" + messagesInPerSec +
|
||||
", id=" + id +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/2/28
|
||||
*/
|
||||
public class ControllerDO extends BaseEntryDO {
|
||||
private Long clusterId;
|
||||
|
||||
private Integer brokerId;
|
||||
|
||||
private String host;
|
||||
|
||||
private Long timestamp;
|
||||
|
||||
private Integer version;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public void setTimestamp(Long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
public Integer getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(Integer version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ControllerDO{" +
|
||||
"id=" + id +
|
||||
", clusterId=" + clusterId +
|
||||
", brokerId=" + brokerId +
|
||||
", host='" + host + '\'' +
|
||||
", timestamp=" + timestamp +
|
||||
", version=" + version +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static ControllerDO newInstance(Long clusterId,
|
||||
Integer brokerId,
|
||||
String host,
|
||||
Long timestamp,
|
||||
Integer version) {
|
||||
ControllerDO controllerDO = new ControllerDO();
|
||||
controllerDO.setClusterId(clusterId);
|
||||
controllerDO.setBrokerId(brokerId);
|
||||
controllerDO.setHost(host == null? "": host);
|
||||
controllerDO.setTimestamp(timestamp);
|
||||
controllerDO.setVersion(version);
|
||||
return controllerDO;
|
||||
}
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
/**
|
||||
* migrate topic task do
|
||||
* @author zengqiao
|
||||
* @date 19/4/16
|
||||
*/
|
||||
public class MigrationTaskDO extends BaseDO {
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private String reassignmentJson;
|
||||
|
||||
private Long throttle;
|
||||
|
||||
private String operator;
|
||||
|
||||
private String description;
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public String getOperator() {
|
||||
return operator;
|
||||
}
|
||||
|
||||
public void setOperator(String operator) {
|
||||
this.operator = operator;
|
||||
}
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public String getReassignmentJson() {
|
||||
return reassignmentJson;
|
||||
}
|
||||
|
||||
public void setReassignmentJson(String reassignmentJson) {
|
||||
this.reassignmentJson = reassignmentJson;
|
||||
}
|
||||
|
||||
public Long getThrottle() {
|
||||
return throttle;
|
||||
}
|
||||
|
||||
public void setThrottle(Long throttle) {
|
||||
this.throttle = throttle;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "MigrationTaskDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", reassignmentJson='" + reassignmentJson + '\'' +
|
||||
", throttle=" + throttle +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static MigrationTaskDO createInstance(Long clusterId,
|
||||
String topicName,
|
||||
String reassignmentJson,
|
||||
Long throttle,
|
||||
String description) {
|
||||
MigrationTaskDO migrationTaskDO = new MigrationTaskDO();
|
||||
migrationTaskDO.setClusterId(clusterId);
|
||||
migrationTaskDO.setTopicName(topicName);
|
||||
migrationTaskDO.setReassignmentJson(reassignmentJson);
|
||||
migrationTaskDO.setThrottle(throttle);
|
||||
migrationTaskDO.setDescription(description);
|
||||
return migrationTaskDO;
|
||||
}
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class OperationHistoryDO extends BaseEntryDO {
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private String operator;
|
||||
|
||||
private String operation;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public String getOperator() {
|
||||
return operator;
|
||||
}
|
||||
|
||||
public void setOperator(String operator) {
|
||||
this.operator = operator;
|
||||
}
|
||||
|
||||
public String getOperation() {
|
||||
return operation;
|
||||
}
|
||||
|
||||
public void setOperation(String operation) {
|
||||
this.operation = operation;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "OperationHistoryDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", operator='" + operator + '\'' +
|
||||
", operation='" + operation + '\'' +
|
||||
", id=" + id +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static OperationHistoryDO newInstance(Long clusterId, String topicName, String operator, String operation) {
|
||||
OperationHistoryDO operationHistoryDO = new OperationHistoryDO();
|
||||
operationHistoryDO.setClusterId(clusterId);
|
||||
operationHistoryDO.setTopicName(topicName);
|
||||
operationHistoryDO.setOperator(operator);
|
||||
operationHistoryDO.setOperation(operation);
|
||||
return operationHistoryDO;
|
||||
}
|
||||
}
|
||||
@@ -1,134 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class OrderPartitionDO extends BaseDO{
|
||||
private Long clusterId;
|
||||
|
||||
private String clusterName;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private String applicant;
|
||||
|
||||
private Integer partitionNum;
|
||||
|
||||
private String brokerList;
|
||||
|
||||
private Long peakBytesIn;
|
||||
|
||||
private String description;
|
||||
|
||||
private Integer orderStatus;
|
||||
|
||||
private String approver;
|
||||
|
||||
private String opinion;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public void setClusterName(String clusterName) {
|
||||
this.clusterName = clusterName;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public String getApplicant() {
|
||||
return applicant;
|
||||
}
|
||||
|
||||
public void setApplicant(String applicant) {
|
||||
this.applicant = applicant;
|
||||
}
|
||||
|
||||
public Integer getPartitionNum() {
|
||||
return partitionNum;
|
||||
}
|
||||
|
||||
public void setPartitionNum(Integer partitionNum) {
|
||||
this.partitionNum = partitionNum;
|
||||
}
|
||||
|
||||
public String getBrokerList() {
|
||||
return brokerList;
|
||||
}
|
||||
|
||||
public void setBrokerList(String brokerList) {
|
||||
this.brokerList = brokerList;
|
||||
}
|
||||
|
||||
public Long getPeakBytesIn() {
|
||||
return peakBytesIn;
|
||||
}
|
||||
|
||||
public void setPeakBytesIn(Long peakBytesIn) {
|
||||
this.peakBytesIn = peakBytesIn;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public Integer getOrderStatus() {
|
||||
return orderStatus;
|
||||
}
|
||||
|
||||
public void setOrderStatus(Integer orderStatus) {
|
||||
this.orderStatus = orderStatus;
|
||||
}
|
||||
|
||||
public String getApprover() {
|
||||
return approver;
|
||||
}
|
||||
|
||||
public void setApprover(String approver) {
|
||||
this.approver = approver;
|
||||
}
|
||||
|
||||
public String getOpinion() {
|
||||
return opinion;
|
||||
}
|
||||
|
||||
public void setOpinion(String opinion) {
|
||||
this.opinion = opinion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "OrderPartitionDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", clusterName='" + clusterName + '\'' +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", applicant='" + applicant + '\'' +
|
||||
", partitionNum=" + partitionNum +
|
||||
", brokerList='" + brokerList + '\'' +
|
||||
", peakBytesIn=" + peakBytesIn +
|
||||
", description='" + description + '\'' +
|
||||
", orderStatus=" + orderStatus +
|
||||
", approver='" + approver + '\'' +
|
||||
", opinion='" + opinion + '\'' +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,178 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class OrderTopicDO extends BaseDO {
|
||||
private Long clusterId;
|
||||
|
||||
private String clusterName;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private Long retentionTime;
|
||||
|
||||
private Integer partitionNum;
|
||||
|
||||
private Integer replicaNum;
|
||||
|
||||
private String regions;
|
||||
|
||||
private String brokers;
|
||||
|
||||
private Long peakBytesIn;
|
||||
|
||||
private String applicant;
|
||||
|
||||
private String principals;
|
||||
|
||||
private String description;
|
||||
|
||||
private Integer orderStatus;
|
||||
|
||||
private String approver;
|
||||
|
||||
private String opinion;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public void setClusterName(String clusterName) {
|
||||
this.clusterName = clusterName;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public Long getRetentionTime() {
|
||||
return retentionTime;
|
||||
}
|
||||
|
||||
public void setRetentionTime(Long retentionTime) {
|
||||
this.retentionTime = retentionTime;
|
||||
}
|
||||
|
||||
public Integer getPartitionNum() {
|
||||
return partitionNum;
|
||||
}
|
||||
|
||||
public void setPartitionNum(Integer partitionNum) {
|
||||
this.partitionNum = partitionNum;
|
||||
}
|
||||
|
||||
public Integer getReplicaNum() {
|
||||
return replicaNum;
|
||||
}
|
||||
|
||||
public void setReplicaNum(Integer replicaNum) {
|
||||
this.replicaNum = replicaNum;
|
||||
}
|
||||
|
||||
public String getRegions() {
|
||||
return regions;
|
||||
}
|
||||
|
||||
public void setRegions(String regions) {
|
||||
this.regions = regions;
|
||||
}
|
||||
|
||||
public String getBrokers() {
|
||||
return brokers;
|
||||
}
|
||||
|
||||
public void setBrokers(String brokers) {
|
||||
this.brokers = brokers;
|
||||
}
|
||||
|
||||
public Long getPeakBytesIn() {
|
||||
return peakBytesIn;
|
||||
}
|
||||
|
||||
public void setPeakBytesIn(Long peakBytesIn) {
|
||||
this.peakBytesIn = peakBytesIn;
|
||||
}
|
||||
|
||||
public String getApplicant() {
|
||||
return applicant;
|
||||
}
|
||||
|
||||
public void setApplicant(String applicant) {
|
||||
this.applicant = applicant;
|
||||
}
|
||||
|
||||
public String getPrincipals() {
|
||||
return principals;
|
||||
}
|
||||
|
||||
public void setPrincipals(String principals) {
|
||||
this.principals = principals;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public Integer getOrderStatus() {
|
||||
return orderStatus;
|
||||
}
|
||||
|
||||
public void setOrderStatus(Integer orderStatus) {
|
||||
this.orderStatus = orderStatus;
|
||||
}
|
||||
|
||||
public String getApprover() {
|
||||
return approver;
|
||||
}
|
||||
|
||||
public void setApprover(String approver) {
|
||||
this.approver = approver;
|
||||
}
|
||||
|
||||
public String getOpinion() {
|
||||
return opinion;
|
||||
}
|
||||
|
||||
public void setOpinion(String opinion) {
|
||||
this.opinion = opinion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "OrderTopicDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", clusterName='" + clusterName + '\'' +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", retentionTime=" + retentionTime +
|
||||
", partitionNum=" + partitionNum +
|
||||
", replicaNum=" + replicaNum +
|
||||
", regions='" + regions + '\'' +
|
||||
", brokers='" + brokers + '\'' +
|
||||
", peakBytesIn=" + peakBytesIn +
|
||||
", applicant='" + applicant + '\'' +
|
||||
", principals='" + principals + '\'' +
|
||||
", description='" + description + '\'' +
|
||||
", orderStatus=" + orderStatus +
|
||||
", approver='" + approver + '\'' +
|
||||
", opinion='" + opinion + '\'' +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class RegionDO extends BaseDO{
|
||||
private String regionName;
|
||||
|
||||
private Long clusterId;
|
||||
|
||||
private String brokerList;
|
||||
|
||||
private Integer level;
|
||||
|
||||
private String description;
|
||||
|
||||
private String operator;
|
||||
|
||||
public String getRegionName() {
|
||||
return regionName;
|
||||
}
|
||||
|
||||
public void setRegionName(String regionName) {
|
||||
this.regionName = regionName;
|
||||
}
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getBrokerList() {
|
||||
return brokerList;
|
||||
}
|
||||
|
||||
public void setBrokerList(String brokerList) {
|
||||
this.brokerList = brokerList;
|
||||
}
|
||||
|
||||
public Integer getLevel() {
|
||||
return level;
|
||||
}
|
||||
|
||||
public void setLevel(Integer level) {
|
||||
this.level = level;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public String getOperator() {
|
||||
return operator;
|
||||
}
|
||||
|
||||
public void setOperator(String operator) {
|
||||
this.operator = operator;
|
||||
}
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class TopicDO extends BaseDO{
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
private String applicant;
|
||||
|
||||
private String principals;
|
||||
|
||||
private String description;
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
public String getApplicant() {
|
||||
return applicant;
|
||||
}
|
||||
|
||||
public void setApplicant(String applicant) {
|
||||
this.applicant = applicant;
|
||||
}
|
||||
|
||||
public String getPrincipals() {
|
||||
return principals;
|
||||
}
|
||||
|
||||
public void setPrincipals(String principals) {
|
||||
this.principals = principals;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public void setDescription(String description) {
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicDO{" +
|
||||
"clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", applicant='" + applicant + '\'' +
|
||||
", principals='" + principals + '\'' +
|
||||
", description='" + description + '\'' +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po;
|
||||
|
||||
public class TopicFavoriteDO extends BaseDO{
|
||||
private String username;
|
||||
|
||||
private Long clusterId;
|
||||
|
||||
private String topicName;
|
||||
|
||||
public String getUsername() {
|
||||
return username;
|
||||
}
|
||||
|
||||
public void setUsername(String username) {
|
||||
this.username = username;
|
||||
}
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(Long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public String getTopicName() {
|
||||
return topicName;
|
||||
}
|
||||
|
||||
public void setTopicName(String topicName) {
|
||||
this.topicName = topicName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicFavoriteDO{" +
|
||||
"username='" + username + '\'' +
|
||||
", clusterId=" + clusterId +
|
||||
", topicName='" + topicName + '\'' +
|
||||
", id=" + id +
|
||||
", status=" + status +
|
||||
", gmtCreate=" + gmtCreate +
|
||||
", gmtModify=" + gmtModify +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po.query;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/2
|
||||
*/
|
||||
public class AlarmRuleQueryOption extends BaseQueryOption {
|
||||
private String alarmName;
|
||||
|
||||
public String getAlarmName() {
|
||||
return alarmName;
|
||||
}
|
||||
|
||||
public void setAlarmName(String alarmName) {
|
||||
this.alarmName = alarmName;
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po.query;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/2
|
||||
*/
|
||||
public class BaseQueryOption {
|
||||
protected Long id;
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Long id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BaseQueryOption{" +
|
||||
"id=" + id +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.po.query;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/4
|
||||
*/
|
||||
public class ClusterQueryOption extends BaseQueryOption {
|
||||
private String clusterName;
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public void setClusterName(String clusterName) {
|
||||
this.clusterName = clusterName;
|
||||
}
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*
|
||||
* 存储Broker的元信息, 元信息对应的ZK节点是/brokers/ids/{brokerId}
|
||||
* 节点结构:
|
||||
* {
|
||||
* "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"},
|
||||
* "endpoints":["SASL_PLAINTEXT://10.179.162.202:9093"],
|
||||
* "jmx_port":9999,
|
||||
* "host":null,
|
||||
* "timestamp":"1546632983233",
|
||||
* "port":-1,
|
||||
* "version":4
|
||||
* }
|
||||
*/
|
||||
public class BrokerMetadata implements Cloneable {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(TopicMetadata.class);
|
||||
|
||||
private long clusterId;
|
||||
|
||||
private int brokerId;
|
||||
|
||||
private List<String> endpoints;
|
||||
|
||||
private String host;
|
||||
|
||||
private int port;
|
||||
|
||||
//zk上字段对应
|
||||
private int jmx_port;
|
||||
|
||||
private String version;
|
||||
|
||||
private long timestamp;
|
||||
|
||||
public long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public void setClusterId(long clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public int getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(int brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public List<String> getEndpoints() {
|
||||
return endpoints;
|
||||
}
|
||||
|
||||
public void setEndpoints(List<String> endpoints) {
|
||||
this.endpoints = endpoints;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public int getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(int port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public int getJmxPort() {
|
||||
return jmx_port;
|
||||
}
|
||||
|
||||
public void setJmxPort(int jmxPort) {
|
||||
this.jmx_port = jmxPort;
|
||||
}
|
||||
|
||||
public String getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(String version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
public long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public void setTimestamp(long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object clone() {
|
||||
try {
|
||||
return super.clone();
|
||||
} catch (CloneNotSupportedException var3) {
|
||||
LOGGER.error("clone BrokerMetadata failed.", var3);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BrokerMetadata{" +
|
||||
"clusterId=" + clusterId +
|
||||
", brokerId=" + brokerId +
|
||||
", endpoints=" + endpoints +
|
||||
", host='" + host + '\'' +
|
||||
", port=" + port +
|
||||
", jmxPort=" + jmx_port +
|
||||
", version='" + version + '\'' +
|
||||
", timestamp=" + timestamp +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/22
|
||||
*/
|
||||
public class ControllerData {
|
||||
private Integer brokerid;
|
||||
|
||||
private Integer version;
|
||||
|
||||
private Long timestamp;
|
||||
|
||||
public Integer getBrokerid() {
|
||||
return brokerid;
|
||||
}
|
||||
|
||||
public void setBrokerid(Integer brokerid) {
|
||||
this.brokerid = brokerid;
|
||||
}
|
||||
|
||||
public Integer getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(Integer version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
public Long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public void setTimestamp(Long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ControllerData{" +
|
||||
"brokerid=" + brokerid +
|
||||
", version=" + version +
|
||||
", timestamp=" + timestamp +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* 根据/brokers/topics/topic的节点内容定义
|
||||
* @author tukun
|
||||
* @date 2015/11/10.
|
||||
*/
|
||||
public class PartitionMap implements Serializable {
|
||||
|
||||
/**
|
||||
* 版本号
|
||||
*/
|
||||
private int version;
|
||||
|
||||
/**
|
||||
* Map<PartitionId,副本所在的brokerId列表>
|
||||
*/
|
||||
private Map<Integer, List<Integer>> partitions;
|
||||
|
||||
public int getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(int version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
public Map<Integer, List<Integer>> getPartitions() {
|
||||
return partitions;
|
||||
}
|
||||
|
||||
public void setPartitions(Map<Integer, List<Integer>> partitions) {
|
||||
this.partitions = partitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "PartitionMap{" + "version=" + version + ", partitions=" + partitions + '}';
|
||||
}
|
||||
}
|
||||
@@ -1,177 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* PartitionState实例
|
||||
* 对应zookeeper下的state节点信息以及partition的其它信息
|
||||
* @author tukun
|
||||
* @date 2015/11/10.
|
||||
*/
|
||||
public class PartitionState implements Cloneable {
|
||||
/**
|
||||
* partition id
|
||||
*/
|
||||
private int partitionId;
|
||||
|
||||
/**
|
||||
* kafka集群中的中央控制器选举次数
|
||||
*/
|
||||
private int controller_epoch;
|
||||
|
||||
/**
|
||||
* Partition所属的leader broker编号
|
||||
*/
|
||||
private int leader;
|
||||
|
||||
/**
|
||||
* partition的版本号
|
||||
*/
|
||||
private int version;
|
||||
|
||||
/**
|
||||
* 该partition leader选举次数
|
||||
*/
|
||||
private int leader_epoch;
|
||||
|
||||
/**
|
||||
* 同步副本组brokerId列表
|
||||
*/
|
||||
private List<Integer> isr;
|
||||
|
||||
/**
|
||||
* 是否处于复制同步状态
|
||||
*/
|
||||
private boolean isUnderReplicated;
|
||||
|
||||
/**
|
||||
* Partition的offset
|
||||
*/
|
||||
private long offset;
|
||||
|
||||
/**
|
||||
* 被消费的offset
|
||||
*/
|
||||
private long consumeOffset;
|
||||
|
||||
/**
|
||||
* 消费者对应的消费group
|
||||
*/
|
||||
private String consumerGroup;
|
||||
|
||||
public int getPartitionId() {
|
||||
return partitionId;
|
||||
}
|
||||
|
||||
public void setPartitionId(int partitionId) {
|
||||
this.partitionId = partitionId;
|
||||
}
|
||||
|
||||
public int getControllerEpoch() {
|
||||
return controller_epoch;
|
||||
}
|
||||
|
||||
public void setControllerEpoch(int controllerEpoch) {
|
||||
this.controller_epoch = controllerEpoch;
|
||||
}
|
||||
|
||||
public int getLeader() {
|
||||
return leader;
|
||||
}
|
||||
|
||||
public void setLeader(int leader) {
|
||||
this.leader = leader;
|
||||
}
|
||||
|
||||
public int getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(int version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
public int getLeaderEpoch() {
|
||||
return leader_epoch;
|
||||
}
|
||||
|
||||
public void setLeaderEpoch(int leaderEpoch) {
|
||||
this.leader_epoch = leaderEpoch;
|
||||
}
|
||||
|
||||
public List<Integer> getIsr() {
|
||||
return isr;
|
||||
}
|
||||
|
||||
public void setIsr(List<Integer> isr) {
|
||||
this.isr = isr;
|
||||
}
|
||||
|
||||
public boolean isUnderReplicated() {
|
||||
return isUnderReplicated;
|
||||
}
|
||||
|
||||
public void setUnderReplicated(boolean underReplicated) {
|
||||
isUnderReplicated = underReplicated;
|
||||
}
|
||||
|
||||
public long getOffset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
public void setOffset(long offset) {
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public long getConsumeOffset() {
|
||||
return consumeOffset;
|
||||
}
|
||||
|
||||
public void setConsumeOffset(long consumeOffset) {
|
||||
this.consumeOffset = consumeOffset;
|
||||
}
|
||||
|
||||
public String getConsumerGroup() {
|
||||
return consumerGroup;
|
||||
}
|
||||
|
||||
public void setConsumerGroup(String consumerGroup) {
|
||||
this.consumerGroup = consumerGroup;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "PartitionState{" +
|
||||
"partitionId=" + partitionId +
|
||||
", controller_epoch=" + controller_epoch +
|
||||
", leader=" + leader +
|
||||
", version=" + version +
|
||||
", leader_epoch=" + leader_epoch +
|
||||
", isr=" + isr +
|
||||
", isUnderReplicated=" + isUnderReplicated +
|
||||
", offset=" + offset +
|
||||
", consumeOffset=" + consumeOffset +
|
||||
", consumerGroup='" + consumerGroup + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
@Override
|
||||
public PartitionState clone() {
|
||||
try {
|
||||
PartitionState partitionState = (PartitionState) super.clone();
|
||||
partitionState.setPartitionId(this.partitionId);
|
||||
partitionState.setControllerEpoch(this.controller_epoch);
|
||||
partitionState.setLeader(this.leader);
|
||||
partitionState.setVersion(this.version);
|
||||
partitionState.setLeaderEpoch(this.leader_epoch);
|
||||
partitionState.setIsr(new ArrayList<>(this.isr));
|
||||
partitionState.setOffset(this.offset);
|
||||
partitionState.setConsumeOffset(this.consumeOffset);
|
||||
partitionState.setConsumerGroup(this.consumerGroup);
|
||||
return partitionState;
|
||||
} catch (CloneNotSupportedException e) {
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/1/15
|
||||
*/
|
||||
/**
 * Request payload naming the topics whose partitions are to be reassigned.
 * @author zengqiao
 * @date 20/1/15
 */
public class ReassignmentDTO {
    private Integer version;

    private List<Map<String, String>> topics;

    /**
     * Builds a payload for a single topic.
     * @param version   payload format version
     * @param topicName topic to reassign
     */
    public ReassignmentDTO(Integer version, String topicName) {
        this.version = version;
        this.topics = new ArrayList<>();
        Map<String, String> entry = new HashMap<>();
        entry.put("topic", topicName);
        this.topics.add(entry);
    }

    public Integer getVersion() {
        return version;
    }

    public void setVersion(Integer version) {
        this.version = version;
    }

    public List<Map<String, String>> getTopics() {
        return topics;
    }

    public void setTopics(List<Map<String, String>> topics) {
        this.topics = topics;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ReassignmentDTO{");
        sb.append("version=").append(version);
        sb.append(", topics=").append(topics);
        sb.append('}');
        return sb.toString();
    }
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/1/15
|
||||
*/
|
||||
public class ReassignmentElemDTO {
|
||||
private String topic;
|
||||
|
||||
private Integer partition;
|
||||
|
||||
private List<Integer> replicas;
|
||||
|
||||
public String getTopic() {
|
||||
return topic;
|
||||
}
|
||||
|
||||
public void setTopic(String topic) {
|
||||
this.topic = topic;
|
||||
}
|
||||
|
||||
public Integer getPartition() {
|
||||
return partition;
|
||||
}
|
||||
|
||||
public void setPartition(Integer partition) {
|
||||
this.partition = partition;
|
||||
}
|
||||
|
||||
public List<Integer> getReplicas() {
|
||||
return replicas;
|
||||
}
|
||||
|
||||
public void setReplicas(List<Integer> replicas) {
|
||||
this.replicas = replicas;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ReassignmentElemDTO{" +
|
||||
"topic='" + topic + '\'' +
|
||||
", partition=" + partition +
|
||||
", replicas=" + replicas +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/1/15
|
||||
*/
|
||||
public class ReassignmentJsonDTO {
|
||||
private Integer version;
|
||||
|
||||
private List<ReassignmentElemDTO> partitions;
|
||||
|
||||
public Integer getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(Integer version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
public List<ReassignmentElemDTO> getPartitions() {
|
||||
return partitions;
|
||||
}
|
||||
|
||||
public void setPartitions(List<ReassignmentElemDTO> partitions) {
|
||||
this.partitions = partitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ReassignmentJsonDTO{" +
|
||||
"version=" + version +
|
||||
", partitions=" + partitions +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* 存储Topic的元信息, 元信息对应的ZK节点是/brokers/topics/${topicName}
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
public class TopicMetadata implements Cloneable {
|
||||
private String topic; //topic名称
|
||||
|
||||
private PartitionMap partitionMap; //partition所在的Broker
|
||||
|
||||
private Set<Integer> brokerIdSet; //topic所在的broker, 由partitionMap获取得到
|
||||
|
||||
private int replicaNum; //副本数
|
||||
|
||||
private int partitionNum; //分区数
|
||||
|
||||
private long modifyTime; //修改节点的时间
|
||||
|
||||
private long createTime; //创建节点的时间
|
||||
|
||||
public String getTopic() {
|
||||
return topic;
|
||||
}
|
||||
|
||||
public void setTopic(String topic) {
|
||||
this.topic = topic;
|
||||
}
|
||||
|
||||
public int getReplicaNum() {
|
||||
return replicaNum;
|
||||
}
|
||||
|
||||
public void setReplicaNum(int replicaNum) {
|
||||
this.replicaNum = replicaNum;
|
||||
}
|
||||
|
||||
public PartitionMap getPartitionMap() {
|
||||
return partitionMap;
|
||||
}
|
||||
|
||||
public void setPartitionMap(PartitionMap partitionMap) {
|
||||
this.partitionMap = partitionMap;
|
||||
}
|
||||
|
||||
public Set<Integer> getBrokerIdSet() {
|
||||
return brokerIdSet;
|
||||
}
|
||||
|
||||
public void setBrokerIdSet(Set<Integer> brokerIdSet) {
|
||||
this.brokerIdSet = brokerIdSet;
|
||||
}
|
||||
|
||||
public int getPartitionNum() {
|
||||
return partitionNum;
|
||||
}
|
||||
|
||||
public void setPartitionNum(int partitionNum) {
|
||||
this.partitionNum = partitionNum;
|
||||
}
|
||||
|
||||
public long getModifyTime() {
|
||||
return modifyTime;
|
||||
}
|
||||
|
||||
public void setModifyTime(long modifyTime) {
|
||||
this.modifyTime = modifyTime;
|
||||
}
|
||||
|
||||
public long getCreateTime() {
|
||||
return createTime;
|
||||
}
|
||||
|
||||
public void setCreateTime(long createTime) {
|
||||
this.createTime = createTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicMetadata{" +
|
||||
"topic='" + topic + '\'' +
|
||||
", partitionMap=" + partitionMap +
|
||||
", brokerIdSet=" + brokerIdSet +
|
||||
", replicaNum=" + replicaNum +
|
||||
", partitionNum=" + partitionNum +
|
||||
", modifyTime=" + modifyTime +
|
||||
", createTime=" + createTime +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.exception;
|
||||
|
||||
/**
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019/3/15
|
||||
*/
|
||||
/**
 * Unchecked exception raised when property copying fails in CopyUtils.
 * @author huangyiminghappy@163.com
 * @date 2019/3/15
 */
public class CopyException extends RuntimeException {
    private final static long serialVersionUID = 1L;

    public CopyException() {
        super();
    }

    public CopyException(String message) {
        super(message);
    }

    public CopyException(Throwable cause) {
        super(cause);
    }

    public CopyException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
||||
@@ -1,482 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.exception.CopyException;
|
||||
import org.apache.commons.beanutils.PropertyUtils;
|
||||
|
||||
import java.beans.PropertyDescriptor;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.Field;
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* 对象复制新类型和同类型深度克隆工具类
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019/3/15
|
||||
*/
|
||||
public class CopyUtils {

    /**
     * Deep copy of an arbitrary object graph.
     * <p>
     * Immutable wrappers (String, boxed primitives) are returned as-is; the
     * supported collection types are copied element-by-element; primitive
     * arrays are copied; anything else is rebuilt reflectively via its first
     * declared constructor and field-by-field copying.
     * <p>
     * NOTE(review): reflective failures are only printed to stderr and yield
     * {@code null} — callers cannot distinguish "copied null" from "copy failed".
     * NOTE(review): a cyclic object graph presumably recurses without bound
     * here (only the private overload below breaks one level of parent
     * cycles) — confirm with callers.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    public static <T> T deepCopy(T obj) {
        if (obj == null) {
            return null;
        } else if (obj instanceof String) {
            return (T)(String) obj;
        } else if (obj instanceof Integer) {
            return (T)(Integer) obj;
        } else if (obj instanceof Double) {
            return (T)(Double) obj;
        } else if (obj instanceof Byte) {
            return (T)(Byte) obj;
        } else if (obj instanceof Short) {
            return (T)(Short) obj;
        } else if (obj instanceof Long) {
            return (T)(Long) obj;
        } else if (obj instanceof Float) {
            return (T)(Float) obj;
        } else if (obj instanceof Character) {
            return (T)(Character) obj;
        } else if (obj instanceof Boolean) {
            return (T)(Boolean) obj;
        } else if (obj instanceof ArrayList<?>) {
            return (T) arrayListHandler((ArrayList<?>) obj);
        } else if (obj instanceof HashMap<?, ?>) {
            return (T) mapHandler((Map<?, ?>) obj);
        } else if (obj instanceof ConcurrentHashMap<?, ?>) {
            return (T) concurrentMapHandler((Map<?, ?>) obj);
        } else if (obj instanceof TreeMap<?, ?>) {
            return (T) treeMapHandler((Map<?, ?>) obj);
        } else if (obj instanceof LinkedList<?>) {
            return (T) linkedListHandler((LinkedList<?>) obj);
        } else if (obj instanceof HashSet<?>) {
            return (T) hashSetHandler((HashSet<?>) obj);
        } else if (isPrimitiveArray(obj)) {
            return getPrimitiveArray(obj);
        }

        T finObj = null;
        Class rezClass = obj.getClass();
        // NOTE(review): cast() on a null reference is a no-op; this line has
        // no observable effect.
        rezClass.cast(finObj);
        try {
            Constructor<T> constructor = getCompleteConstructor(rezClass);
            finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass));
            copyFields(rezClass, obj, finObj);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return finObj;
    }

    /**
     * Deep copy with one-level cycle protection: {@code parrent} (sic) is the
     * object under construction, so a child field that equals the parent (by
     * toString comparison — see copyFields below) is shared instead of copied.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static <T> T deepCopy(T obj, Object parrent) {
        if (obj == null) {
            return null;
        } else if (obj instanceof String) {
            return (T)String.valueOf((String) obj);
        } else if (obj instanceof Integer) {
            return (T)Integer.valueOf((Integer) obj);
        } else if (obj instanceof Double) {
            return (T)Double.valueOf((Double) obj);
        } else if (obj instanceof Byte) {
            return (T)Byte.valueOf((Byte) obj);
        } else if (obj instanceof Short) {
            return (T)Short.valueOf((Short) obj);
        } else if (obj instanceof Long) {
            return (T)Long.valueOf((Long) obj);
        } else if (obj instanceof Float) {
            return (T)Float.valueOf((Float) obj);
        } else if (obj instanceof Character) {
            return (T)Character.valueOf((Character) obj);
        } else if (obj instanceof Boolean) {
            return (T)Boolean.valueOf((Boolean) obj);
        } else if (obj instanceof ArrayList<?>) {
            return (T) arrayListHandler((ArrayList<?>) obj);
        } else if (obj instanceof HashMap<?, ?>) {
            return (T) mapHandler((Map<?, ?>) obj);
        } else if (obj instanceof ConcurrentHashMap<?, ?>) {
            return (T) concurrentMapHandler((Map<?, ?>) obj);
        } else if (obj instanceof TreeMap<?, ?>) {
            return (T) treeMapHandler((Map<?, ?>) obj);
        } else if (obj instanceof LinkedList<?>) {
            return (T) linkedListHandler((LinkedList<?>) obj);
        } else if (obj instanceof HashSet<?>) {
            return (T) hashSetHandler((HashSet<?>) obj);
        } else if (isPrimitiveArray(obj)) {
            return getPrimitiveArray(obj);
        }

        T finObj = null;
        Class rezClass = obj.getClass();
        // NOTE(review): no-op, as in the public overload above.
        rezClass.cast(finObj);
        try {
            Constructor<T> constructor = getCompleteConstructor(rezClass);
            finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass));
            copyFields(rezClass, obj, finObj, parrent);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return finObj;
    }


    /** Element-wise deep copy of an ArrayList. */
    @SuppressWarnings({"rawtypes", "unchecked"})
    private static ArrayList<?> arrayListHandler(ArrayList<?> obj) {
        ArrayList srcList = obj;
        ArrayList finList = new ArrayList();
        for (int i = 0; i < srcList.size(); i++) {
            finList.add(CopyUtils.deepCopy(srcList.get(i)));
        }
        return finList;
    }

    /** Key- and value-wise deep copy into a new HashMap. */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static <K, V> Map<K, V> mapHandler(Map<K, V> obj) {
        Map<K, V> src = obj;
        Map<K, V> fin = new HashMap<K, V>();
        for (Map.Entry entry : src.entrySet()) {
            K key = (K) CopyUtils.deepCopy(entry.getKey());
            V value = (V) CopyUtils.deepCopy(entry.getValue());
            fin.put(key, value);
        }
        return fin;
    }

    /** Key- and value-wise deep copy into a new ConcurrentHashMap. */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static <K, V> Map<K, V> concurrentMapHandler(Map<K, V> obj) {
        Map<K, V> src = obj;
        Map<K, V> fin = new ConcurrentHashMap<K, V>();
        for (Map.Entry entry : src.entrySet()) {
            K key = (K) CopyUtils.deepCopy(entry.getKey());
            V value = (V) CopyUtils.deepCopy(entry.getValue());
            fin.put(key, value);
        }
        return fin;
    }

    /** Key- and value-wise deep copy into a new TreeMap. */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static <K, V> Map<K, V> treeMapHandler(Map<K, V> obj) {
        Map<K, V> src = obj;
        Map<K, V> fin = new TreeMap<K, V>();
        for (Map.Entry entry : src.entrySet()) {
            K key = (K) CopyUtils.deepCopy(entry.getKey());
            V value = (V) CopyUtils.deepCopy(entry.getValue());
            fin.put(key, value);
        }
        return fin;
    }

    /** Element-wise deep copy of a LinkedList. */
    @SuppressWarnings({"rawtypes", "unchecked"})
    private static LinkedList<?> linkedListHandler(LinkedList<?> obj) {
        LinkedList srcList = obj;
        LinkedList finList = new LinkedList<>();
        for (int i = 0; i < srcList.size(); i++) {
            finList.add(CopyUtils.deepCopy(srcList.get(i)));
        }
        return finList;
    }

    /** Element-wise deep copy of a HashSet. */
    @SuppressWarnings({"rawtypes", "unchecked"})
    private static HashSet<?> hashSetHandler(HashSet<?> obj) {
        HashSet srcList = obj;
        HashSet finList = new HashSet<>();
        for (Object o : srcList) {
            finList.add(CopyUtils.deepCopy(o));
        }
        return finList;
    }


    /** @return true if obj is an array of any primitive type. */
    private static boolean isPrimitiveArray(Object obj) {
        if (obj instanceof byte[] ||
                obj instanceof short[] ||
                obj instanceof int[] ||
                obj instanceof long[] ||
                obj instanceof float[] ||
                obj instanceof double[] ||
                obj instanceof char[] ||
                obj instanceof boolean[]) {
            return true;
        } else {
            return false;
        }
    }

    /** @return true if the canonical type name denotes a primitive array. */
    private static boolean isPrimitiveArray(String type) {
        if ("byte[]".equals(type) ||
                "short[]".equals(type) ||
                "int[]".equals(type) ||
                "long[]".equals(type) ||
                "float[]".equals(type) ||
                "double[]".equals(type) ||
                "char[]".equals(type) ||
                "boolean[]".equals(type)) {
            return true;
        } else {
            return false;
        }
    }

    /** Element-by-element copy of a primitive array; null if not one. */
    @SuppressWarnings("unchecked")
    private static <T> T getPrimitiveArray(T obj) {
        if (obj instanceof int[]) {
            int[] arr = new int[((int[]) obj).length];
            for (int i = 0; i < ((int[]) obj).length; i++) {
                arr[i] = ((int[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof byte[]) {
            byte[] arr = new byte[((byte[]) obj).length];
            for (int i = 0; i < ((byte[]) obj).length; i++) {
                arr[i] = ((byte[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof short[]) {
            short[] arr = new short[((short[]) obj).length];
            for (int i = 0; i < ((short[]) obj).length; i++) {
                arr[i] = ((short[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof long[]) {
            long[] arr = new long[((long[]) obj).length];
            for (int i = 0; i < ((long[]) obj).length; i++) {
                arr[i] = ((long[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof float[]) {
            float[] arr = new float[((float[]) obj).length];
            for (int i = 0; i < ((float[]) obj).length; i++) {
                arr[i] = ((float[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof double[]) {
            double[] arr = new double[((double[]) obj).length];
            for (int i = 0; i < ((double[]) obj).length; i++) {
                arr[i] = ((double[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof char[]) {
            char[] arr = new char[((char[]) obj).length];
            for (int i = 0; i < ((char[]) obj).length; i++) {
                arr[i] = ((char[]) obj)[i];
            }
            return (T) arr;
        } else if (obj instanceof boolean[]) {
            boolean[] arr = new boolean[((boolean[]) obj).length];
            for (int i = 0; i < ((boolean[]) obj).length; i++) {
                arr[i] = ((boolean[]) obj)[i];
            }
            return (T) arr;
        }
        return null;
    }

    /**
     * Placeholder single-element zeroed array for a primitive-array
     * constructor parameter; {@code obj} itself is unused.
     */
    @SuppressWarnings("unchecked")
    private static <T> T getPrimitiveArray(T obj, String type) {
        if ("int[]".equals(type)) {
            int[] arr = new int[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("byte[]".equals(type)) {
            byte[] arr = new byte[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("short[]".equals(type)) {
            short[] arr = new short[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("long[]".equals(type)) {
            long[] arr = new long[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("float[]".equals(type)) {
            float[] arr = new float[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("double[]".equals(type)) {
            double[] arr = new double[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("char[]".equals(type)) {
            char[] arr = new char[1];
            arr[0] = 0;
            return (T) arr;
        } else if ("boolean[]".equals(type)) {
            boolean[] arr = new boolean[1];
            arr[0] = false;
            return (T) arr;
        }
        return null;
    }


    /**
     * Locates a public constructor matching the parameter types of the FIRST
     * declared constructor of the class.
     * NOTE(review): getConstructor only finds public constructors, so classes
     * whose first declared constructor is non-public will throw here.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static Constructor getCompleteConstructor(Class ourClass)
            throws NoSuchMethodException, SecurityException {
        Constructor constructor = null;
        Class[] params = new Class[ourClass.getDeclaredConstructors()[0].getParameterTypes().length];
        for (int i = 0; i < ourClass.getDeclaredConstructors()[0].getParameterTypes().length; i++) {
            params[i] = ourClass.getDeclaredConstructors()[0].getParameterTypes()[i];
        }
        constructor = ourClass.getConstructor(params);
        constructor.setAccessible(true);
        return constructor;
    }

    /**
     * Builds a default-value argument list (zeros / false / nulls /
     * placeholder arrays) for the class's first declared constructor.
     */
    @SuppressWarnings("rawtypes")
    private static Object[] getParamsObjForConstructor(Class ourClass)
            throws NoSuchMethodException, SecurityException {
        Constructor constuctor = null;
        constuctor = ourClass.getDeclaredConstructors()[0];
        constuctor.setAccessible(true);
        Object[] objParams = new Object[constuctor.getParameterTypes().length];
        for (int i = 0; i < constuctor.getParameterTypes().length; i++) {
            String fieldType = constuctor.getParameterTypes()[i].toString();
            // NOTE(review): "short" is missing from this primitive list, so a
            // short parameter falls through to null — confirm intended.
            if ("int".equalsIgnoreCase(fieldType) ||
                    "double".toString().equalsIgnoreCase(fieldType) ||
                    "float".equalsIgnoreCase(fieldType) ||
                    "byte".toString().equalsIgnoreCase(fieldType) ||
                    "char".equalsIgnoreCase(fieldType) ||
                    "long".equalsIgnoreCase(fieldType)) {
                objParams[i] = 0;
            } else if ("boolean".equalsIgnoreCase(fieldType)) {
                objParams[i] = false;
            } else if (isPrimitiveArray(constuctor.getParameterTypes()[i].getCanonicalName())) {
                objParams[i] = getPrimitiveArray(constuctor.getParameterTypes()[i],
                        constuctor.getParameterTypes()[i].getCanonicalName()
                );
            } else {
                objParams[i] = null;
            }
        }
        return objParams;
    }

    /**
     * Copies every declared field from srcObj to finObj: primitives are
     * assigned directly, reference fields are deep-copied.
     * NOTE(review): the Field#modifiers hack used to strip FINAL is rejected
     * on modern JDKs (12+) — confirm the runtime this targets.
     */
    @SuppressWarnings("rawtypes")
    private static <T> void copyFields(Class ourClass, T srcObj, T finObj)
            throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
        Field[] fields = ourClass.getDeclaredFields();
        for (int i = 0; i < fields.length; i++) {
            fields[i].setAccessible(true);
            Field modField = Field.class.getDeclaredField("modifiers");
            modField.setAccessible(true);
            modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL);
            String fieldType = fields[i].getType().toString();
            if ("int".equalsIgnoreCase(fieldType) ||
                    "double".equalsIgnoreCase(fieldType) ||
                    "float".equalsIgnoreCase(fieldType) ||
                    "byte".equalsIgnoreCase(fieldType) ||
                    "char".equalsIgnoreCase(fieldType) ||
                    "boolean".equalsIgnoreCase(fieldType) ||
                    "short".equalsIgnoreCase(fieldType) ||
                    "long".equalsIgnoreCase(fieldType)) {
                fields[i].set(finObj, fields[i].get(srcObj));
            } else {
                fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj));
            }
        }
    }

    /**
     * As {@link #copyFields(Class, Object, Object)}, but a reference field
     * whose toString() equals the parent's is shared rather than copied —
     * a one-level guard against parent/child cycles.
     * NOTE(review): equality-by-toString is a heuristic; distinct objects with
     * equal renderings will be aliased. Also NPEs when the field value is null.
     */
    @SuppressWarnings("rawtypes")
    private static <T> void copyFields(Class ourClass, T srcObj, T finObj, Object parent)
            throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
        Field[] fields = ourClass.getDeclaredFields();
        for (int i = 0; i < fields.length; i++) {
            fields[i].setAccessible(true);
            Field modField = Field.class.getDeclaredField("modifiers");
            modField.setAccessible(true);
            modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL);
            String fieldType = fields[i].getType().toString();
            if ("int".equalsIgnoreCase(fieldType) ||
                    "double".equalsIgnoreCase(fieldType) ||
                    "float".equalsIgnoreCase(fieldType) ||
                    "byte".equalsIgnoreCase(fieldType) ||
                    "char".equalsIgnoreCase(fieldType) ||
                    "boolean".equalsIgnoreCase(fieldType) ||
                    "short".equalsIgnoreCase(fieldType) ||
                    "long".equalsIgnoreCase(fieldType)) {
                fields[i].set(finObj, fields[i].get(srcObj));
            } else {
                if (fields[i].get(srcObj).toString().equals(parent.toString())) {
                    fields[i].set(finObj, fields[i].get(srcObj));
                } else {
                    fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj));
                }
            }
        }
    }

    /**
     * Overwrites a static final field via the modifiers hack.
     * NOTE(review): same JDK-12+ caveat as copyFields.
     */
    static void setFinalStaticField(Field field, Object newValue) throws Exception {
        field.setAccessible(true);
        Field modifiersField = Field.class.getDeclaredField("modifiers");
        modifiersField.setAccessible(true);
        modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
        field.set(null, newValue);
    }

    /**
     * Bean-property copy (commons-beanutils): for every writable target
     * property whose type matches the source's, copies the value, skipping
     * Collection-typed properties. Per-property failures are ignored.
     * @return the target object (possibly unchanged)
     * @throws CopyException if descriptor introspection fails
     */
    public static Object copyProperties(Object target, Object orig) {
        if (target == null || orig == null) {
            return target;
        }

        PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(target);
        try {
            for (int i = 0; i < destDesc.length; i++) {
                Class destType = destDesc[i].getPropertyType();
                Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName());
                if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) {
                    if (!Collection.class.isAssignableFrom(origType)) {
                        try {
                            Object value = PropertyUtils.getProperty(orig, destDesc[i].getName());
                            PropertyUtils.setProperty(target, destDesc[i].getName(), value);
                        } catch (Exception ex) {
                            // Best-effort: a single unreadable/unwritable
                            // property does not abort the whole copy.
                        }
                    }
                }
            }

            return target;
        } catch (Exception ex) {
            throw new CopyException(ex);
        }
    }

    /**
     * As {@link #copyProperties(Object, Object)} but skipping the named
     * properties; here a per-property failure aborts with CopyException.
     * @param ignores property names to skip; may be null
     */
    public static Object copyProperties(Object dest, Object orig, String[] ignores) {
        if (dest == null || orig == null) {
            return dest;
        }

        PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(dest);
        try {
            for (int i = 0; i < destDesc.length; i++) {
                if (contains(ignores, destDesc[i].getName())) {
                    continue;
                }

                Class destType = destDesc[i].getPropertyType();
                Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName());
                if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) {
                    if (!Collection.class.isAssignableFrom(origType)) {
                        Object value = PropertyUtils.getProperty(orig, destDesc[i].getName());
                        PropertyUtils.setProperty(dest, destDesc[i].getName(), value);
                    }
                }
            }

            return dest;
        } catch (Exception ex) {
            throw new CopyException(ex);
        }
    }

    /** @return true if {@code name} appears in {@code ignores} (null-safe). */
    static boolean contains(String[] ignores, String name) {
        boolean ignored = false;
        for (int j = 0; ignores != null && j < ignores.length; j++) {
            if (ignores[j].equals(name)) {
                ignored = true;
                break;
            }
        }
        return ignored;
    }
}
|
||||
@@ -1,15 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils;
|
||||
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* 日期工具
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019-03-20
|
||||
*/
|
||||
/**
 * Date helpers.
 * @author huangyiminghappy@163.com
 * @date 2019-03-20
 */
public class DateUtils {
    /**
     * Converts an epoch-millisecond timestamp to a {@link Date}.
     * @param time epoch milliseconds; may be null
     * @return the corresponding Date, or null when {@code time} is null
     *         (previously a null argument threw NullPointerException via
     *         auto-unboxing)
     */
    public static Date long2Date(Long time) {
        if (time == null) {
            return null;
        }
        return new Date(time);
    }
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils;
|
||||
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
/**
|
||||
* Created by limeng on 2017/12/22
|
||||
*/
|
||||
/**
 * ThreadFactory that names threads "poolName-&lt;poolId&gt;-&lt;threadId&gt;" and
 * applies the configured daemon flag and priority to each created thread.
 * Created by limeng on 2017/12/22
 */
public class DefaultThreadFactory implements ThreadFactory {

    private static final AtomicInteger POOL_ID = new AtomicInteger();
    private final AtomicInteger nextId;
    private final String prefix;
    private final boolean daemon;
    private final int priority;

    /** Non-daemon factory with normal (5) priority. */
    public DefaultThreadFactory(String poolName) {
        this((String) poolName, false, 5);
    }

    /**
     * @param poolName base name for created threads; must not be null
     * @param daemon   whether created threads are daemons
     * @param priority thread priority, 1..10 inclusive
     */
    public DefaultThreadFactory(String poolName, boolean daemon, int priority) {
        this.nextId = new AtomicInteger();
        if (poolName == null) {
            throw new NullPointerException("poolName");
        }
        if (priority < 1 || priority > 10) {
            throw new IllegalArgumentException(
                    "priority: " + priority
                            + " (expected: Thread.MIN_PRIORITY <= priority <= Thread.MAX_PRIORITY)");
        }
        this.prefix = poolName + '-' + POOL_ID.incrementAndGet() + '-';
        this.daemon = daemon;
        this.priority = priority;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread thread = new Thread(r, this.prefix + this.nextId.incrementAndGet());

        try {
            // Adjust only when the inherited settings differ from the
            // configured ones.
            if (thread.isDaemon() != this.daemon) {
                thread.setDaemon(this.daemon);
            }
            if (thread.getPriority() != this.priority) {
                thread.setPriority(this.priority);
            }
        } catch (Exception e) {
            // Best-effort: security restrictions on setDaemon/setPriority
            // must not prevent thread creation.
        }
        return thread;
    }
}
|
||||
@@ -1,36 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils;
|
||||
|
||||
import java.security.MessageDigest;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/3/17
|
||||
*/
|
||||
/**
 * Digest helpers.
 * @author zengqiao
 * @date 20/3/17
 */
public class EncryptUtil {
    private static final char[] HEX_DIGITS = {
            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
    };

    /**
     * MD5 digest of {@code key} rendered as a 32-character lowercase hex string.
     * @param key input text (encoded with the platform default charset)
     * @return the hex digest, or null on any failure
     */
    public static String md5(String key) {
        try {
            MessageDigest digest = MessageDigest.getInstance("MD5");
            digest.update(key.getBytes());
            byte[] raw = digest.digest();

            // Render each byte as two hex nibbles, high nibble first.
            StringBuilder hex = new StringBuilder(raw.length * 2);
            for (byte b : raw) {
                hex.append(HEX_DIGITS[(b >>> 4) & 0xf]);
                hex.append(HEX_DIGITS[b & 0xf]);
            }
            return hex.toString();
        } catch (Exception e) {
            return null;
        }
    }
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.jmx;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import javax.management.remote.JMXConnector;
|
||||
import javax.management.remote.JMXConnectorFactory;
|
||||
import javax.management.remote.JMXServiceURL;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
|
||||
/**
|
||||
* JMXConnector包装类
|
||||
* @author tukun
|
||||
* @date 2015/11/9.
|
||||
*/
|
||||
public class JmxConnectorWrap {
|
||||
private final static Logger logger = LoggerFactory.getLogger(JmxConnectorWrap.class);
|
||||
|
||||
private JMXConnector jmxConnector;
|
||||
|
||||
/**
|
||||
* JMX连接的主机名
|
||||
*/
|
||||
private String host;
|
||||
|
||||
/**
|
||||
* JMX连接端口
|
||||
*/
|
||||
private int port;
|
||||
|
||||
public JmxConnectorWrap(String host, int port) {
|
||||
this.host = host;
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public JMXConnector getJmxConnector() {
|
||||
// 如果JMX连接断开,则进行重新连接
|
||||
if (jmxConnector == null && port != -1) {
|
||||
createJMXConnector();
|
||||
}
|
||||
return jmxConnector;
|
||||
}
|
||||
|
||||
private synchronized void createJMXConnector() {
|
||||
if (jmxConnector != null) {
|
||||
return;
|
||||
}
|
||||
|
||||
String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
|
||||
try {
|
||||
JMXServiceURL url = new JMXServiceURL(jmxUrl);
|
||||
jmxConnector = JMXConnectorFactory.connect(url, null);
|
||||
} catch (MalformedURLException e) {
|
||||
logger.error("JMX url exception, host:{} port:{} jmxUrl:{}", host, port, jmxUrl, e);
|
||||
} catch (IOException e) {
|
||||
logger.error("JMX connect exception, host:{} port:{}.", host, port, e);
|
||||
}
|
||||
logger.info("JMX connect success, host:{} port:{}.", host, port);
|
||||
}
|
||||
|
||||
public void close() {
|
||||
if (jmxConnector == null) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
jmxConnector.close();
|
||||
} catch (IOException e) {
|
||||
logger.warn("close JmxConnector exception, host:{} port:{}.", host, port, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.jmx;
|
||||
|
||||
/**
|
||||
* Mbean的对象封装
|
||||
* @author tukun
|
||||
* @date 2015/11/9.
|
||||
*/
|
||||
/**
 * Descriptor for one monitored MBean attribute.
 * @author tukun
 * @date 2015/11/9.
 */
public class Mbean {
    /**
     * MBean object name.
     */
    private String objectName;

    /**
     * Name of the monitored attribute.
     */
    private String property;

    /**
     * Java type of the monitored attribute's value.
     */
    private Class propertyClass;

    public Mbean(String objectName, String property, Class propertyClass) {
        this.objectName = objectName;
        this.property = property;
        this.propertyClass = propertyClass;
    }

    public String getObjectName() {
        return objectName;
    }

    public String getProperty() {
        return property;
    }

    public Class getPropertyClass() {
        return propertyClass;
    }

    public void setObjectName(String objectName) {
        this.objectName = objectName;
    }

    public void setProperty(String property) {
        this.property = property;
    }

    public void setPropertyClass(Class propertyClass) {
        this.propertyClass = propertyClass;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Mbean{");
        sb.append("objectName='").append(objectName).append('\'');
        sb.append(", property='").append(property).append('\'');
        sb.append(", propertyClass=").append(propertyClass);
        sb.append('}');
        return sb.toString();
    }
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.jmx;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* kafka集群的mbean的object name集合
|
||||
* @author tukun, zengqiao
|
||||
* @date 2015/11/5.
|
||||
*/
|
||||
public class MbeanNameUtil {
|
||||
|
||||
//broker监控参数
|
||||
private static final String MESSAGE_IN_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec";
|
||||
private static final String BYTES_IN_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec";
|
||||
private static final String BYTES_OUT_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec";
|
||||
private static final String BYTES_REJECTED_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec";
|
||||
private static final String FAILED_FETCH_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=FailedFetchRequestsPerSec";
|
||||
private static final String FAILED_PRODUCE_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=FailedProduceRequestsPerSec";
|
||||
private static final String PRODUCE_REQUEST_PER_SEC = "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce";
|
||||
private static final String CONSUMER_REQUEST_PER_SEC = "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=FetchConsumer";
|
||||
private static final String TOTAL_PRODUCE_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=TotalProduceRequestsPerSec";
|
||||
private static final String TOTAL_FETCH_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=TotalFetchRequestsPerSec";
|
||||
|
||||
private static final String REQUEST_HANDLER_AVG_IDLE_PERCENT = "kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent";
|
||||
private static final String NETWORK_PROCESSOR_AVG_IDLE_PERCENT = "kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent";
|
||||
private static final String REQUEST_QUEUE_SIZE = "kafka.network:type=RequestChannel,name=RequestQueueSize";
|
||||
private static final String RESPONSE_QUEUE_SIZE = "kafka.network:type=RequestChannel,name=ResponseQueueSize";
|
||||
private static final String LOG_FLUSH_RATE_AND_TIME_MS = "kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs";
|
||||
private static final String TOTAL_TIME_PRODUCE = "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce";
|
||||
private static final String TOTAL_TIME_FETCH_CONSUMER = "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer";
|
||||
|
||||
private static final String PART_COUNT = "kafka.server:type=ReplicaManager,name=PartitionCount";
|
||||
private static final String PARTITION_OFFSET_PULL = "kafka.log:type=Log,name=LogEndOffset,topic=${topic},partition=${partition}";
|
||||
private static final String UNDER_REPLICATED_PARTITIONS = "kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions";
|
||||
private static final String LEADER_COUNT = "kafka.server:type=ReplicaManager,name=LeaderCount";
|
||||
|
||||
|
||||
// private static final String PRODUCE_REQUEST_TIME = "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=Produce";
|
||||
// private static final String FETCH_REQUEST_TIME = "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=FetchConsumer";
|
||||
|
||||
|
||||
//存储监控的参数name到获取的object_name的映射关系图
|
||||
private static Map<String, Mbean> mbeanNameMap = new HashMap<String, Mbean>();
|
||||
static {
|
||||
//监控参数配置,object_name和监控的属性名
|
||||
mbeanNameMap.put("MessagesInPerSec", new Mbean(MESSAGE_IN_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("BytesInPerSec", new Mbean(BYTES_IN_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("BytesOutPerSec", new Mbean(BYTES_OUT_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("BytesRejectedPerSec", new Mbean(BYTES_REJECTED_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("FailFetchRequestPerSec", new Mbean(FAILED_FETCH_REQUEST_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("FailProduceRequestPerSec", new Mbean(FAILED_PRODUCE_REQUEST_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("ProduceRequestPerSec", new Mbean(PRODUCE_REQUEST_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("FetchConsumerRequestPerSec", new Mbean(CONSUMER_REQUEST_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("TotalProduceRequestsPerSec", new Mbean(TOTAL_PRODUCE_REQUEST_PER_SEC,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("TotalFetchRequestsPerSec", new Mbean(TOTAL_FETCH_REQUEST_PER_SEC,"OneMinuteRate", Double.class));
|
||||
|
||||
|
||||
mbeanNameMap.put("PartitionOffset", new Mbean(PARTITION_OFFSET_PULL,"Value", int.class));
|
||||
|
||||
mbeanNameMap.put("PartitionCount", new Mbean(PART_COUNT,"Value", int.class));
|
||||
mbeanNameMap.put("UnderReplicatedPartitions", new Mbean(UNDER_REPLICATED_PARTITIONS,"Value", int.class));
|
||||
mbeanNameMap.put("LeaderCount", new Mbean(LEADER_COUNT,"Value", int.class));
|
||||
|
||||
mbeanNameMap.put("RequestHandlerAvgIdlePercent", new Mbean(REQUEST_HANDLER_AVG_IDLE_PERCENT,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("NetworkProcessorAvgIdlePercent", new Mbean(NETWORK_PROCESSOR_AVG_IDLE_PERCENT,"Value", Double.class));
|
||||
mbeanNameMap.put("RequestQueueSize", new Mbean(REQUEST_QUEUE_SIZE,"Value", int.class));
|
||||
mbeanNameMap.put("ResponseQueueSize", new Mbean(RESPONSE_QUEUE_SIZE, "Value", int.class));
|
||||
mbeanNameMap.put("LogFlushRateAndTimeMs", new Mbean(LOG_FLUSH_RATE_AND_TIME_MS,"OneMinuteRate", Double.class));
|
||||
mbeanNameMap.put("TotalTimeProduceMean", new Mbean(TOTAL_TIME_PRODUCE,"Mean", Double.class));
|
||||
mbeanNameMap.put("TotalTimeProduce99Th", new Mbean(TOTAL_TIME_PRODUCE,"99thPercentile", Double.class));
|
||||
mbeanNameMap.put("TotalTimeFetchConsumerMean", new Mbean(TOTAL_TIME_FETCH_CONSUMER,"Mean", Double.class));
|
||||
mbeanNameMap.put("TotalTimeFetchConsumer99Th", new Mbean(TOTAL_TIME_FETCH_CONSUMER,"99thPercentile", Double.class));
|
||||
|
||||
// mbeanNameMap.put("ProduceRequestTime", new Mbean(PRODUCE_REQUEST_TIME,"Value"));
|
||||
// mbeanNameMap.put("FetchRequestTime", new Mbean(FETCH_REQUEST_TIME,"Value"));
|
||||
}
|
||||
|
||||
/**
|
||||
* 根据属性名,kafka版本,topic获取相应的Mbean
|
||||
*/
|
||||
public static Mbean getMbean(String name, String topic) {
|
||||
Mbean mbean = mbeanNameMap.get(name);
|
||||
if (mbean == null) {
|
||||
return null;
|
||||
}
|
||||
if (topic != null && !topic.isEmpty()) {
|
||||
return new Mbean(mbean.getObjectName() + ",topic=" + topic, mbean.getProperty(), mbean.getPropertyClass());
|
||||
}
|
||||
return mbean;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,172 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.zk;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
 * Client abstraction over a configuration store (implemented against ZooKeeper
 * by {@code ZkConfigImpl}): node CRUD, data/children watching and distributed locks.
 *
 * Created by limeng on 2017/12/22
 */
public interface ConfigClient {

    /**
     * Register a listener for connection-state changes.
     *
     * @param listener listener to notify on connect/disconnect events
     */
    void addStateChangeListener(StateChangeListener listener);

    /**
     * Check whether a node exists.
     *
     * @param path node path
     * @return true if the node exists
     * @throws ConfigException on any store access failure
     */
    boolean checkPathExists(String path) throws ConfigException;

    /**
     * Get the stat of a node.
     *
     * @param path node path
     * @return the node stat, or null if the node does not exist
     * @throws ConfigException on any store access failure
     */
    Stat getNodeStat(String path) throws ConfigException;

    /**
     * Overwrite the data of an existing node.
     *
     * @param path node path
     * @param data new node content
     * @throws ConfigException on any store access failure
     */
    Stat setNodeStat(String path, String data) throws ConfigException;

    /** Like {@link #setNodeStat}, but creates the node (persistent) first if it is missing. */
    Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException;

    /** Create a persistent-sequential node with the given data and return its actual path. */
    String createPersistentSequential(String path, String data) throws ConfigException;

    /**
     * Create a node with data that survives connection loss.
     * <p/>
     * save is persistent storage; for ephemeral data use register instead.
     *
     * @param path
     * @param data
     * @param <T>
     * @throws ConfigException
     */
    // <T> void save(String path, T data) throws ConfigException;

    /**
     * Create a node with data that survives connection loss, only if it does not exist yet.
     * <p/>
     * save is persistent storage; for ephemeral data use register instead.
     *
     * @param path
     * @param data
     * @param <T>
     * @throws ConfigException
     */
    // <T> void saveIfNotExisted(String path, T data) throws ConfigException;

    // /**
    //  * Register ephemeral data: deleted when the connection drops, re-registered on reconnect.
    //  *
    //  * @param path
    //  * @param data
    //  * @param <T>
    //  * @throws ConfigException
    //  */
    // <T> void register(String path, T data) throws ConfigException;

    /**
     * Read a node's data and deserialize it into the given type.
     *
     * @param path  node path
     * @param clazz target type for deserialization
     * @param <T>   result type
     * @return the deserialized value
     * @throws ConfigException on any store access failure
     */
    <T> T get(String path, Class<T> clazz) throws ConfigException;

    /**
     * Delete a node; children are deleted as well.
     *
     * @param path node path
     * @throws ConfigException on any store access failure
     */
    void delete(String path) throws ConfigException;

    /**
     * Read a node's data as a raw string.
     *
     * @param path node path
     * @return the node content as a string
     * @throws ConfigException on any store access failure
     */
    String get(String path) throws ConfigException;

    /**
     * Watch a node for data changes.
     *
     * @param path     node path
     * @param listener notified with NODE_DATA_CHANGED events
     */
    void watch(String path, StateChangeListener listener) throws ConfigException;

    /**
     * List the children of a node.
     *
     * @param path node path
     * @return child node names
     * @throws ConfigException on any store access failure
     */
    List<String> getChildren(String path) throws ConfigException;

    /**
     * Watch a node's children and report add/update/delete events.
     *
     * @param path     node path
     * @param listener notified with CHILD_ADDED / CHILD_UPDATED / CHILD_DELETED events
     * @throws ConfigException on any store access failure
     */
    void watchChildren(String path, StateChangeListener listener) throws ConfigException;

    /**
     * Stop watching a node's children.
     *
     * @param path node path
     */
    void cancelWatchChildren(String path);

    /**
     * Acquire a distributed lock on a node, optionally storing data on it.
     *
     * @param path      lock node path
     * @param timeoutMS acquisition timeout in milliseconds
     * @param data      optional payload written to the node once locked
     * @param <T>       payload type
     * @throws ConfigException if the lock cannot be acquired within the timeout
     */
    <T> void lock(String path, long timeoutMS, T data) throws ConfigException;

    /**
     * Release a previously acquired node lock.
     *
     * @param path lock node path
     */
    void unLock(String path);

    /**
     * Release all resources (watchers, locks, connection).
     */
    void close();

    // void setConfigClientTracer(ConfigClientTracer configClientTracer);
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.zk;
|
||||
|
||||
/**
 * Callback interface for config-store events: connection-state transitions,
 * node-data changes and child add/update/delete notifications.
 *
 * Created by limeng on 2017/12/22
 */
public interface StateChangeListener {

    /** Event kinds delivered to {@link #onChange}. */
    enum State {
        CONNECTION_RECONNECT,
        CONNECTION_DISCONNECT, NODE_DATA_CHANGED, CHILD_UPDATED, CHILD_ADDED, CHILD_DELETED,
        ;
    }

    /**
     * Invoked on every event.
     *
     * @param state event kind
     * @param path  affected node path; null for connection-level events
     */
    void onChange(State state, String path);
}
|
||||
@@ -1,532 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.zk;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.curator.framework.CuratorFramework;
|
||||
import org.apache.curator.framework.CuratorFrameworkFactory;
|
||||
import org.apache.curator.framework.recipes.cache.*;
|
||||
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
|
||||
import org.apache.curator.framework.state.ConnectionState;
|
||||
import org.apache.curator.framework.state.ConnectionStateListener;
|
||||
import org.apache.curator.retry.ExponentialBackoffRetry;
|
||||
import org.apache.curator.utils.ThreadUtils;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.Lock;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
/**
 * ZooKeeper-backed {@link ConfigClient} built on Apache Curator. Maintains
 * per-path watcher caches (NodeCache for data, PathChildrenCache for children),
 * the set of distributed locks currently held, and forwards Curator
 * connection-state transitions to registered {@link StateChangeListener}s.
 *
 * @author limeng
 * @date 2017/12/22
 */
public class ZkConfigImpl implements ConfigClient, ConnectionStateListener {
    private static final int DEFAULT_SESSION_TIMEOUT_MS = 12000;
    private static final int DEFAULT_CONNECTION_TIMEOUT_MS = 3000;
    // Worker threads for PathChildrenCache event delivery; at least 16.
    private static final int DEFAULT_THREAD_POOL_SIZE = Math.max(Runtime.getRuntime().availableProcessors(), 16);

    private final static Logger logger = LoggerFactory.getLogger(ZkConfigImpl.class);

    // Empty payload placeholder; only referenced by the commented-out
    // save/register implementations kept below.
    final byte[] EMPTY = new byte[0];

    /**
     * Connection-state listeners and their registration locks.
     */
    private final Map<String, java.util.concurrent.locks.Lock> registerLocks = new ConcurrentHashMap<>();
    private Map<String, StateChangeListener> connectionListenerMap = new ConcurrentHashMap<>();
    // NOTE(review): plain HashSet mutated in addStateChangeListener() and iterated in
    // stateChanged() without synchronization — confirm listeners are only added during setup.
    private Set<StateChangeListener> connectionStateListeners = new HashSet<>();

    /**
     * Caches for node-data watchers, keyed by path.
     */
    private final Map<String, java.util.concurrent.locks.Lock> dataPathLocks = new ConcurrentHashMap<>();
    private final Map<String, NodeCache> dataWatchers = new ConcurrentHashMap<>();
    private final Map<String, List<StateChangeListener>> dataListeners = new ConcurrentHashMap<>();

    /**
     * Caches for child-node watchers, keyed by path.
     */
    private final Map<String, java.util.concurrent.locks.Lock> childrenPathLocks = new ConcurrentHashMap<>();
    private final Map<String, PathChildrenCache> childrenWatcher = new ConcurrentHashMap<>();
    private final Map<String, List<StateChangeListener>> childrenListeners = new ConcurrentHashMap<>();

    /**
     * All distributed locks currently held by this client.
     */
    private final Map<String, Lock> lockMap = new ConcurrentHashMap<>();

    private final CuratorFramework curator;
    private final ExecutorService executor;

    /** Connect with default session/connection timeouts. */
    public ZkConfigImpl(String zkAddress) {
        this(zkAddress, DEFAULT_SESSION_TIMEOUT_MS, DEFAULT_CONNECTION_TIMEOUT_MS);
    }

    /** Connect with explicit timeouts and the default event thread-pool size. */
    public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs) {
        this(zkAddress, sessionTimeoutMs, connectionTimeoutMs, DEFAULT_THREAD_POOL_SIZE);
    }

    /**
     * Build and start the Curator client (exponential backoff retry, base 1s, 3 retries),
     * register this instance as the connection-state listener, and create the
     * executor used to deliver PathChildrenCache events.
     */
    public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs, int threadPoolSize) {
        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
        CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().connectString(zkAddress);
        builder.retryPolicy(retryPolicy);
        builder.sessionTimeoutMs(sessionTimeoutMs).connectionTimeoutMs(connectionTimeoutMs);
        curator = builder.build();
        curator.getConnectionStateListenable().addListener(this);
        curator.start();
        executor = Executors.newFixedThreadPool(threadPoolSize, ThreadUtils.newThreadFactory("PathChildrenCache"));
    }

    // Lazily create the per-path lock guarding register operations.
    private synchronized java.util.concurrent.locks.Lock getRegisterLock(String registerPath) {
        registerLocks.putIfAbsent(registerPath, new ReentrantLock());
        return registerLocks.get(registerPath);
    }

    // Lazily create the per-path lock guarding data-watcher setup.
    private synchronized java.util.concurrent.locks.Lock getDataPathLock(String dataPath) {
        dataPathLocks.putIfAbsent(dataPath, new ReentrantLock());
        return dataPathLocks.get(dataPath);
    }

    // Lazily create the per-path lock guarding children-watcher setup.
    private synchronized java.util.concurrent.locks.Lock getChildrenPathLock(String childrenPath) {
        childrenPathLocks.putIfAbsent(childrenPath, new ReentrantLock());
        return childrenPathLocks.get(childrenPath);
    }

    /**
     * Curator connection-state callback. LOST releases all held distributed locks
     * and fans out CONNECTION_DISCONNECT; CONNECTED/RECONNECTED fan out
     * CONNECTION_RECONNECT; every other state is only logged.
     */
    @Override
    public void stateChanged(CuratorFramework client, ConnectionState newState) {

        StateChangeListener.State state;
        switch (newState) {
            case LOST:
                logger.error("[zk] current connection status is {}", newState);
                // Locks are tied to the session; drop them once the session is lost.
                releaseLocks();
                state = StateChangeListener.State.CONNECTION_DISCONNECT;
                break;
            case CONNECTED:
            case RECONNECTED:
                logger.warn("[zk] current connection status is {}", newState);
                state = StateChangeListener.State.CONNECTION_RECONNECT;
                break;
            default:
                logger.info("[zk] current connection status is {}", newState);
                return;
        }
        for (StateChangeListener listener : connectionListenerMap.values()) {
            listener.onChange(state, null);
        }

        for (StateChangeListener listener : connectionStateListeners) {
            listener.onChange(state, null);
        }
    }

    /** {@inheritDoc} */
    @Override
    public void addStateChangeListener(StateChangeListener listener) {
        connectionStateListeners.add(listener);
    }

    /** {@inheritDoc} */
    @Override
    public boolean checkPathExists(String path) throws ConfigException {
        try {
            return curator.checkExists().forPath(path) != null;
        } catch (Exception e) {
            // NOTE(review): the warn log drops the exception; stack trace only survives via the rethrow.
            String info = String.format("[zk] Failed to check EXIST for path [%s]", path);
            logger.warn(info);
            throw new ConfigException(e);
        }
    }

    /** {@inheritDoc} Returns null when the node does not exist. */
    @Override
    public Stat getNodeStat(String path) throws ConfigException {
        try {
            return curator.checkExists().forPath(path);
        } catch (Exception e) {
            String info = String.format("[zk] Failed to get node stat for path [%s]", path);
            logger.warn(info);
            throw new ConfigException(e);
        }
    }

    /** {@inheritDoc} Fails (wrapped) if the node does not exist. */
    @Override
    public Stat setNodeStat(String path, String data) throws ConfigException {
        try {
            return curator.setData().forPath(path, data.getBytes());
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }

    /**
     * {@inheritDoc}
     * Tries a plain setData first; on NoNodeException creates the persistent node
     * and retries. A concurrent creator (NodeExistsException) is tolerated.
     */
    @Override
    public Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException {
        try {
            return curator.setData().forPath(path, data.getBytes());
        } catch (KeeperException.NoNodeException e) {
            try {
                curator.create().withMode(CreateMode.PERSISTENT).forPath(path);
                return setNodeStat(path, data);
            } catch (KeeperException.NodeExistsException nee) {
                // Lost the creation race; the node now exists, so just set the data.
                return setNodeStat(path, data);
            } catch (Exception e2) {
                throw new ConfigException(e2);
            }
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }

    /** {@inheritDoc} */
    @Override
    public String createPersistentSequential(String path, String data) throws ConfigException {
        try {
            return curator.create().withMode(CreateMode.PERSISTENT_SEQUENTIAL).forPath(path, data.getBytes());
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }
//
//    @Override
//    public <T> void save(String path, T data) throws ConfigException {
//        try {
//            byte[] bytes = EMPTY;
//            if (data != null) {
//                bytes = JSON.toJSONBytes(data);
//            }
//            Stat stat = curator.checkExists().forPath(path);
//            if (stat == null) {
//                curator.create().creatingParentsIfNeeded().forPath(path, bytes);
//            } else {
//                curator.setData().forPath(path, bytes);
//            }
//        } catch (Exception e) {
//            logger.warn("create {} failed", path);
//            throw new ConfigException(e);
//        }
//    }
//
//    @Override
//    public <T> void saveIfNotExisted(String path, T data) throws ConfigException {
//        try {
//            byte[] bytes = EMPTY;
//            if (data != null) {
//                bytes = JSON.toJSONBytes(data);
//            }
//            Stat stat = curator.checkExists().forPath(path);
//            if (stat == null) {
//                curator.create().creatingParentsIfNeeded().forPath(path, bytes);
//            }
//        } catch (Exception e) {
//            logger.warn("create {} failed", path, e);
//            throw new ConfigException(e);
//        }
//    }

//    @Override
//    public <T> void register(final String path, final T data) throws ConfigException {
//        java.util.concurrent.locks.Lock registerLock = getRegisterLock(path);
//        registerLock.lock();
//        try {
//            byte[] bytes = EMPTY;
//            if (data != null) {
//                bytes = JSON.toJSONBytes(data);
//            }
//            if (!connectionListenerMap.containsKey(path)) {
//                connectionListenerMap.put(path, new StateChangeListener() {
//                    @Override
//                    public void onChange(State state, String stateChangePath) {
//                        logger.warn("on state change " + state);
//                        switch (state) {
//                            case CONNECTION_RECONNECT:
//                                try {
//                                    register(path, data);
//                                } catch (ConfigException e) {
//                                    logger.warn("register {} failed", path);
//                                }
//                                break;
//                            default:
//                                break;
//                        }
//                    }
//                });
//            }
//            try {
//                deletePath(path);
//                logger.warn("register reconnect delete {} succeed", path);
//            } catch (ConfigException e) {
//                logger.warn("register reconnect delete {} failed", path);
//            }
//            curator.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(path, bytes);
//            logger.info("register reconnect create {} succeed", path);
//        } catch (Exception e) {
//            logger.warn("register reconnect create {} failed", path);
//            throw new ConfigException(e);
//        } finally {
//            registerLock.unlock();
//        }
//    }

    /** {@inheritDoc} Deserializes the node content as JSON into the given type. */
    @Override
    public <T> T get(String path, Class<T> clazz) throws ConfigException {
        try {
            byte[] bytes = curator.getData().forPath(path);
            return JSON.parseObject(bytes, clazz);
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }

    /** {@inheritDoc} Returns the node content decoded with the platform default charset. */
    @Override
    public String get(String path) throws ConfigException {
        try {
            byte[] bytes = curator.getData().forPath(path);
            return new String(bytes);
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }

    /** {@inheritDoc} Also drops any connection listener registered for the path. */
    @Override
    public void delete(String path) throws ConfigException {
        try {
            connectionListenerMap.remove(path);
            if (curator.checkExists().forPath(path) != null) {
                curator.delete().deletingChildrenIfNeeded().forPath(path);
            }
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }

//    private void deletePath(String path) throws ConfigException {
//        try {
//            if (curator.checkExists().forPath(path) != null) {
//                curator.delete().deletingChildrenIfNeeded().forPath(path);
//            }
//        } catch (Exception e) {
//            throw new ConfigException(e);
//        }
//    }

    /**
     * {@inheritDoc}
     * Creates (and starts) one NodeCache per path on first use; each distinct
     * listener gets its own NodeCacheListener adapter. Re-registering the same
     * listener for a path is a no-op.
     */
    @SuppressWarnings("all")
    @Override
    public void watch(final String path, final StateChangeListener listener) throws ConfigException {
        java.util.concurrent.locks.Lock dataLock = getDataPathLock(path);
        dataLock.lock();
        try {
            NodeCache nodeCache = dataWatchers.get(path);
            if (nodeCache == null) {
                nodeCache = new NodeCache(curator, path);
                nodeCache.start();
                dataWatchers.put(path, nodeCache);
                nodeCache.getListenable().addListener(new NodeCacheListener() {
                    @Override
                    public void nodeChanged() throws Exception {
                        listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path);
                    }
                });
                List<StateChangeListener> listeners = new ArrayList<>();
                listeners.add(listener);
                dataListeners.put(path, listeners);
            } else {
                List<StateChangeListener> listeners = dataListeners.get(path);
                Preconditions.checkState(listeners != null);
                if (!listeners.contains(listener)) {
                    listeners.add(listener);
                    nodeCache.getListenable().addListener(new NodeCacheListener() {
                        @Override
                        public void nodeChanged() throws Exception {
                            listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path);
                        }
                    });
                }
            }
        } catch (Exception e) {
            throw new ConfigException(e);
        } finally {
            dataLock.unlock();
        }
    }

    /** {@inheritDoc} */
    @Override
    public List<String> getChildren(String path) throws ConfigException{
        try {
            return curator.getChildren().forPath(path);
        } catch (Exception e) {
            throw new ConfigException(e);
        }
    }

    /**
     * {@inheritDoc}
     * Creates (and starts, POST_INITIALIZED_EVENT mode) one PathChildrenCache per
     * path on first use; each distinct listener gets its own adapter. Events are
     * delivered on the shared executor.
     */
    @Override
    public void watchChildren(final String path, final StateChangeListener listener) throws ConfigException {
        java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path);
        childrenLock.lock();
        try {
            PathChildrenCache pathChildrenCache = childrenWatcher.get(path);
            if (pathChildrenCache == null) {
                pathChildrenCache = new PathChildrenCache(curator, path, false, false, executor);
                pathChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
                childrenWatcher.put(path, pathChildrenCache);

                pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener));
                List<StateChangeListener> listeners = new ArrayList<>();
                listeners.add(listener);
                childrenListeners.put(path, listeners);
            } else {
                List<StateChangeListener> listeners = childrenListeners.get(path);
                Preconditions.checkState(listeners != null);
                if (!listeners.contains(listener)) {
                    listeners.add(listener);
                    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener));
                }
            }
        } catch (Exception e) {
            throw new ConfigException(e);
        } finally {
            childrenLock.unlock();
        }
    }

    /**
     * {@inheritDoc}
     * Closes and removes the PathChildrenCache and its listener list for the path.
     */
    @Override
    public void cancelWatchChildren(String path) {
        java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path);
        childrenLock.lock();
        try {
            PathChildrenCache pathChildrenCache = childrenWatcher.get(path);
            if (pathChildrenCache != null) {
                try {
                    pathChildrenCache.close();
                } catch (IOException e) {
                    logger.warn("close node cache for path {} error", path, e);
                }
            }
            childrenWatcher.remove(path);
            childrenListeners.remove(path);
        } finally {
            childrenLock.unlock();
        }
    }

    /** Adapts Curator child events to {@link StateChangeListener} callbacks. */
    private static class PathChildrenCacheListenerImpl implements PathChildrenCacheListener {
        StateChangeListener listener;

        public PathChildrenCacheListenerImpl(StateChangeListener listener) {
            this.listener = listener;
        }

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            String path = event.getData() == null ? null : event.getData().getPath();
            switch (event.getType()) {
                case CHILD_ADDED:
                    listener.onChange(StateChangeListener.State.CHILD_ADDED, path);
                    break;
                case CHILD_UPDATED:
                    listener.onChange(StateChangeListener.State.CHILD_UPDATED, path);
                    break;
                case CHILD_REMOVED:
                    listener.onChange(StateChangeListener.State.CHILD_DELETED, path);
                    break;
                default:
                    break;
            }
        }
    }

    /**
     * {@inheritDoc}
     * Idempotent while the lock is still held by this process; a stale entry is
     * released and re-acquired. The optional payload is written to the lock node.
     */
    @Override
    public <T> void lock(String path, long timeoutMS, T t) throws ConfigException {
        try {
            Lock lock = lockMap.get(path);
            if (lock != null) {
                if (lock.isAcquiredInThisProcess()) {
                    return;
                }
                lock.release();
                lockMap.remove(path);
            }
            InterProcessSemaphoreMutex mutex = new InterProcessSemaphoreMutex(curator, path);
            boolean locked = mutex.acquire(timeoutMS, TimeUnit.MILLISECONDS);
            if (!locked) {
                throw new ConfigException("lock " + path + " failed " + timeoutMS);
            }
            if (t != null) {
                curator.setData().forPath(path, JSON.toJSONBytes(t));
            }
            lock = new Lock(mutex, path);
            lockMap.put(path, lock);
        } catch (Exception e) {
            logger.warn("lock {} failed", path, e);
            throw new ConfigException(e);
        }
    }

    /** {@inheritDoc} No-op if the lock is not held. */
    @Override
    public void unLock(String path) {
        Lock lock = lockMap.remove(path);
        if (lock != null) {
            lock.release();
        }
    }

    /** Handle pairing a Curator inter-process mutex with its lock path. */
    public class Lock {
        InterProcessSemaphoreMutex mutex;
        String path;

        public Lock(InterProcessSemaphoreMutex mutex, String path) {
            this.mutex = mutex;
            this.path = path;
        }

        /** Remove the bookkeeping entry and release the mutex; release errors are only logged. */
        public void release() {
            lockMap.remove(path);
            try {
                mutex.release();
            } catch (Exception e) {
                logger.warn("release path {} lock error {}", path, e.getMessage());
            }
        }

        /** True while this process still holds the mutex. */
        public boolean isAcquiredInThisProcess() {
            return mutex.isAcquiredInThisProcess();
        }
    }

    /**
     * {@inheritDoc}
     * Closes all watcher caches, releases held locks, closes the Curator client
     * and shuts the event executor down.
     */
    @Override
    public void close() {
        connectionListenerMap.clear();
        connectionStateListeners.clear();
        for (NodeCache nodeCache : dataWatchers.values()) {
            try {
                nodeCache.close();
            } catch (Exception e) {
                logger.warn("close node cache error", e);
            }
        }
        dataWatchers.clear();
        for (PathChildrenCache pathChildrenCache : childrenWatcher.values()) {
            try {
                pathChildrenCache.close();
            } catch (IOException e) {
                logger.warn("close children cache error", e);
            }
        }
        childrenWatcher.clear();
        releaseLocks();
        curator.close();
        executor.shutdown();
    }

    // Release every distributed lock held by this client (used on session LOST and close()).
    private void releaseLocks() {
        for (Lock lock : lockMap.values()) {
            lock.release();
        }
        lockMap.clear();
    }
}
|
||||
@@ -1,165 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.utils.zk;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
 * Helpers for building the ZooKeeper paths used by a Kafka cluster.
 *
 * Storage layout:
 *
 * <pre>
 * /consumers
 *     consumer-group
 *         ids
 *             consumerId
 *         offsets
 *             topic-0
 *                 0 (partition number; node content is the committed offset)
 *                 1
 *                 2
 *             topic-1
 *         owners
 * /brokers
 *     topics
 *         topic-0 (node content is e.g. ("0",[0,1,2]))
 *             partitions
 *                 0
 *                     state (node content holds the leader brokerId, ISR info, etc.)
 *                 1
 *                 2
 *         topic-x
 *     ids
 *         1 (ephemeral node per broker; content holds broker info such as JMX port, host and port)
 *         2
 *         n
 * </pre>
 *
 * @author tukun @ 2015-11-5
 * @version 1.0.0
 */
public class ZkPathUtil {

    public static final String ZOOKEEPER_SEPARATOR = "/";

    public static final String BROKER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "brokers";

    public static final String CONTROLLER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "controller";

    public static final String BROKER_IDS_ROOT = BROKER_ROOT_NODE
            + ZOOKEEPER_SEPARATOR + "ids";

    public static final String BROKER_TOPICS_ROOT = BROKER_ROOT_NODE
            + ZOOKEEPER_SEPARATOR + "topics";

    public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";

    public static final String CONFIG_ROOT_NODE = ZOOKEEPER_SEPARATOR + "config";

    public static final String CONFIG_TOPICS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "topics";

    // Mapping from monitored parameter name to its path template.
    // NOTE(review): the key below keeps its historical "Conusmer" typo — it may be
    // looked up by that exact string elsewhere, so do not "fix" it without checking callers.
    private static Map<String, String> zkPathMap = new HashMap<String, String>();

    static {
        zkPathMap.put("ConusmerPartitionOffset", CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR
                + "${consumerGroup}" + ZOOKEEPER_SEPARATOR
                + "offsets" + ZOOKEEPER_SEPARATOR + "${topic}"
                + ZOOKEEPER_SEPARATOR + "${partition}");
    }
|
||||
|
||||
//for broker目录
|
||||
public static String getBrokerIdNodePath(long brokerId) {
|
||||
return String.format(BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + "%d", brokerId);
|
||||
}
|
||||
|
||||
/** Root node of a topic: /brokers/topics/&lt;topic&gt;. */
public static String getBrokerTopicRoot(String topic) {
    return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topic;
}
|
||||
|
||||
public static String getBrokerTopicPartitionRoot(String topic) {
|
||||
return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topic + ZOOKEEPER_SEPARATOR
|
||||
+ "partitions";
|
||||
}
|
||||
|
||||
public static String getBrokerTopicPartitionStatePath(String topic, int partitionId) {
|
||||
return String.format(getBrokerTopicPartitionRoot(topic) + ZOOKEEPER_SEPARATOR + "%d"
|
||||
+ ZOOKEEPER_SEPARATOR + "state", partitionId);
|
||||
}
|
||||
|
||||
//for consumer
|
||||
public static String getConsumerTopicPartitionOffsetNodePath(String consumerGroup,
|
||||
String topic, int partitionId) {
|
||||
return String.format(CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + "%s" + ZOOKEEPER_SEPARATOR
|
||||
+ "offset" + "%s" + "%d", consumerGroup, topic, partitionId);
|
||||
}
|
||||
|
||||
public static String getConsumerGroupRoot(String consumerGroup) {
|
||||
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup;
|
||||
}
|
||||
|
||||
public static String getConsumerGroupIdsRoot(String consumerGroup) {
|
||||
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
|
||||
+ "ids";
|
||||
}
|
||||
|
||||
public static String getConsumerGroupOffsetRoot(String consumerGroup) {
|
||||
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
|
||||
+ "offsets";
|
||||
}
|
||||
|
||||
public static String getConsumerGroupOwnersRoot(String consumerGroup) {
|
||||
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
|
||||
+ "owners";
|
||||
}
|
||||
|
||||
public static String getConsumerGroupConsumerIdsNodePath(String consumerGroup, String consumerId) {
|
||||
return getConsumerGroupIdsRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + consumerId;
|
||||
}
|
||||
|
||||
public static String getConsumerGroupOffsetTopicNode(String consumerGroup, String topic) {
|
||||
return getConsumerGroupOffsetRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic;
|
||||
}
|
||||
|
||||
public static String getConsumerGroupOffsetTopicPartitionNode(String consumerGroup,
|
||||
String topic, int partitionId) {
|
||||
return getConsumerGroupOffsetTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR
|
||||
+ partitionId;
|
||||
}
|
||||
|
||||
public static String getConsumerGroupOwnersTopicNode(String consumerGroup, String topic) {
|
||||
return getConsumerGroupOwnersRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic;
|
||||
}
|
||||
|
||||
public static String getConsumerGroupOwnersTopicPartitionNode(String consumerGroup,
|
||||
String topic, int partitionId) {
|
||||
return getConsumerGroupOwnersTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR
|
||||
+ partitionId;
|
||||
}
|
||||
|
||||
public static String getConfigTopicNode(String topicName) {
|
||||
return CONFIG_TOPICS_ROOT_NODE + ZOOKEEPER_SEPARATOR + topicName;
|
||||
}
|
||||
|
||||
public static String parseLastPartFromZkPath(String zkPath) {
|
||||
return zkPath.substring(zkPath.lastIndexOf("/") + 1);
|
||||
}
|
||||
|
||||
public static Map<String, String> getZkPathMap() {
|
||||
return zkPathMap;
|
||||
}
|
||||
|
||||
public static void setZkPathMap(Map<String, String> zkPathMap) {
|
||||
ZkPathUtil.zkPathMap = zkPathMap;
|
||||
}
|
||||
|
||||
public static String getControllerRootNode() {
|
||||
return CONTROLLER_ROOT_NODE;
|
||||
}
|
||||
|
||||
public static String getEntityConfigPath(String entityType, String entity) {
|
||||
return getEntityConfigRootPath(entityType) + "/" + entity;
|
||||
}
|
||||
|
||||
public static String getEntityConfigRootPath(String entityType) {
|
||||
return CONFIG_ROOT_NODE + "/" + entityType;
|
||||
}
|
||||
}
|
||||
9510
console/package-lock.json
generated
9510
console/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,47 +0,0 @@
|
||||
{
|
||||
"name": "mobx-ts-example",
|
||||
"version": "1.0.0",
|
||||
"description": "",
|
||||
"scripts": {
|
||||
"start": "webpack-dev-server",
|
||||
"daily-build": "cross-env NODE_ENV=production webpack",
|
||||
"pre-build": "cross-env NODE_ENV=production webpack",
|
||||
"prod-build": "cross-env NODE_ENV=production webpack"
|
||||
},
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"devDependencies": {
|
||||
"@hot-loader/react-dom": "^16.8.6",
|
||||
"@types/echarts": "^4.1.9",
|
||||
"@types/react": "^16.8.8",
|
||||
"@types/react-dom": "^16.8.2",
|
||||
"@types/react-router-dom": "^4.3.1",
|
||||
"antd": "^3.16.1",
|
||||
"clean-webpack-plugin": "^3.0.0",
|
||||
"cross-env": "^7.0.2",
|
||||
"css-loader": "^2.1.0",
|
||||
"echarts": "^4.2.1",
|
||||
"file-loader": "^5.0.2",
|
||||
"html-webpack-plugin": "^3.2.0",
|
||||
"less": "^3.9.0",
|
||||
"less-loader": "^4.1.0",
|
||||
"mini-css-extract-plugin": "^0.6.0",
|
||||
"mobx": "^5.9.4",
|
||||
"mobx-react": "^5.4.3",
|
||||
"moment": "^2.24.0",
|
||||
"optimize-css-assets-webpack-plugin": "^5.0.1",
|
||||
"react": "^16.8.4",
|
||||
"react-hot-loader": "^4.8.4",
|
||||
"react-router-dom": "^5.0.0",
|
||||
"style-loader": "^0.23.1",
|
||||
"terser-webpack-plugin": "^1.2.3",
|
||||
"ts-loader": "^5.3.3",
|
||||
"tsconfig-paths-webpack-plugin": "^3.2.0",
|
||||
"tslint": "^5.13.1",
|
||||
"tslint-react": "^3.6.0",
|
||||
"typescript": "^3.3.3333",
|
||||
"webpack": "^4.29.6",
|
||||
"webpack-cli": "^3.2.3",
|
||||
"webpack-dev-server": "^3.2.1"
|
||||
}
|
||||
}
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 2.7 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user