Compare commits
1313 Commits
v2.0.0-alp
...
v3.4.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e2692a6fc4 | ||
|
|
c18eeb6d55 | ||
|
|
6853862753 | ||
|
|
610af4a9e8 | ||
|
|
ac4ea13be9 | ||
|
|
b6ea4aec19 | ||
|
|
8346453aa3 | ||
|
|
a9eb4ae30e | ||
|
|
cceff91f81 | ||
|
|
009ffeb099 | ||
|
|
e8e05812d0 | ||
|
|
58a421c4b9 | ||
|
|
af916d5a71 | ||
|
|
8b30f78744 | ||
|
|
592dee884a | ||
|
|
715744ca15 | ||
|
|
8a95401364 | ||
|
|
e80f8086d4 | ||
|
|
af82c2e615 | ||
|
|
1369e7b9eb | ||
|
|
ab6afe6dbc | ||
|
|
e24a582067 | ||
|
|
65f8beef32 | ||
|
|
38366809f1 | ||
|
|
530219a317 | ||
|
|
c07e544c50 | ||
|
|
c9308ee4f2 | ||
|
|
95158813b9 | ||
|
|
59e8a416b5 | ||
|
|
f6becbdf2c | ||
|
|
07bd00d60c | ||
|
|
1adfa639ac | ||
|
|
3f817991aa | ||
|
|
3b72f732be | ||
|
|
e2ad3afe3d | ||
|
|
ae04ffdd71 | ||
|
|
cf9d5b6832 | ||
|
|
9c418d3b38 | ||
|
|
128b180c83 | ||
|
|
b60941abc8 | ||
|
|
1a42472fd8 | ||
|
|
18e00f043e | ||
|
|
6385889902 | ||
|
|
ea0c744677 | ||
|
|
d1417bef8c | ||
|
|
a7309612d5 | ||
|
|
6e56688a31 | ||
|
|
a6abfb3ea8 | ||
|
|
ca696dd6e1 | ||
|
|
db40a5cd0a | ||
|
|
55161e439a | ||
|
|
bdffc10ca6 | ||
|
|
b1892c21e2 | ||
|
|
90e5492060 | ||
|
|
42195c3180 | ||
|
|
94b1e508fd | ||
|
|
dd3dcd37e9 | ||
|
|
0a6e9b7633 | ||
|
|
470e471cad | ||
|
|
bd58b48bcb | ||
|
|
0cd071c5c6 | ||
|
|
abaadfb9a8 | ||
|
|
49e7fea6d3 | ||
|
|
d68a19679e | ||
|
|
75be94fbea | ||
|
|
c11aa4fd17 | ||
|
|
cb96fef1a5 | ||
|
|
e98cfbcf91 | ||
|
|
0140b2e898 | ||
|
|
b3b7ab9f6b | ||
|
|
b34edb9b64 | ||
|
|
c2bc0f788d | ||
|
|
3f518c9e63 | ||
|
|
7f7801a5f7 | ||
|
|
e1e02f7c2a | ||
|
|
c497e4cb2d | ||
|
|
e34e3f3e3d | ||
|
|
b3fd494398 | ||
|
|
ffc115cb76 | ||
|
|
7bfe787e39 | ||
|
|
2256e8bbdb | ||
|
|
e975932d41 | ||
|
|
db044caf8b | ||
|
|
82fbea4e5f | ||
|
|
6aaa4b34b8 | ||
|
|
3cb1f03668 | ||
|
|
e61c446410 | ||
|
|
9d0345c9cd | ||
|
|
62f870a342 | ||
|
|
13641c00ba | ||
|
|
769c2c0fbc | ||
|
|
c71865f623 | ||
|
|
258385dc9a | ||
|
|
65238231f0 | ||
|
|
cb22e02fbe | ||
|
|
aa0bec1206 | ||
|
|
793c780015 | ||
|
|
ec6f063450 | ||
|
|
f25c65b98b | ||
|
|
2d99aae779 | ||
|
|
a8847dc282 | ||
|
|
4852c01c88 | ||
|
|
3d6f405b69 | ||
|
|
18e3fbf41d | ||
|
|
ae8cc3092b | ||
|
|
5c26e8947b | ||
|
|
fbe6945d3b | ||
|
|
7dc8f2dc48 | ||
|
|
91c60ce72c | ||
|
|
687eea80c8 | ||
|
|
9bfe3fd1db | ||
|
|
03f81bc6de | ||
|
|
eed9571ffa | ||
|
|
e4651ef749 | ||
|
|
f715cf7a8d | ||
|
|
fad9ddb9a1 | ||
|
|
b6e4f50849 | ||
|
|
5c6911e398 | ||
|
|
a0371ab88b | ||
|
|
fa2abadc25 | ||
|
|
f03460f3cd | ||
|
|
b5683b73c2 | ||
|
|
c062586c7e | ||
|
|
98a5c7b776 | ||
|
|
e204023b1f | ||
|
|
4c5ffccc45 | ||
|
|
fbcf58e19c | ||
|
|
e5c6d00438 | ||
|
|
ab6a4d7099 | ||
|
|
78b2b8a45e | ||
|
|
add2af4f3f | ||
|
|
235c0ed30e | ||
|
|
5bd93aa478 | ||
|
|
f95be2c1b3 | ||
|
|
5110b30f62 | ||
|
|
861faa5df5 | ||
|
|
efdf624c67 | ||
|
|
caccf9cef5 | ||
|
|
6ba3dceb84 | ||
|
|
9b7c41e804 | ||
|
|
346aee8fe7 | ||
|
|
353d781bca | ||
|
|
3ce4bf231a | ||
|
|
d046cb8bf4 | ||
|
|
da95c63503 | ||
|
|
915e48de22 | ||
|
|
256f770971 | ||
|
|
16e251cbe8 | ||
|
|
67743b859a | ||
|
|
c275b42632 | ||
|
|
a02760417b | ||
|
|
0e50bfc5d4 | ||
|
|
eab988e18f | ||
|
|
dd6004b9d4 | ||
|
|
ac7c32acd5 | ||
|
|
f4a219ceef | ||
|
|
a8b56fb613 | ||
|
|
2925a20e8e | ||
|
|
6b3eb05735 | ||
|
|
17e0c39f83 | ||
|
|
4994639111 | ||
|
|
c187b5246f | ||
|
|
6ed6d5ec8a | ||
|
|
0735b332a8 | ||
|
|
344cec19fe | ||
|
|
6ef365e201 | ||
|
|
edfa6a9f71 | ||
|
|
860d0b92e2 | ||
|
|
5bceed7105 | ||
|
|
44a2fe0398 | ||
|
|
218459ad1b | ||
|
|
7db757bc12 | ||
|
|
896a943587 | ||
|
|
cd2c388e68 | ||
|
|
4543a339b7 | ||
|
|
1c4fbef9f2 | ||
|
|
b2f0f69365 | ||
|
|
c4fb18a73c | ||
|
|
5cad7b4106 | ||
|
|
f3c4133cd2 | ||
|
|
d9c59cb3d3 | ||
|
|
7a0db7161b | ||
|
|
6aefc16fa0 | ||
|
|
186dcd07e0 | ||
|
|
e8652d5db5 | ||
|
|
fb5964af84 | ||
|
|
249fe7c700 | ||
|
|
cc2a590b33 | ||
|
|
5b3f3e5575 | ||
|
|
36cf285397 | ||
|
|
4386563c2c | ||
|
|
0123ce4a5a | ||
|
|
c3d47d3093 | ||
|
|
9735c4f885 | ||
|
|
3a3141a361 | ||
|
|
ac30436324 | ||
|
|
7176e418f5 | ||
|
|
ca794f507e | ||
|
|
0f8be4fadc | ||
|
|
7066246e8f | ||
|
|
7d1bb48b59 | ||
|
|
dd0d519677 | ||
|
|
4293d05fca | ||
|
|
2c82baf9fc | ||
|
|
921161d6d0 | ||
|
|
e632c6c13f | ||
|
|
5833a8644c | ||
|
|
fab41e892f | ||
|
|
7a52cf67b0 | ||
|
|
175b8d643a | ||
|
|
6241eb052a | ||
|
|
c2fd0a8410 | ||
|
|
5127b600ec | ||
|
|
feb03aede6 | ||
|
|
47b6c5d86a | ||
|
|
c4a81613f4 | ||
|
|
daeb5c4cec | ||
|
|
38def45ad6 | ||
|
|
4b29a2fdfd | ||
|
|
a165ecaeef | ||
|
|
6637ba4ccc | ||
|
|
2f807eec2b | ||
|
|
636c2c6a83 | ||
|
|
898a55c703 | ||
|
|
8ffe7e7101 | ||
|
|
7661826ea5 | ||
|
|
e456be91ef | ||
|
|
da0a97cabf | ||
|
|
c1031a492a | ||
|
|
3c8aaf528c | ||
|
|
70ff20a2b0 | ||
|
|
6918f4babe | ||
|
|
805a704d34 | ||
|
|
c69c289bc4 | ||
|
|
dd5869e246 | ||
|
|
b51ffb81a3 | ||
|
|
ed0efd6bd2 | ||
|
|
39d2fe6195 | ||
|
|
7471d05c20 | ||
|
|
3492688733 | ||
|
|
a603783615 | ||
|
|
5c9096d564 | ||
|
|
c27786a257 | ||
|
|
81910d1958 | ||
|
|
55d5fc4bde | ||
|
|
f30586b150 | ||
|
|
37037c19f0 | ||
|
|
1a5e2c7309 | ||
|
|
941dd4fd65 | ||
|
|
5f6df3681c | ||
|
|
7d045dbf05 | ||
|
|
4ff4accdc3 | ||
|
|
bbe967c4a8 | ||
|
|
b101cec6fa | ||
|
|
e98ec562a2 | ||
|
|
0e71ecc587 | ||
|
|
0f11a65df8 | ||
|
|
da00c8c877 | ||
|
|
8b177877bb | ||
|
|
ea199dca8d | ||
|
|
88b5833f77 | ||
|
|
127b5be651 | ||
|
|
80f001cdd5 | ||
|
|
30d297cae1 | ||
|
|
a96853db90 | ||
|
|
c1502152c0 | ||
|
|
afda292796 | ||
|
|
163cab78ae | ||
|
|
8f4ff36c09 | ||
|
|
47b6b3577a | ||
|
|
f3eca3b214 | ||
|
|
62f7d3f72f | ||
|
|
26e60d8a64 | ||
|
|
df655a250c | ||
|
|
811fc9b400 | ||
|
|
83df02783c | ||
|
|
6a5efce874 | ||
|
|
fa0ae5e474 | ||
|
|
cafd665a2d | ||
|
|
e8f77a456b | ||
|
|
4510c62ebd | ||
|
|
79864955e1 | ||
|
|
ff26a8d46c | ||
|
|
cc226d552e | ||
|
|
962f89475b | ||
|
|
ec204a1605 | ||
|
|
58d7623938 | ||
|
|
8f4ecfcdc0 | ||
|
|
ef719cedbc | ||
|
|
b7856c892b | ||
|
|
7435a78883 | ||
|
|
f49206b316 | ||
|
|
7d500a0721 | ||
|
|
98a519f20b | ||
|
|
39b655bb43 | ||
|
|
78d56a49fe | ||
|
|
d2e9d1fa01 | ||
|
|
41ff914dc3 | ||
|
|
3ba447fac2 | ||
|
|
e9cc380a2e | ||
|
|
017cac9bbe | ||
|
|
9ad72694af | ||
|
|
e8f9821870 | ||
|
|
bb167b9f8d | ||
|
|
28fbb5e130 | ||
|
|
16101e81e8 | ||
|
|
aced504d2a | ||
|
|
abb064d9d1 | ||
|
|
dc1899a1cd | ||
|
|
442f34278c | ||
|
|
a6dcbcd35b | ||
|
|
2b600e96eb | ||
|
|
177bb80f31 | ||
|
|
63fbe728c4 | ||
|
|
b33020840b | ||
|
|
c5caf7c0d6 | ||
|
|
0f0473db4c | ||
|
|
beadde3e06 | ||
|
|
a423a20480 | ||
|
|
79f0a23813 | ||
|
|
780fdea2cc | ||
|
|
1c0fda1adf | ||
|
|
9cf13e9b30 | ||
|
|
87cd058fd8 | ||
|
|
81b1ec48c2 | ||
|
|
66dd82f4fd | ||
|
|
ce35b23911 | ||
|
|
e79342acf5 | ||
|
|
3fc9f39d24 | ||
|
|
0221fb3a4a | ||
|
|
f009f8b7ba | ||
|
|
b76959431a | ||
|
|
975370b593 | ||
|
|
7275030971 | ||
|
|
99b0be5a95 | ||
|
|
edd3f95fc4 | ||
|
|
479f983b09 | ||
|
|
7650332252 | ||
|
|
8f1a021851 | ||
|
|
ce4df4d5fd | ||
|
|
bd43ae1b5d | ||
|
|
8fa34116b9 | ||
|
|
7e92553017 | ||
|
|
b7e243a693 | ||
|
|
35d4888afb | ||
|
|
b3e8a4f0f6 | ||
|
|
321125caee | ||
|
|
e01427aa4f | ||
|
|
14652e7f7a | ||
|
|
7c05899dbd | ||
|
|
56726b703f | ||
|
|
6237b0182f | ||
|
|
be5b662f65 | ||
|
|
224698355c | ||
|
|
8f47138ecd | ||
|
|
d159746391 | ||
|
|
63df93ea5e | ||
|
|
38948c0daa | ||
|
|
6c610427b6 | ||
|
|
b4cc31c459 | ||
|
|
7d781712c9 | ||
|
|
dd61ce9b2a | ||
|
|
69a7212986 | ||
|
|
ff05a951fd | ||
|
|
89d5357b40 | ||
|
|
7ca3d65c42 | ||
|
|
7b5c2d800f | ||
|
|
f414b47a78 | ||
|
|
44f4e2f0f9 | ||
|
|
2361008bdf | ||
|
|
7377ef3ec5 | ||
|
|
a28d064b7a | ||
|
|
e2e57e8575 | ||
|
|
9d90bd2835 | ||
|
|
7445e68df4 | ||
|
|
ab42625ad2 | ||
|
|
18789a0a53 | ||
|
|
68a37bb56a | ||
|
|
3b33652c47 | ||
|
|
1e0c4c3904 | ||
|
|
04e223de16 | ||
|
|
c4a691aa8a | ||
|
|
ff9dde163a | ||
|
|
eb7efbd1a5 | ||
|
|
8c8c362c54 | ||
|
|
66e119ad5d | ||
|
|
6dedc04a05 | ||
|
|
0cf8bad0df | ||
|
|
95c9582d8b | ||
|
|
7815126ff5 | ||
|
|
a5fa9de54b | ||
|
|
95f1a2c630 | ||
|
|
1e256ae1fd | ||
|
|
9fc9c54fa1 | ||
|
|
1b362b1e02 | ||
|
|
04e3172cca | ||
|
|
1caab7f3f7 | ||
|
|
9d33c725ad | ||
|
|
6ed1d38106 | ||
|
|
0f07ddedaf | ||
|
|
289945b471 | ||
|
|
f331a6d144 | ||
|
|
0c8c12a651 | ||
|
|
028c3bb2fa | ||
|
|
d7a5a0d405 | ||
|
|
5ef5f6e531 | ||
|
|
1d205734b3 | ||
|
|
5edd43884f | ||
|
|
c1992373bc | ||
|
|
ed562f9c8a | ||
|
|
b4d44ef8c7 | ||
|
|
ad0c16a1b4 | ||
|
|
7eabe66853 | ||
|
|
3983d73695 | ||
|
|
161d4c4562 | ||
|
|
9a1e89564e | ||
|
|
0c18c5b4f6 | ||
|
|
3e12ba34f7 | ||
|
|
e71e29391b | ||
|
|
9b7b9a7af0 | ||
|
|
a23819c308 | ||
|
|
6cb1825d96 | ||
|
|
77b8c758dc | ||
|
|
e5a582cfad | ||
|
|
ec83db267e | ||
|
|
bfd026cae7 | ||
|
|
35f1dd8082 | ||
|
|
7ed0e7dd23 | ||
|
|
1a3cbf7a9d | ||
|
|
d9e4abc3de | ||
|
|
a4186085d3 | ||
|
|
26b1846bb4 | ||
|
|
1aa89527a6 | ||
|
|
eac76d7ad0 | ||
|
|
cea0cd56f6 | ||
|
|
c4b897f282 | ||
|
|
47389dbabb | ||
|
|
a2f8b1a851 | ||
|
|
feac0a058f | ||
|
|
27eeac9fd4 | ||
|
|
a14db4b194 | ||
|
|
54ee271a47 | ||
|
|
a3a9be4f7f | ||
|
|
d4f0a832f3 | ||
|
|
7dc533372c | ||
|
|
1737d87713 | ||
|
|
dbb98dea11 | ||
|
|
802b382b36 | ||
|
|
fc82999d45 | ||
|
|
08aa000c07 | ||
|
|
39015b5100 | ||
|
|
0d635ad419 | ||
|
|
9133205915 | ||
|
|
725ac10c3d | ||
|
|
2b76358c8f | ||
|
|
833c360698 | ||
|
|
7da1e67b01 | ||
|
|
7eb86a47dd | ||
|
|
d67e383c28 | ||
|
|
8749d3e1f5 | ||
|
|
30fba21c48 | ||
|
|
d83d35aee9 | ||
|
|
1d3caeea7d | ||
|
|
c8806dbb4d | ||
|
|
e5802c7f50 | ||
|
|
590f684d66 | ||
|
|
8e5a67f565 | ||
|
|
8d2fbce11e | ||
|
|
26916f6632 | ||
|
|
fbfa0d2d2a | ||
|
|
e626b99090 | ||
|
|
203859b71b | ||
|
|
9a25c22f3a | ||
|
|
0a03f41a7c | ||
|
|
56191939c8 | ||
|
|
beb754aaaa | ||
|
|
f234f740ca | ||
|
|
e14679694c | ||
|
|
e06712397e | ||
|
|
b6c6df7ffc | ||
|
|
375c6f56c9 | ||
|
|
0bf85c97b5 | ||
|
|
630e582321 | ||
|
|
a89fe23bdd | ||
|
|
a7a5fa9a31 | ||
|
|
c73a7eee2f | ||
|
|
121f8468d5 | ||
|
|
7b0b6936e0 | ||
|
|
597ea04a96 | ||
|
|
f7f90aeaaa | ||
|
|
227479f695 | ||
|
|
6477fb3fe0 | ||
|
|
4223f4f3c4 | ||
|
|
7288874d72 | ||
|
|
68f76f2daf | ||
|
|
fe6ddebc49 | ||
|
|
12b5acd073 | ||
|
|
a6f1fe07b3 | ||
|
|
85e3f2a946 | ||
|
|
d4f416de14 | ||
|
|
0d9a6702c1 | ||
|
|
d11285cdbf | ||
|
|
5f1f33d2b9 | ||
|
|
474daf752d | ||
|
|
27d1b92690 | ||
|
|
993afa4c19 | ||
|
|
028d891c32 | ||
|
|
0df55ec22d | ||
|
|
579f64774d | ||
|
|
792f8d939d | ||
|
|
e4fb02fcda | ||
|
|
0c14c641d0 | ||
|
|
dba671fd1e | ||
|
|
80d1693722 | ||
|
|
26014a11b2 | ||
|
|
848fddd55a | ||
|
|
97f5f05f1a | ||
|
|
25b82810f2 | ||
|
|
9b1e506fa7 | ||
|
|
7a42996e97 | ||
|
|
dbfcebcf67 | ||
|
|
37c3f69a28 | ||
|
|
5d412890b4 | ||
|
|
1e318a4c40 | ||
|
|
d4549176ec | ||
|
|
61efdf492f | ||
|
|
67ea4d44c8 | ||
|
|
fdae05a4aa | ||
|
|
5efb837ee8 | ||
|
|
584b626d93 | ||
|
|
de25a4ed8e | ||
|
|
2e852e5ca6 | ||
|
|
b11000715a | ||
|
|
b3f8b46f0f | ||
|
|
8d22a0664a | ||
|
|
20756a3453 | ||
|
|
c9b4d45a64 | ||
|
|
83f7f5468b | ||
|
|
59c042ad67 | ||
|
|
d550fc5068 | ||
|
|
6effba69a0 | ||
|
|
9b46956259 | ||
|
|
b5a4a732da | ||
|
|
487862367e | ||
|
|
5b63b9ce67 | ||
|
|
afbcd3e1df | ||
|
|
12b82c1395 | ||
|
|
863b765e0d | ||
|
|
731429c51c | ||
|
|
66f3bc61fe | ||
|
|
4efe35dd51 | ||
|
|
c92461ef93 | ||
|
|
405e6e0c1d | ||
|
|
0d227aef49 | ||
|
|
0e49002f42 | ||
|
|
2e016800e0 | ||
|
|
09f317b991 | ||
|
|
5a48cb1547 | ||
|
|
f632febf33 | ||
|
|
3c53467943 | ||
|
|
d358c0f4f7 | ||
|
|
de977a5b32 | ||
|
|
703d685d59 | ||
|
|
31a5f17408 | ||
|
|
c40ae3c455 | ||
|
|
b71a34279e | ||
|
|
8f8c0c4eda | ||
|
|
3a384f0e34 | ||
|
|
cf7bc11cbd | ||
|
|
be60ae8399 | ||
|
|
8e50d145d5 | ||
|
|
7a3d15525c | ||
|
|
64f32d8b24 | ||
|
|
949d6ba605 | ||
|
|
ceb8db09f4 | ||
|
|
ed05a0ebb8 | ||
|
|
a7cbb76655 | ||
|
|
93cbfa0b1f | ||
|
|
6120613a98 | ||
|
|
dbd00db159 | ||
|
|
befde952f5 | ||
|
|
1aa759e5be | ||
|
|
2de27719c1 | ||
|
|
21db57b537 | ||
|
|
dfe8d09477 | ||
|
|
90dfa22c64 | ||
|
|
0f35427645 | ||
|
|
7909f60ff8 | ||
|
|
9a1a8a4c30 | ||
|
|
fa7ad64140 | ||
|
|
8a0c23339d | ||
|
|
e7ab3aff16 | ||
|
|
d0948797b9 | ||
|
|
04a5e17451 | ||
|
|
47065c8042 | ||
|
|
488c778736 | ||
|
|
d10a7bcc75 | ||
|
|
afe44a2537 | ||
|
|
9eadafe850 | ||
|
|
dab3eefcc0 | ||
|
|
2b9a6b28d8 | ||
|
|
465f98ca2b | ||
|
|
a0312be4fd | ||
|
|
4a5161372b | ||
|
|
4c9921f752 | ||
|
|
6dd72d40ee | ||
|
|
db49c234bb | ||
|
|
4a9df0c4d9 | ||
|
|
461573c2ba | ||
|
|
291992753f | ||
|
|
fcefe7ac38 | ||
|
|
7da712fcff | ||
|
|
2fd8687624 | ||
|
|
639b1f8336 | ||
|
|
ab3b83e42a | ||
|
|
4818629c40 | ||
|
|
61784c860a | ||
|
|
d5667254f2 | ||
|
|
af2b93983f | ||
|
|
8281301cbd | ||
|
|
0043ab8371 | ||
|
|
500eaace82 | ||
|
|
28e8540c78 | ||
|
|
69adf682e2 | ||
|
|
69cd1ff6e1 | ||
|
|
415d67cc32 | ||
|
|
46a2fec79b | ||
|
|
560b322fca | ||
|
|
effe17ac85 | ||
|
|
7699acfc1b | ||
|
|
6e058240b3 | ||
|
|
f005c6bc44 | ||
|
|
7be462599f | ||
|
|
271ab432d9 | ||
|
|
4114777a4e | ||
|
|
9189a54442 | ||
|
|
b95ee762e3 | ||
|
|
9e3c4dc06b | ||
|
|
1891a3ac86 | ||
|
|
9ecdcac06d | ||
|
|
790cb6a2e1 | ||
|
|
4a98e5f025 | ||
|
|
507abc1d84 | ||
|
|
9b732fbbad | ||
|
|
220f1c6fc3 | ||
|
|
7a950c67b6 | ||
|
|
78f625dc8c | ||
|
|
211d26a3ed | ||
|
|
dce2bc6326 | ||
|
|
90e5d7f6f0 | ||
|
|
71d4e0f9e6 | ||
|
|
580b4534e0 | ||
|
|
fc835e09c6 | ||
|
|
c6e782a637 | ||
|
|
1ddfbfc833 | ||
|
|
dbf637fe0f | ||
|
|
110e129622 | ||
|
|
677e9d1b54 | ||
|
|
ad2adb905e | ||
|
|
5e9de7ac14 | ||
|
|
c63fb8380c | ||
|
|
2d39acc224 | ||
|
|
e68358e05f | ||
|
|
a96f10edf0 | ||
|
|
f03d94935b | ||
|
|
9c1320cd95 | ||
|
|
4f2ae588a5 | ||
|
|
eff51034b7 | ||
|
|
18832dc448 | ||
|
|
5262ae8907 | ||
|
|
7f251679fa | ||
|
|
5f5920b427 | ||
|
|
65a16d058a | ||
|
|
a73484d23a | ||
|
|
47887a20c6 | ||
|
|
9465c6f198 | ||
|
|
c09872c8c2 | ||
|
|
b0501cc80d | ||
|
|
f0792db6b3 | ||
|
|
e1514c901b | ||
|
|
e90c5003ae | ||
|
|
92a0d5d52c | ||
|
|
8912cb5323 | ||
|
|
d008c19149 | ||
|
|
e844b6444a | ||
|
|
02606cdce2 | ||
|
|
0081720f0e | ||
|
|
cca1e92868 | ||
|
|
69b774a074 | ||
|
|
5656b03fb4 | ||
|
|
02d0dcbb7f | ||
|
|
7b2e06df12 | ||
|
|
4259ae63d7 | ||
|
|
d7b11803bc | ||
|
|
fed298a6d4 | ||
|
|
51832385b1 | ||
|
|
462303fca0 | ||
|
|
4405703e42 | ||
|
|
23e398e121 | ||
|
|
b17bb89d04 | ||
|
|
5590cebf8f | ||
|
|
1fa043f09d | ||
|
|
3bd0af1451 | ||
|
|
1545962745 | ||
|
|
d032571681 | ||
|
|
33fb0acc7e | ||
|
|
1ec68a91e2 | ||
|
|
a23c113a46 | ||
|
|
371ae2c0a5 | ||
|
|
8f8f6ffa27 | ||
|
|
475fe0d91f | ||
|
|
3d74e60d03 | ||
|
|
83ac83bb28 | ||
|
|
8478fb857c | ||
|
|
7074bdaa9f | ||
|
|
58164294cc | ||
|
|
7c0e9df156 | ||
|
|
bd62212ecb | ||
|
|
2292039b42 | ||
|
|
73f8da8d5a | ||
|
|
e51dbe0ca7 | ||
|
|
482a375e31 | ||
|
|
689c5ce455 | ||
|
|
734a020ecc | ||
|
|
44d537f78c | ||
|
|
b4c60eb910 | ||
|
|
e120b32375 | ||
|
|
de54966d30 | ||
|
|
39a6302c18 | ||
|
|
05ceeea4b0 | ||
|
|
9f8e3373a8 | ||
|
|
42521cbae4 | ||
|
|
b23c35197e | ||
|
|
70f28d9ac4 | ||
|
|
912d73d98a | ||
|
|
2a720fce6f | ||
|
|
e4534c359f | ||
|
|
b91bec15f2 | ||
|
|
67ad5cacb7 | ||
|
|
b4a739476a | ||
|
|
a7bf2085db | ||
|
|
c3802cf48b | ||
|
|
54711c4491 | ||
|
|
fcb52a69c0 | ||
|
|
1b632f9754 | ||
|
|
73d7a0ecdc | ||
|
|
08943593b3 | ||
|
|
c949a88f20 | ||
|
|
a49c11f655 | ||
|
|
a66aed4a88 | ||
|
|
0045c953a0 | ||
|
|
fdce41b451 | ||
|
|
4d5e4d0f00 | ||
|
|
82c9b6481e | ||
|
|
053d4dcb18 | ||
|
|
e1b2c442aa | ||
|
|
0ed8ba8ca4 | ||
|
|
f195847c68 | ||
|
|
5beb13b17e | ||
|
|
7d9ec05062 | ||
|
|
fc604a9eaf | ||
|
|
4f3c1ad9b6 | ||
|
|
6d45ed586c | ||
|
|
1afb633b4f | ||
|
|
34d9f9174b | ||
|
|
3b0c208eff | ||
|
|
05022f8db4 | ||
|
|
3336de457a | ||
|
|
10a27bc29c | ||
|
|
542e5d3c2d | ||
|
|
7372617b14 | ||
|
|
89735a130b | ||
|
|
859cf74bd6 | ||
|
|
e2744ab399 | ||
|
|
16bd065098 | ||
|
|
71c52e6dd7 | ||
|
|
a7f8c3ced3 | ||
|
|
f3f0432c65 | ||
|
|
426ba2d150 | ||
|
|
2790099efa | ||
|
|
f6ba8bc95e | ||
|
|
d6181522c0 | ||
|
|
04cf071ca6 | ||
|
|
e4371b5d02 | ||
|
|
52c52b2a0d | ||
|
|
8f40f10575 | ||
|
|
fe0f6fcd0b | ||
|
|
31b1ad8bb4 | ||
|
|
373680d854 | ||
|
|
9e3bc80495 | ||
|
|
89405fe003 | ||
|
|
b9ea3865a5 | ||
|
|
b5bd643814 | ||
|
|
52ccaeffd5 | ||
|
|
18136c12fd | ||
|
|
dec3f9e75e | ||
|
|
ccc0ee4d18 | ||
|
|
69e9708080 | ||
|
|
5944ba099a | ||
|
|
ada2718b5e | ||
|
|
1f87bd63e7 | ||
|
|
c0f3259cf6 | ||
|
|
e1d5749a40 | ||
|
|
a8d7eb27d9 | ||
|
|
1eecdf3829 | ||
|
|
be8b345889 | ||
|
|
074da389b3 | ||
|
|
4df2dc09fe | ||
|
|
e8d42ba074 | ||
|
|
c036483680 | ||
|
|
2818584db6 | ||
|
|
37585f760d | ||
|
|
f5477a03a1 | ||
|
|
50388425b2 | ||
|
|
725c59eab0 | ||
|
|
7bf1de29a4 | ||
|
|
d90c3fc7dd | ||
|
|
80785ce072 | ||
|
|
44ea896de8 | ||
|
|
d30cb8a0f0 | ||
|
|
6c7b333b34 | ||
|
|
6d34a00e77 | ||
|
|
1f353e10ce | ||
|
|
4e10f8d1c5 | ||
|
|
a22cd853fc | ||
|
|
354e0d6a87 | ||
|
|
dfabe28645 | ||
|
|
fce230da48 | ||
|
|
055ba9bda6 | ||
|
|
ec19c3b4dd | ||
|
|
37aa526404 | ||
|
|
86c1faa40f | ||
|
|
8dcf15d0f9 | ||
|
|
6835e1e680 | ||
|
|
d8f89b8f67 | ||
|
|
ec28eba781 | ||
|
|
5ef8fff5bc | ||
|
|
4f317b76fa | ||
|
|
61672637dc | ||
|
|
ecf6e8f664 | ||
|
|
4115975320 | ||
|
|
21904a8609 | ||
|
|
10b0a3dabb | ||
|
|
b2091e9aed | ||
|
|
f2cb5bd77c | ||
|
|
19c61c52e6 | ||
|
|
b327359183 | ||
|
|
9e9bb72e17 | ||
|
|
a23907e009 | ||
|
|
ad131f5a2c | ||
|
|
dbeae4ca68 | ||
|
|
0fb0e94848 | ||
|
|
95d2a82d35 | ||
|
|
5bc6eb6774 | ||
|
|
3ba81e9aaa | ||
|
|
329a9b59c1 | ||
|
|
39cccd568e | ||
|
|
19b7f6ad8c | ||
|
|
41c000cf47 | ||
|
|
1b8ea61e87 | ||
|
|
22c26e24b1 | ||
|
|
396045177c | ||
|
|
4538593236 | ||
|
|
8086ef355b | ||
|
|
60d038fe46 | ||
|
|
ff0f4463be | ||
|
|
820571d993 | ||
|
|
e311d3767c | ||
|
|
24d7b80244 | ||
|
|
61f99e4d2e | ||
|
|
d5348bcf49 | ||
|
|
5d31d66365 | ||
|
|
29778a0154 | ||
|
|
165c0a5866 | ||
|
|
588323961e | ||
|
|
fd1c0b71c5 | ||
|
|
54fbdcadf9 | ||
|
|
69a30d0cf0 | ||
|
|
b8f9b44f38 | ||
|
|
cbf17d4eb5 | ||
|
|
327e025262 | ||
|
|
6b1e944bba | ||
|
|
668ed4d61b | ||
|
|
312c0584ed | ||
|
|
110d3acb58 | ||
|
|
ddbc60283b | ||
|
|
471bcecfd6 | ||
|
|
0245791b13 | ||
|
|
4794396ce8 | ||
|
|
c7088779d6 | ||
|
|
672905da12 | ||
|
|
47172b13be | ||
|
|
3668a10af6 | ||
|
|
a4e294c03f | ||
|
|
3fd6f4003f | ||
|
|
3eaf5cd530 | ||
|
|
c344fd8ca4 | ||
|
|
09639ca294 | ||
|
|
a81b6dca83 | ||
|
|
b74aefb08f | ||
|
|
fffc0c3add | ||
|
|
757f90aa7a | ||
|
|
022f9eb551 | ||
|
|
6e7b82cfcb | ||
|
|
b5fb24b360 | ||
|
|
b77345222c | ||
|
|
793e81406e | ||
|
|
cef1ec95d2 | ||
|
|
7e1b3c552b | ||
|
|
69736a63b6 | ||
|
|
fb4a9f9056 | ||
|
|
387d89d3af | ||
|
|
65d9ca9d39 | ||
|
|
8c842af4ba | ||
|
|
4faf9262c9 | ||
|
|
be7724c67d | ||
|
|
48d26347f7 | ||
|
|
bdb01ec8b5 | ||
|
|
9047815799 | ||
|
|
05bd94a2cc | ||
|
|
c9f7da84d0 | ||
|
|
bcc124e86a | ||
|
|
48d2733403 | ||
|
|
31fc6e4e56 | ||
|
|
fcdeef0146 | ||
|
|
1cd524c0cc | ||
|
|
0f746917a7 | ||
|
|
a2228d0169 | ||
|
|
e8a679d34b | ||
|
|
1912a42091 | ||
|
|
ca81f96635 | ||
|
|
eb3b8c4b31 | ||
|
|
6740d6d60b | ||
|
|
c46c35b248 | ||
|
|
0b2dcec4bc | ||
|
|
f8e2a4aff4 | ||
|
|
7256db8c4e | ||
|
|
b14d5d9bee | ||
|
|
12e15c3e4b | ||
|
|
51911bf272 | ||
|
|
6dc8061401 | ||
|
|
b8fa4f8797 | ||
|
|
cc0bea7f45 | ||
|
|
4e9124b244 | ||
|
|
f0eabef7b0 | ||
|
|
23e5557958 | ||
|
|
b1d02afa85 | ||
|
|
2edc380f47 | ||
|
|
cea8295c09 | ||
|
|
244bfc993a | ||
|
|
3a272a4493 | ||
|
|
a3300db770 | ||
|
|
b0394ce261 | ||
|
|
3123089790 | ||
|
|
f13cf66676 | ||
|
|
0c8c4d87fb | ||
|
|
066088fdeb | ||
|
|
cf641e41c7 | ||
|
|
5b48322e1b | ||
|
|
9d3f680d58 | ||
|
|
bed28d57e6 | ||
|
|
2538525103 | ||
|
|
6ed798db8c | ||
|
|
8e9d966829 | ||
|
|
be16640f92 | ||
|
|
0e1376dd2e | ||
|
|
0494575aa7 | ||
|
|
bed57534e0 | ||
|
|
1862d631d1 | ||
|
|
c977ce5690 | ||
|
|
84df377516 | ||
|
|
4d9a284f6e | ||
|
|
da7ad8b44a | ||
|
|
4164046323 | ||
|
|
72e743dfd1 | ||
|
|
7eb7edaf0a | ||
|
|
49368aaf76 | ||
|
|
b8c07a966f | ||
|
|
c6bcc0e3aa | ||
|
|
7719339f23 | ||
|
|
8ad64722ed | ||
|
|
611f8b8865 | ||
|
|
38bdc173e8 | ||
|
|
52244325d9 | ||
|
|
3fd3d99b8c | ||
|
|
d4ee5e91a2 | ||
|
|
c2ad2d7238 | ||
|
|
892e195f0e | ||
|
|
c5b1bed7dc | ||
|
|
0e388d7aa7 | ||
|
|
c3a0dbbe48 | ||
|
|
8b95b3ffc7 | ||
|
|
42b78461cd | ||
|
|
9190a41ca5 | ||
|
|
28a7251319 | ||
|
|
20565866ef | ||
|
|
246f10aee5 | ||
|
|
960017280d | ||
|
|
7218aaf52e | ||
|
|
62050cc7b6 | ||
|
|
f88a14ac0a | ||
|
|
9286761c30 | ||
|
|
07c3273247 | ||
|
|
eb8fe77582 | ||
|
|
b68ba0bff6 | ||
|
|
696657c09e | ||
|
|
12bea9b60a | ||
|
|
9334e9552f | ||
|
|
a43b04a98b | ||
|
|
f359ff995d | ||
|
|
9185d2646b | ||
|
|
33e61c762c | ||
|
|
e342e646ff | ||
|
|
ed163a80e0 | ||
|
|
b390df08b5 | ||
|
|
f0b3b9f7f4 | ||
|
|
a67d732507 | ||
|
|
ca0ebe0d75 | ||
|
|
94d113cbe0 | ||
|
|
25c3aeaa5f | ||
|
|
736d5a00b7 | ||
|
|
f1627b214c | ||
|
|
d9265ec7ea | ||
|
|
663e871bed | ||
|
|
5c5eaddef7 | ||
|
|
edaec4f1ae | ||
|
|
6d19acaa6c | ||
|
|
d29a619fbf | ||
|
|
b17808dd91 | ||
|
|
c5321a3667 | ||
|
|
8836691510 | ||
|
|
6568f6525d | ||
|
|
473fc27b49 | ||
|
|
74aeb55acb | ||
|
|
8efcf0529f | ||
|
|
06071c2f9c | ||
|
|
5eb4eca487 | ||
|
|
33f6153e12 | ||
|
|
df3283f526 | ||
|
|
b5901a2819 | ||
|
|
6d5f1402fe | ||
|
|
65e3782b2e | ||
|
|
135981dd30 | ||
|
|
fe5cf2d922 | ||
|
|
e15425cc2e | ||
|
|
c3cb0a4e33 | ||
|
|
cc32976bdd | ||
|
|
bc08318716 | ||
|
|
ee1ab30c2c | ||
|
|
7fa1a66f7e | ||
|
|
946bf37406 | ||
|
|
8706f6931a | ||
|
|
f551674860 | ||
|
|
d90fe0ef07 | ||
|
|
bf979fa3b3 | ||
|
|
b3b88891e9 | ||
|
|
01c5de60dc | ||
|
|
47b8fe5022 | ||
|
|
324b37b875 | ||
|
|
76e7e192d8 | ||
|
|
f9f3c4d923 | ||
|
|
a476476bd1 | ||
|
|
82a60a884a | ||
|
|
f17727de18 | ||
|
|
f1f33c79f4 | ||
|
|
d52eaafdbb | ||
|
|
e7a3e50ed1 | ||
|
|
2e09a87baa | ||
|
|
b92ae7e47e | ||
|
|
f98446e139 | ||
|
|
57a48dadaa | ||
|
|
c65ec68e46 | ||
|
|
d6559be3fc | ||
|
|
6fbf67f9a9 | ||
|
|
59df5b24fe | ||
|
|
3e1544294b | ||
|
|
a12c398816 | ||
|
|
0bd3e28348 | ||
|
|
ad4e39c088 | ||
|
|
2668d96e6a | ||
|
|
357c496aad | ||
|
|
22a513ba22 | ||
|
|
e6dd1119be | ||
|
|
2dbe454e04 | ||
|
|
e3a59b76eb | ||
|
|
01008acfcd | ||
|
|
b67a162d3f | ||
|
|
8bfde9fbaf | ||
|
|
1fdecf8def | ||
|
|
1141d4b833 | ||
|
|
cdac92ca7b | ||
|
|
2a57c260cc | ||
|
|
f41e29ab3a | ||
|
|
8f10624073 | ||
|
|
eb1f8be11e | ||
|
|
3333501ab9 | ||
|
|
0f40820315 | ||
|
|
5f1a839620 | ||
|
|
b9bb1c775d | ||
|
|
1059b7376b | ||
|
|
f38ab4a9ce | ||
|
|
9e7450c012 | ||
|
|
99a3e360fe | ||
|
|
d45f8f78d6 | ||
|
|
648af61116 | ||
|
|
eebf1b89b1 | ||
|
|
f8094bb624 | ||
|
|
ed13e0d2c2 | ||
|
|
aa830589b4 | ||
|
|
999a2bd929 | ||
|
|
d69ee98450 | ||
|
|
f6712c24ad | ||
|
|
89d2772194 | ||
|
|
03352142b6 | ||
|
|
73a51e0c00 | ||
|
|
2e26f8caa6 | ||
|
|
f9bcce9e43 | ||
|
|
2ecc877ba8 | ||
|
|
3f8a3c69e3 | ||
|
|
67c37a0984 | ||
|
|
a58a55d00d | ||
|
|
06d51dd0b8 | ||
|
|
d5db028f57 | ||
|
|
fcb85ff4be | ||
|
|
3695b4363d | ||
|
|
cb11e6437c | ||
|
|
5127bd11ce | ||
|
|
91f90aefa1 | ||
|
|
0a067bce36 | ||
|
|
f0aba433bf | ||
|
|
f06467a0e3 | ||
|
|
68bcd3c710 | ||
|
|
a645733cc5 | ||
|
|
49fe5baf94 | ||
|
|
411ee55653 | ||
|
|
e351ce7411 | ||
|
|
f33e585a71 | ||
|
|
77f3096e0d | ||
|
|
9a5b18c4e6 | ||
|
|
0c7112869a | ||
|
|
f66a4d71ea | ||
|
|
9b0ab878df | ||
|
|
d30b90dfd0 | ||
|
|
efd28f8c27 | ||
|
|
e05e722387 | ||
|
|
748e81956d | ||
|
|
c9a41febce | ||
|
|
18e244b756 | ||
|
|
47676139a3 | ||
|
|
1ed933b7ad | ||
|
|
f6a343ccd6 | ||
|
|
dd6cdc22e5 | ||
|
|
f70f4348b3 | ||
|
|
ec7f801929 | ||
|
|
0f8aca382e | ||
|
|
0270f77eaa | ||
|
|
dcba71ada4 | ||
|
|
6080f76a9c | ||
|
|
e7349161f3 | ||
|
|
2e2907ea09 | ||
|
|
25e84b2a6c | ||
|
|
5efd424172 | ||
|
|
2672502c07 | ||
|
|
83440cc3d9 | ||
|
|
8e5f93be1c | ||
|
|
c1afc07955 | ||
|
|
4a83e14878 | ||
|
|
832320abc6 | ||
|
|
70c237da72 | ||
|
|
edfcc5c023 | ||
|
|
0668debec6 | ||
|
|
02d6463faa | ||
|
|
1fdb85234c | ||
|
|
44b7dd1808 | ||
|
|
e983ee3101 | ||
|
|
75e7e81c05 | ||
|
|
31ce3b9c08 | ||
|
|
ed93c50fef | ||
|
|
4845660eb5 | ||
|
|
c7919210a2 | ||
|
|
9491418f3b | ||
|
|
e8de403286 | ||
|
|
dfb625377b | ||
|
|
2c0f2a8be6 | ||
|
|
787d3cb3e9 | ||
|
|
96ca17d26c | ||
|
|
3dd0f7f2c3 | ||
|
|
10ba0cf976 | ||
|
|
276c15cc23 | ||
|
|
2584b848ad | ||
|
|
6471efed5f | ||
|
|
5b7d7ad65d | ||
|
|
712851a8a5 | ||
|
|
63d291cb47 | ||
|
|
f825c92111 | ||
|
|
419eb2ea41 | ||
|
|
89b58dd64e | ||
|
|
6bc5f81440 | ||
|
|
424f4b7b5e | ||
|
|
9271a1caac | ||
|
|
0ee4df03f9 | ||
|
|
8ac713ce32 | ||
|
|
76b2489fe9 | ||
|
|
6786095154 | ||
|
|
2c5793ef37 | ||
|
|
d483f25b96 | ||
|
|
7118368979 | ||
|
|
59256c2e80 | ||
|
|
1fb8a0db1e | ||
|
|
07d0c8e8fa | ||
|
|
98452ead17 | ||
|
|
d8c9f40377 | ||
|
|
8148d5eec6 | ||
|
|
4c429ad604 | ||
|
|
a9c52de8d5 | ||
|
|
f648aa1f91 | ||
|
|
eaba388bdd | ||
|
|
73e6afcbc6 | ||
|
|
8c3b72adf2 | ||
|
|
ae18ff4262 | ||
|
|
1adc8af543 | ||
|
|
7413df6f1e | ||
|
|
bda8559190 | ||
|
|
b74612fa41 | ||
|
|
22e0c20dcd | ||
|
|
08f92e1100 | ||
|
|
bb12ece46e | ||
|
|
0065438305 | ||
|
|
7f115c1b3e | ||
|
|
4e0114ab0d | ||
|
|
0ef64fa4bd | ||
|
|
84dbc17c22 | ||
|
|
16e16e356d | ||
|
|
978ee885c4 | ||
|
|
850d43df63 | ||
|
|
fc109fd1b1 | ||
|
|
9aefc55534 | ||
|
|
2829947b93 | ||
|
|
0c2af89a1c | ||
|
|
14c2dc9624 | ||
|
|
4f35d710a6 | ||
|
|
fdb5e018e5 | ||
|
|
6001fde25c | ||
|
|
ae63c0adaf | ||
|
|
ad1539c8f6 | ||
|
|
634a0c8cd0 | ||
|
|
773f9a0c63 | ||
|
|
e4e320e9e3 | ||
|
|
3b4b400e6b | ||
|
|
a950be2d95 | ||
|
|
ba6f5ab984 | ||
|
|
f3a5e3f5ed | ||
|
|
e685e621f3 | ||
|
|
2cd2be9b67 | ||
|
|
e73d9e8a03 | ||
|
|
476f74a604 | ||
|
|
ab0d1d99e6 | ||
|
|
d5680ffd5d | ||
|
|
3c091a88d4 | ||
|
|
49b70b33de | ||
|
|
c5ff2716fb | ||
|
|
400fdf0896 | ||
|
|
cbb8c7323c | ||
|
|
60e79f8f77 | ||
|
|
0e829d739a | ||
|
|
62abb274e0 | ||
|
|
e4028785de | ||
|
|
2bb44bcb76 | ||
|
|
684599f81b | ||
|
|
b56d28f5df | ||
|
|
02b9ac04c8 | ||
|
|
2fc283990a | ||
|
|
abb652ebd5 | ||
|
|
55786cb7f7 | ||
|
|
447a575f4f | ||
|
|
49280a8617 | ||
|
|
ff78a9cc35 | ||
|
|
3fea5c9c8c | ||
|
|
aea63cad52 | ||
|
|
800abe9920 | ||
|
|
dd6069e41a | ||
|
|
90d31aeff0 | ||
|
|
4d9a327b1f | ||
|
|
06a97ef076 | ||
|
|
76c2477387 | ||
|
|
bc4dac9cad | ||
|
|
36e3d6c18a | ||
|
|
edfd84a8e3 | ||
|
|
fb20cf6069 | ||
|
|
abbe47f6b9 | ||
|
|
f84d250134 | ||
|
|
3ffb4b8990 | ||
|
|
f70cfabede | ||
|
|
3a81783d77 | ||
|
|
237a4a90ff | ||
|
|
99c7dfc98d | ||
|
|
48aba34370 | ||
|
|
29cca36f2c | ||
|
|
0f5819f5c2 | ||
|
|
373772de2d | ||
|
|
7f5bbe8b5f | ||
|
|
daee57167b | ||
|
|
03467196b9 | ||
|
|
d3f3531cdb | ||
|
|
883b694592 | ||
|
|
6c89d66af9 | ||
|
|
fb0a76b418 | ||
|
|
64f77fca5b | ||
|
|
b1fca2c5be | ||
|
|
108d705f09 | ||
|
|
a77242e66c | ||
|
|
8b153113ff | ||
|
|
6d0ec37135 |
51
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
name: 报告Bug
|
||||
about: 报告KnowStreaming的相关Bug
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||
|
||||
你是否希望来认领这个Bug。
|
||||
|
||||
「 Y / N 」
|
||||
|
||||
### 环境信息
|
||||
|
||||
* KnowStreaming version : <font size=4 color =red> xxx </font>
|
||||
* Operating System version : <font size=4 color =red> xxx </font>
|
||||
* Java version : <font size=4 color =red> xxx </font>
|
||||
|
||||
|
||||
### 重现该问题的步骤
|
||||
|
||||
1. xxx
|
||||
|
||||
|
||||
|
||||
2. xxx
|
||||
|
||||
|
||||
3. xxx
|
||||
|
||||
|
||||
|
||||
### 预期结果
|
||||
|
||||
<!-- 写下应该出现的预期结果?-->
|
||||
|
||||
### 实际结果
|
||||
|
||||
<!-- 实际发生了什么? -->
|
||||
|
||||
|
||||
---
|
||||
|
||||
如果有异常,请附上异常Trace:
|
||||
|
||||
```
|
||||
Just put your stack trace here!
|
||||
```
|
||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: 讨论问题
|
||||
url: https://github.com/didi/KnowStreaming/discussions/new
|
||||
about: 发起问题、讨论 等等
|
||||
- name: KnowStreaming官网
|
||||
url: https://knowstreaming.com/
|
||||
about: KnowStreaming website
|
||||
26
.github/ISSUE_TEMPLATE/detail_optimizing.md
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: 优化建议
|
||||
about: 相关功能优化建议
|
||||
title: ''
|
||||
labels: Optimization Suggestions
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||
|
||||
你是否希望来认领这个优化建议。
|
||||
|
||||
「 Y / N 」
|
||||
|
||||
### 环境信息
|
||||
|
||||
* KnowStreaming version : <font size=4 color =red> xxx </font>
|
||||
* Operating System version : <font size=4 color =red> xxx </font>
|
||||
* Java version : <font size=4 color =red> xxx </font>
|
||||
|
||||
### 需要优化的功能点
|
||||
|
||||
|
||||
### 建议如何优化
|
||||
|
||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: 提议新功能/需求
|
||||
about: 给KnowStreaming提一个功能需求
|
||||
title: ''
|
||||
labels: feature
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我在 [issues](https://github.com/didi/KnowStreaming/issues) 中并未搜索到与此相关的功能需求。
|
||||
- [ ] 我在 [release note](https://github.com/didi/KnowStreaming/releases) 已经发布的版本中并没有搜到相关功能.
|
||||
|
||||
你是否希望来认领这个Feature。
|
||||
|
||||
「 Y / N 」
|
||||
|
||||
|
||||
## 这里描述需求
|
||||
<!--请尽可能的描述清楚您的需求 -->
|
||||
|
||||
12
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
name: 提个问题
|
||||
about: 问KnowStreaming相关问题
|
||||
title: ''
|
||||
labels: question
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||
|
||||
## 在这里提出你的问题
|
||||
23
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
请不要在没有先创建Issue的情况下创建Pull Request。
|
||||
|
||||
## 变更的目的是什么
|
||||
|
||||
XXXXX
|
||||
|
||||
## 简短的更新日志
|
||||
|
||||
XX
|
||||
|
||||
## 验证这一变化
|
||||
|
||||
XXXX
|
||||
|
||||
请遵循此清单,以帮助我们快速轻松地整合您的贡献:
|
||||
|
||||
* [ ] 一个 PR(Pull Request的简写)只解决一个问题,禁止一个 PR 解决多个问题;
|
||||
* [ ] 确保 PR 有对应的 Issue(通常在您开始处理之前创建),除非是书写错误之类的琐碎更改不需要 Issue ;
|
||||
* [ ] 格式化 PR 及 Commit-Log 的标题及内容,例如 #861 。PS:Commit-Log 需要在 Git Commit 代码时进行填写,在 GitHub 上修改不了;
|
||||
* [ ] 编写足够详细的 PR 描述,以了解 PR 的作用、方式和原因;
|
||||
* [ ] 编写必要的单元测试来验证您的逻辑更正。如果提交了新功能或重大更改,请记住在 test 模块中添加 integration-test;
|
||||
* [ ] 确保编译通过,集成测试通过;
|
||||
|
||||
43
.github/workflows/ci_build.yml
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
name: KnowStreaming Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up JDK 11
|
||||
uses: actions/setup-java@v3
|
||||
with:
|
||||
java-version: '11'
|
||||
distribution: 'temurin'
|
||||
cache: maven
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v1
|
||||
with:
|
||||
node-version: '12.22.12'
|
||||
|
||||
- name: Build With Maven
|
||||
run: mvn -Prelease-package -Dmaven.test.skip=true clean install -U
|
||||
|
||||
- name: Get KnowStreaming Version
|
||||
if: ${{ success() }}
|
||||
run: |
|
||||
version=`mvn -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive exec:exec -q`
|
||||
echo "VERSION=${version}" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload Binary Package
|
||||
if: ${{ success() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: KnowStreaming-${{ env.VERSION }}.tar.gz
|
||||
path: km-dist/target/KnowStreaming-${{ env.VERSION }}.tar.gz
|
||||
20
.gitignore
vendored
@@ -56,6 +56,7 @@ fabric.properties
|
||||
*.jar
|
||||
*.war
|
||||
*.ear
|
||||
*.tar.gz
|
||||
|
||||
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
|
||||
hs_err_pid*
|
||||
@@ -99,14 +100,17 @@ target/
|
||||
*/velocity.log*
|
||||
*/*.log
|
||||
*/*.log.*
|
||||
web/node_modules/
|
||||
web/node_modules/*
|
||||
node_modules/
|
||||
node_modules/*
|
||||
workspace.xml
|
||||
/output/*
|
||||
.gitversion
|
||||
*/node_modules/*
|
||||
web/src/main/resources/templates/*
|
||||
*/out/*
|
||||
*/dist/*
|
||||
.DS_Store
|
||||
kafka-manager-web/src/main/resources/templates/*
|
||||
out/*
|
||||
dist/
|
||||
dist/*
|
||||
km-rest/src/main/resources/templates/
|
||||
*dependency-reduced-pom*
|
||||
#filter flattened xml
|
||||
*/.flattened-pom.xml
|
||||
.flattened-pom.xml
|
||||
*/*/.flattened-pom.xml
|
||||
74
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,74 @@
|
||||
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project, and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
education, socio-economic status, nationality, personal appearance, race,
|
||||
religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at https://knowstreaming.com/support-center . All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
158
CONTRIBUTING.md
@@ -1,28 +1,150 @@
|
||||
# Contribution Guideline
|
||||
|
||||
Thanks for considering to contribute this project. All issues and pull requests are highly appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
Before sending pull request to this project, please read and follow guidelines below.
|
||||
# 为KnowStreaming做贡献
|
||||
|
||||
1. Branch: We only accept pull request on `dev` branch.
|
||||
2. Coding style: Follow the coding style used in kafka-manager.
|
||||
3. Commit message: Use English and be aware of your spell.
|
||||
4. Test: Make sure to test your code.
|
||||
|
||||
Add device mode, API version, related log, screenshots and other related information in your pull request if possible.
|
||||
欢迎👏🏻来到KnowStreaming!本文档是关于如何为KnowStreaming做出贡献的指南。
|
||||
|
||||
NOTE: We assume all your contribution can be licensed under the [Apache License 2.0](LICENSE).
|
||||
如果您发现不正确或遗漏的内容, 请留下意见/建议。
|
||||
|
||||
## Issues
|
||||
## 行为守则
|
||||
请务必阅读并遵守我们的 [行为准则](./CODE_OF_CONDUCT.md).
|
||||
|
||||
We love clearly described issues. :)
|
||||
|
||||
Following information can help us to resolve the issue faster.
|
||||
|
||||
* Device mode and hardware information.
|
||||
* API version.
|
||||
* Logs.
|
||||
* Screenshots.
|
||||
* Steps to reproduce the issue.
|
||||
## 贡献
|
||||
|
||||
**KnowStreaming** 欢迎任何角色的新参与者,包括 **User** 、**Contributor**、**Committer**、**PMC** 。
|
||||
|
||||
我们鼓励新人积极加入 **KnowStreaming** 项目,从User到Contributor、Committer ,甚至是 PMC 角色。
|
||||
|
||||
为了做到这一点,新人需要积极地为 **KnowStreaming** 项目做出贡献。以下介绍如何对 **KnowStreaming** 进行贡献。
|
||||
|
||||
|
||||
### 创建/打开 Issue
|
||||
|
||||
如果您在文档中发现拼写错误、在代码中**发现错误**或想要**新功能**或想要**提供建议**,您可以在 GitHub 上[创建一个Issue](https://github.com/didi/KnowStreaming/issues/new/choose) 进行报告。
|
||||
|
||||
|
||||
如果您想直接贡献, 您可以选择下面标签的问题。
|
||||
|
||||
- [contribution welcome](https://github.com/didi/KnowStreaming/labels/contribution%20welcome) : 非常需要解决/新增 的Issues
|
||||
- [good first issue](https://github.com/didi/KnowStreaming/labels/good%20first%20issue): 对新人比较友好, 新人可以拿这个Issue来练练手热热身。
|
||||
|
||||
<font color=red ><b> 请注意,任何 PR 都必须与有效issue相关联。否则,PR 将被拒绝。</b></font>
|
||||
|
||||
|
||||
|
||||
### 开始你的贡献
|
||||
|
||||
**分支介绍**
|
||||
|
||||
我们将 `dev`分支作为开发分支, 说明这是一个不稳定的分支。
|
||||
|
||||
此外,我们的分支模型符合 [https://nvie.com/posts/a-successful-git-branching-model/](https://nvie.com/posts/a-successful-git-branching-model/). 我们强烈建议新人在创建PR之前先阅读上述文章。
|
||||
|
||||
|
||||
|
||||
**贡献流程**
|
||||
|
||||
为方便描述,我们这里定义一下2个名词:
|
||||
|
||||
自己Fork出来的仓库是私人仓库, 我们这里称之为 :**分叉仓库**
|
||||
Fork的源项目,我们称之为:**源仓库**
|
||||
|
||||
|
||||
现在,如果您准备好创建PR, 以下是贡献者的工作流程:
|
||||
|
||||
1. Fork [KnowStreaming](https://github.com/didi/KnowStreaming) 项目到自己的仓库
|
||||
|
||||
2. 从源仓库的`dev`拉取并创建自己的本地分支,例如: `dev`
|
||||
3. 在本地分支上对代码进行修改
|
||||
4. Rebase 开发分支, 并解决冲突
|
||||
5. commit 并 push 您的更改到您自己的**分叉仓库**
|
||||
6. 创建一个 Pull Request 到**源仓库**的`dev`分支中。
|
||||
7. 等待回复。如果回复的慢,请无情的催促。
|
||||
|
||||
|
||||
更为详细的贡献流程请看:[贡献流程](./docs/contributer_guide/贡献流程.md)
|
||||
|
||||
创建Pull Request时:
|
||||
|
||||
1. 请遵循 PR的 [模板](./.github/PULL_REQUEST_TEMPLATE.md)
|
||||
2. 请确保 PR 有相应的issue。
|
||||
3. 如果您的 PR 包含较大的更改,例如组件重构或新组件,请编写有关其设计和使用的详细文档(在对应的issue中)。
|
||||
4. 注意单个 PR 不能太大。如果需要进行大量更改,最好将更改分成几个单独的 PR。
|
||||
5. 在合并PR之前,尽量的将最终的提交信息清晰简洁, 将多次修改的提交尽可能的合并为一次提交。
|
||||
6. 创建 PR 后,将为PR分配一个或多个reviewers。
|
||||
|
||||
|
||||
<font color=red><b>如果您的 PR 包含较大的更改,例如组件重构或新组件,请编写有关其设计和使用的详细文档。</b></font>
|
||||
|
||||
|
||||
# 代码审查指南
|
||||
|
||||
Committer将轮流Review代码,以确保在合并前至少有一名Committer审查过代码
|
||||
|
||||
一些原则:
|
||||
|
||||
- 可读性——重要的代码应该有详细的文档。API 应该有 Javadoc。代码风格应与现有风格保持一致。
|
||||
- 优雅:新的函数、类或组件应该设计得很好。
|
||||
- 可测试性——单元测试用例应该覆盖 80% 的新代码。
|
||||
- 可维护性 - 遵守我们的编码规范。
|
||||
|
||||
|
||||
# 开发者
|
||||
|
||||
## 成为Contributor
|
||||
|
||||
只要成功提交并合并PR , 则为Contributor
|
||||
|
||||
贡献者名单请看:[贡献者名单](./docs/contributer_guide/开发者名单.md)
|
||||
|
||||
## 尝试成为Commiter
|
||||
|
||||
一般来说, 贡献8个重要的补丁并至少让三个不同的人来Review他们(您需要3个Commiter的支持)。
|
||||
然后请人给你提名, 您需要展示您的
|
||||
|
||||
1. 至少8个重要的PR和项目的相关问题
|
||||
2. 与团队合作的能力
|
||||
3. 了解项目的代码库和编码风格
|
||||
4. 编写好代码的能力
|
||||
|
||||
当前的Commiter可以通过在KnowStreaming中的Issue标签 `nomination`(提名)来提名您
|
||||
|
||||
1. 你的名字和姓氏
|
||||
2. 指向您的Git个人资料的链接
|
||||
3. 解释为什么你应该成为Commiter
|
||||
4. 详细说明提名人与您合作的3个PR以及相关问题,这些问题可以证明您的能力。
|
||||
|
||||
另外2个Commiter需要支持您的**提名**,如果5个工作日内没有人反对,您就是提交者,如果有人反对或者想要更多的信息,Commiter会讨论并通常达成共识(5个工作日内) 。
|
||||
|
||||
|
||||
# 开源奖励计划
|
||||
|
||||
|
||||
我们非常欢迎开发者们为KnowStreaming开源项目贡献一份力量,相应也将给予贡献者激励以表认可与感谢。
|
||||
|
||||
|
||||
## 参与贡献
|
||||
|
||||
1. 积极参与 Issue 的讨论,如答疑解惑、提供想法或报告无法解决的错误(Issue)
|
||||
2. 撰写和改进项目的文档(Wiki)
|
||||
3. 提交补丁优化代码(Coding)
|
||||
|
||||
|
||||
## 你将获得
|
||||
|
||||
1. 加入KnowStreaming开源项目贡献者名单并展示
|
||||
2. KnowStreaming开源贡献者证书(纸质&电子版)
|
||||
3. KnowStreaming贡献者精美大礼包(KnowStreaming/滴滴 周边)
|
||||
|
||||
|
||||
## 相关规则
|
||||
|
||||
- Contributor和Committer都会有对应的证书和对应的礼包
|
||||
- 每季度有KnowStreaming项目团队评选出杰出贡献者,颁发相应证书。
|
||||
- 年末进行年度评选
|
||||
|
||||
贡献者名单请看:[贡献者名单](./docs/contributer_guide/开发者名单.md)
|
||||
159
README.md
@@ -1,64 +1,161 @@
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png" width = "256" div align=center />
|
||||
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://knowstreaming.com">产品官网</a> |
|
||||
<a href="https://github.com/didi/KnowStreaming/releases">下载地址</a> |
|
||||
<a href="https://doc.knowstreaming.com/product">文档资源</a> |
|
||||
<a href="https://demo.knowstreaming.com">体验环境</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<!--最近一次提交时间-->
|
||||
<a href="https://img.shields.io/github/last-commit/didi/KnowStreaming">
|
||||
<img src="https://img.shields.io/github/last-commit/didi/KnowStreaming" alt="LastCommit">
|
||||
</a>
|
||||
|
||||
<!--最新版本-->
|
||||
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
|
||||
<img src="https://img.shields.io/github/v/release/didi/KnowStreaming" alt="License">
|
||||
</a>
|
||||
|
||||
<!--License信息-->
|
||||
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/didi/KnowStreaming" alt="License">
|
||||
</a>
|
||||
|
||||
<!--Open-Issue-->
|
||||
<a href="https://github.com/didi/KnowStreaming/issues">
|
||||
<img src="https://img.shields.io/github/issues-raw/didi/KnowStreaming" alt="Issues">
|
||||
</a>
|
||||
|
||||
<!--知识星球-->
|
||||
<a href="https://z.didi.cn/5gSF9">
|
||||
<img src="https://img.shields.io/badge/join-%E7%9F%A5%E8%AF%86%E6%98%9F%E7%90%83-red" alt="Slack">
|
||||
</a>
|
||||
|
||||
</p>
|
||||
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||
## `Know Streaming` 简介
|
||||
|
||||
---
|
||||
`Know Streaming`是一套云原生的Kafka管控平台,脱胎于众多互联网内部多年的Kafka运营实践经验,专注于Kafka运维管控、监控告警、资源治理、多活容灾等核心场景。在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设,提供一系列特色的功能,极大地方便了用户和运维人员的日常使用,让普通运维人员都能成为Kafka专家。
|
||||
|
||||
## 主要功能特性
|
||||
我们现在正在收集 Know Streaming 用户信息,以帮助我们进一步改进 Know Streaming。
|
||||
请在 [issue#663](https://github.com/didi/KnowStreaming/issues/663) 上提供您的使用信息来支持我们:[谁在使用 Know Streaming](https://github.com/didi/KnowStreaming/issues/663)
|
||||
|
||||
|
||||
### 集群监控维度
|
||||
|
||||
- 多版本集群管控,支持从`0.10.2`到`2.x`版本;
|
||||
- 集群Topic、Broker等多维度历史与实时关键指标查看;
|
||||
整体具有以下特点:
|
||||
|
||||
- 👀 **零侵入、全覆盖**
|
||||
- 无需侵入改造 `Apache Kafka` ,一键便能纳管 `0.10.x` ~ `3.x.x` 众多版本的Kafka,包括 `ZK` 或 `Raft` 运行模式的版本,同时在兼容架构上具备良好的扩展性,帮助您提升集群管理水平;
|
||||
|
||||
- 🌪️ **零成本、界面化**
|
||||
- 提炼高频 CLI 能力,设计合理的产品路径,提供清新美观的 GUI 界面,支持 Cluster、Broker、Zookeeper、Topic、ConsumerGroup、Message、ACL、Connect 等组件 GUI 管理,普通用户5分钟即可上手;
|
||||
|
||||
- 👏 **云原生、插件化**
|
||||
- 基于云原生构建,具备水平扩展能力,只需要增加节点即可获取更强的采集及对外服务能力,提供众多可热插拔的企业级特性,覆盖可观测性生态整合、资源治理、多活容灾等核心场景;
|
||||
|
||||
- 🚀 **专业能力**
|
||||
- 集群管理:支持一键纳管,健康分析、核心组件观测 等功能;
|
||||
- 观测提升:多维度指标观测大盘、观测指标最佳实践 等功能;
|
||||
- 异常巡检:集群多维度健康巡检、集群多维度健康分 等功能;
|
||||
- 能力增强:集群负载均衡、Topic扩缩副本、Topic副本迁移 等功能;
|
||||
|
||||
|
||||
### 集群管控维度
|
||||
|
||||
**产品图**
|
||||
|
||||
- 集群运维,包括逻辑Region方式管理集群
|
||||
- Broker运维,包括优先副本选举
|
||||
- Topic运维,包括创建、查询、扩容、修改属性、数据采样及迁移等;
|
||||
- 消费组运维,包括指定时间或指定偏移两种方式进行重置消费偏移
|
||||
<p align="center">
|
||||
|
||||
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_sPmS4SNLX9m1zlpmHaLJ" width = "768" height = "473" div align=center />
|
||||
|
||||
</p>
|
||||
|
||||
|
||||
### 用户使用维度
|
||||
|
||||
- Kafka用户、Kafka研发、Kafka运维 视角区分
|
||||
- Kafka用户、Kafka研发、Kafka运维 权限区分
|
||||
|
||||
|
||||
## kafka-manager架构图
|
||||
## 文档资源
|
||||
|
||||

|
||||
**`开发相关手册`**
|
||||
|
||||
- [打包编译手册](docs/install_guide/源码编译打包手册.md)
|
||||
- [单机部署手册](docs/install_guide/单机部署手册.md)
|
||||
- [版本升级手册](docs/install_guide/版本升级手册.md)
|
||||
- [本地源码启动手册](docs/dev_guide/本地源码启动手册.md)
|
||||
- [页面无数据排查手册](docs/dev_guide/页面无数据排查手册.md)
|
||||
|
||||
**`产品相关手册`**
|
||||
|
||||
- [产品使用指南](docs/user_guide/用户使用手册.md)
|
||||
- [2.x与3.x新旧对比手册](docs/user_guide/新旧对比手册.md)
|
||||
- [FAQ](docs/user_guide/faq.md)
|
||||
|
||||
|
||||
## 相关文档
|
||||
**点击 [这里](https://doc.knowstreaming.com/product),也可以从官网获取到更多文档**
|
||||
|
||||
- [kafka-manager安装手册](./docs/install_cn_guide.md)
|
||||
- [kafka-manager接入集群](./docs/manual_kafka_op/add_cluster.md)
|
||||
- [kafka-manager使用手册-待更新](./docs/user_cn_guide.md)
|
||||
**`产品网址`**
|
||||
- [产品官网:https://knowstreaming.com](https://knowstreaming.com)
|
||||
- [体验环境:https://demo.knowstreaming.com](https://demo.knowstreaming.com),登录账号:admin/admin
|
||||
|
||||
|
||||
## 钉钉交流群
|
||||
|
||||

|
||||
## 成为社区贡献者
|
||||
|
||||
1. [贡献源码](https://doc.knowstreaming.com/product/10-contribution) 了解如何成为 Know Streaming 的贡献者
|
||||
2. [具体贡献流程](https://doc.knowstreaming.com/product/10-contribution#102-贡献流程)
|
||||
3. [开源激励计划](https://doc.knowstreaming.com/product/10-contribution#105-开源激励计划)
|
||||
4. [贡献者名单](https://doc.knowstreaming.com/product/10-contribution#106-贡献者名单)
|
||||
|
||||
|
||||
## 项目成员
|
||||
获取KnowStreaming开源社区证书。
|
||||
|
||||
### 内部核心人员
|
||||
## 加入技术交流群
|
||||
|
||||
`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`
|
||||
**`1、知识星球`**
|
||||
|
||||
<p align="left">
|
||||
<img src="https://user-images.githubusercontent.com/71620349/185357284-fdff1dad-c5e9-4ddf-9a82-0be1c970980d.JPG" height = "180" div align=left />
|
||||
</p>
|
||||
|
||||
### 外部贡献者
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
|
||||
`fangjunyu`、`zhoutaiyang`
|
||||
👍 我们正在组建国内最大,最权威的 **[Kafka中文社区](https://z.didi.cn/5gSF9)**
|
||||
|
||||
在这里你可以结交各大互联网的 Kafka大佬 以及 4000+ Kafka爱好者,一起实现知识共享,实时掌控最新行业资讯,期待 👏 您的加入中~ https://z.didi.cn/5gSF9
|
||||
|
||||
## 协议
|
||||
有问必答~! 互动有礼~!
|
||||
|
||||
PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况~!如使用版本、操作步骤、报错/警告信息等,方便大V们快速解答~
|
||||
|
||||
|
||||
|
||||
**`2、微信群`**
|
||||
|
||||
微信加群:添加`PenceXie` 、`szzdzhp001`的微信号备注KnowStreaming加群。
|
||||
<br/>
|
||||
|
||||
加群之前有劳点一下 star,一个小小的 star 是对KnowStreaming作者们努力建设社区的动力。
|
||||
|
||||
感谢感谢!!!
|
||||
|
||||
<img width="116" alt="wx" src="https://user-images.githubusercontent.com/71620349/192257217-c4ebc16c-3ad9-485d-a914-5911d3a4f46b.png">
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#didi/KnowStreaming&Date)
|
||||
|
||||
`kafka-manager`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE)
|
||||
|
||||
646
Releases_Notes.md
Normal file
@@ -0,0 +1,646 @@
|
||||
|
||||
## v3.4.0
|
||||
|
||||
|
||||
|
||||
**问题修复**
|
||||
- [Bugfix]修复 Overview 指标文案错误的问题 ([#1190](https://github.com/didi/KnowStreaming/issues/1190))
|
||||
- [Bugfix]修复删除 Kafka 集群后,Connect 集群任务出现 NPE 问题 ([#1129](https://github.com/didi/KnowStreaming/issues/1129))
|
||||
- [Bugfix]修复在 Ldap 登录时,设置 auth-user-registration: false 会导致空指针的问题 ([#1117](https://github.com/didi/KnowStreaming/issues/1117))
|
||||
- [Bugfix]修复 Ldap 登录,调用 user.getId() 出现 NPE 的问题 ([#1108](https://github.com/didi/KnowStreaming/issues/1108))
|
||||
- [Bugfix]修复前端新增角色失败等问题 ([#1107](https://github.com/didi/KnowStreaming/issues/1107))
|
||||
- [Bugfix]修复 ZK 四字命令解析错误的问题
|
||||
- [Bugfix]修复 zk standalone 模式下,状态获取错误的问题
|
||||
- [Bugfix]修复 Broker 元信息解析方法未调用导致接入集群失败的问题 ([#993](https://github.com/didi/KnowStreaming/issues/993))
|
||||
- [Bugfix]修复 ConsumerAssignment 类型转换错误的问题
|
||||
- [Bugfix]修复对 Connect 集群的 clusterUrl 的动态更新导致配置不生效的问题 ([#1079](https://github.com/didi/KnowStreaming/issues/1079))
|
||||
- [Bugfix]修复消费组不支持重置到最旧 Offset 的问题 ([#1059](https://github.com/didi/KnowStreaming/issues/1059))
|
||||
- [Bugfix]后端增加查看 User 密码的权限点 ([#1095](https://github.com/didi/KnowStreaming/issues/1095))
|
||||
- [Bugfix]修复 Connect-JMX 端口维护信息错误的问题 ([#1146](https://github.com/didi/KnowStreaming/issues/1146))
|
||||
- [Bugfix]修复系统管理子应用无法正常启动的问题 ([#1167](https://github.com/didi/KnowStreaming/issues/1167))
|
||||
- [Bugfix]修复 Security 模块,权限点缺失问题 ([#1069](https://github.com/didi/KnowStreaming/issues/1069)), ([#1154](https://github.com/didi/KnowStreaming/issues/1154))
|
||||
- [Bugfix]修复 Connect-Worker Jmx 不生效的问题 ([#1067](https://github.com/didi/KnowStreaming/issues/1067))
|
||||
- [Bugfix]修复权限 ACL 管理中,消费组列表展示错误的问题 ([#1037](https://github.com/didi/KnowStreaming/issues/1037))
|
||||
- [Bugfix]修复 Connect 模块没有默认勾选指标的问题([#1022](https://github.com/didi/KnowStreaming/issues/1022))
|
||||
- [Bugfix]修复 es 索引 create/delete 死循环的问题 ([#1021](https://github.com/didi/KnowStreaming/issues/1021))
|
||||
- [Bugfix]修复 Connect-GroupDescription 解析失败的问题 ([#1015](https://github.com/didi/KnowStreaming/issues/1015))
|
||||
- [Bugfix]修复 Prometheus 开放接口中,Partition 指标 tag 缺失的问题 ([#1014](https://github.com/didi/KnowStreaming/issues/1014))
|
||||
- [Bugfix]修复 Topic 消息展示,offset 为 0 不显示的问题 ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
|
||||
- [Bugfix]修复重置offset接口调用过多问题
|
||||
- [Bugfix]Connect 提交任务变更为只保存用户修改的配置,并修复 JSON 模式下配置展示不全的问题 ([#1158](https://github.com/didi/KnowStreaming/issues/1158))
|
||||
- [Bugfix]修复消费组 Offset 重置后,提示重置成功,但是前端不刷新数据,Offset 无变化的问题 ([#1090](https://github.com/didi/KnowStreaming/issues/1090))
|
||||
- [Bugfix]修复未勾选系统管理查看权限,但是依然可以查看系统管理的问题 ([#1105](https://github.com/didi/KnowStreaming/issues/1105))
|
||||
|
||||
|
||||
**产品优化**
|
||||
- [Optimize]补充接入集群时,可选的 Kafka 版本列表 ([#1204](https://github.com/didi/KnowStreaming/issues/1204))
|
||||
- [Optimize]GroupTopic 信息修改为实时获取 ([#1196](https://github.com/didi/KnowStreaming/issues/1196))
|
||||
- [Optimize]增加 AdminClient 观测信息 ([#1111](https://github.com/didi/KnowStreaming/issues/1111))
|
||||
- [Optimize]增加 Connector 运行状态指标 ([#1110](https://github.com/didi/KnowStreaming/issues/1110))
|
||||
- [Optimize]统一 DB 元信息更新格式 ([#1127](https://github.com/didi/KnowStreaming/issues/1127)), ([#1125](https://github.com/didi/KnowStreaming/issues/1125)), ([#1006](https://github.com/didi/KnowStreaming/issues/1006))
|
||||
- [Optimize]日志输出增加支持 MDC,方便用户在 logback.xml 中 json 格式化日志 ([#1032](https://github.com/didi/KnowStreaming/issues/1032))
|
||||
- [Optimize]Jmx 相关日志优化 ([#1082](https://github.com/didi/KnowStreaming/issues/1082))
|
||||
- [Optimize]Topic-Partitions增加主动超时功能 ([#1076](https://github.com/didi/KnowStreaming/issues/1076))
|
||||
- [Optimize]Topic-Messages页面后端增加按照Partition和Offset纬度的排序 ([#1075](https://github.com/didi/KnowStreaming/issues/1075))
|
||||
- [Optimize]Connect-JSON模式下的JSON格式和官方API的格式不一致 ([#1080](https://github.com/didi/KnowStreaming/issues/1080)), ([#1153](https://github.com/didi/KnowStreaming/issues/1153)), ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
|
||||
- [Optimize]登录页面展示的 star 数量修改为最新的数量
|
||||
- [Optimize]Group 列表的 maxLag 指标调整为实时获取 ([#1074](https://github.com/didi/KnowStreaming/issues/1074))
|
||||
- [Optimize]Connector增加重启、编辑、删除等权限点 ([#1066](https://github.com/didi/KnowStreaming/issues/1066)), ([#1147](https://github.com/didi/KnowStreaming/issues/1147))
|
||||
- [Optimize]优化 pom.xml 中,KS版本的标签名
|
||||
- [Optimize]优化集群Brokers中, Controller显示存在延迟的问题 ([#1162](https://github.com/didi/KnowStreaming/issues/1162))
|
||||
- [Optimize]bump jackson version to 2.13.5
|
||||
- [Optimize]权限新增 ACL,自定义权限配置,资源 TransactionalId 优化 ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
|
||||
- [Optimize]Connect 样式优化
|
||||
- [Optimize]消费组详情控制数据实时刷新
|
||||
|
||||
|
||||
**功能新增**
|
||||
- [Feature]新增删除 Group 或 GroupOffset 功能 ([#1064](https://github.com/didi/KnowStreaming/issues/1064)), ([#1084](https://github.com/didi/KnowStreaming/issues/1084)), ([#1040](https://github.com/didi/KnowStreaming/issues/1040)), ([#1144](https://github.com/didi/KnowStreaming/issues/1144))
|
||||
- [Feature]增加 Truncate 数据功能 ([#1062](https://github.com/didi/KnowStreaming/issues/1062)), ([#1043](https://github.com/didi/KnowStreaming/issues/1043)), ([#1145](https://github.com/didi/KnowStreaming/issues/1145))
|
||||
- [Feature]支持指定 Server 的具体 Jmx 端口 ([#965](https://github.com/didi/KnowStreaming/issues/965))
|
||||
|
||||
|
||||
**文档更新**
|
||||
- [Doc]FAQ 补充 ES 8.x 版本使用说明 ([#1189](https://github.com/didi/KnowStreaming/issues/1189))
|
||||
- [Doc]补充启动失败的说明 ([#1126](https://github.com/didi/KnowStreaming/issues/1126))
|
||||
- [Doc]补充 ZK 无数据排查说明 ([#1004](https://github.com/didi/KnowStreaming/issues/1004))
|
||||
- [Doc]无数据排查文档,补充 ES 集群 Shard 满的异常日志
|
||||
- [Doc]README 补充页面无数据排查手册链接
|
||||
- [Doc]补充连接特定 Jmx 端口的说明 ([#965](https://github.com/didi/KnowStreaming/issues/965))
|
||||
- [Doc]补充 zk_properties 字段的使用说明 ([#1003](https://github.com/didi/KnowStreaming/issues/1003))
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.3.0
|
||||
|
||||
**问题修复**
|
||||
- 修复 Connect 的 JMX-Port 配置未生效问题;
|
||||
- 修复 不存在 Connector 时,OverView 页面的数据一直处于加载中的问题;
|
||||
- 修复 Group 分区信息,分页时展示不全的问题;
|
||||
- 修复采集副本指标时,参数传递错误的问题;
|
||||
- 修复用户信息修改后,用户列表会抛出空指针异常的问题;
|
||||
- 修复 Topic 详情页面,查看消息时,选择分区不生效问题;
|
||||
- 修复对 ZK 客户端进行配置后不生效的问题;
|
||||
- 修复 connect 模块,指标中缺少健康巡检项通过数的问题;
|
||||
- 修复 connect 模块,指标获取方法存在映射错误的问题;
|
||||
- 修复 connect 模块,max 纬度指标获取错误的问题;
|
||||
- 修复 Topic 指标大盘 TopN 指标显示信息错误的问题;
|
||||
- 修复 Broker Similar Config 显示错误的问题;
|
||||
- 修复解析 ZK 四字命令时,数据类型设置错误导致空指针的问题;
|
||||
- 修复新增 Topic 时,清理策略选项版本控制错误的问题;
|
||||
- 修复新接入集群时 Controller-Host 信息不显示的问题;
|
||||
- 修复 Connector 和 MM2 列表搜索不生效的问题;
|
||||
- 修复 Zookeeper 页面,Leader 显示存在异常的问题;
|
||||
- 修复前端打包失败的问题;
|
||||
|
||||
|
||||
**产品优化**
|
||||
- ZK Overview 页面补充默认展示的指标;
|
||||
- 统一初始化 ES 索引模版的脚本为 init_es_template.sh,同时新增缺失的 connect 索引模版初始化脚本,去除多余的 replica 和 zookeper 索引模版初始化脚本;
|
||||
- 指标大盘页面,优化指标筛选操作后,无指标数据的指标卡片由不显示改为显示,并增加无数据的兜底;
|
||||
- 删除从 ES 读写 replica 指标的相关代码;
|
||||
- 优化 Topic 健康巡检的日志,明确错误的原因;
|
||||
- 优化无 ZK 模块时,巡检详情忽略对 ZK 的展示;
|
||||
- 优化本地缓存大小为可配置;
|
||||
- Task 模块中的返回中,补充任务的分组信息;
|
||||
- FAQ 补充 Ldap 的配置说明;
|
||||
- FAQ 补充接入 Kerberos 认证的 Kafka 集群的配置说明;
|
||||
- ks_km_kafka_change_record 表增加时间纬度的索引,优化查询性能;
|
||||
- 优化 ZK 健康巡检的日志,便于问题的排查;
|
||||
|
||||
**功能新增**
|
||||
- 新增基于滴滴 Kafka 的 Topic 复制功能(需使用滴滴 Kafka 才可具备该能力);
|
||||
- Topic 指标大盘,新增 Topic 复制相关的指标;
|
||||
- 新增基于 TestContainers 的单测;
|
||||
|
||||
|
||||
**Kafka MM2 Beta版 (v3.3.0版本新增发布)**
|
||||
- MM2 任务的增删改查;
|
||||
- MM2 任务的指标大盘;
|
||||
- MM2 任务的健康状态;
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.2.0
|
||||
|
||||
**问题修复**
|
||||
- 修复健康巡检结果更新至 DB 时,出现死锁问题;
|
||||
- 修复 KafkaJMXClient 类中,logger错误的问题;
|
||||
- 后端修复 Topic 过期策略在 0.10.1.0 版本能多选的问题,实际应该只能二选一;
|
||||
- 修复接入集群时,不填写集群配置会报错的问题;
|
||||
- 升级 spring-context 至 5.3.19 版本,修复安全漏洞;
|
||||
- 修复 Broker & Topic 修改配置时,多版本兼容配置的版本信息错误的问题;
|
||||
- 修复 Topic 列表的健康分为健康状态;
|
||||
- 修复 Broker LogSize 指标存储名称错误导致查询不到的问题;
|
||||
- 修复 Prometheus 中,缺少 Group 部分指标的问题;
|
||||
- 修复因缺少健康状态指标导致集群数错误的问题;
|
||||
- 修复后台任务记录操作日志时,因缺少操作用户信息导致出现异常的问题;
|
||||
- 修复 Replica 指标查询时,DSL 错误的问题;
|
||||
- 关闭 errorLogger,修复错误日志重复输出的问题;
|
||||
- 修复系统管理更新用户信息失败的问题;
|
||||
- 修复因原AR信息丢失,导致迁移任务一直处于执行中的错误;
|
||||
- 修复集群 Topic 列表实时数据查询时,出现失败的问题;
|
||||
- 修复集群 Topic 列表,页面白屏问题;
|
||||
- 修复副本变更时,因AR数据异常,导致数组访问越界的问题;
|
||||
|
||||
|
||||
**产品优化**
|
||||
- 优化健康巡检为按照资源维度多线程并发处理;
|
||||
- 统一日志输出格式,并优化部分输出的日志;
|
||||
- 优化 ZK 四字命令结果解析过程中,容易引起误解的 WARN 日志;
|
||||
- 优化 Zookeeper 详情中,目录结构的搜索文案;
|
||||
- 优化线程池的名称,方便第三方系统进行相关问题的分析;
|
||||
- 去除 ESClient 的并发访问控制,降低 ESClient 创建数及提升利用率;
|
||||
- 优化 Topic Messages 抽屉文案;
|
||||
- 优化 ZK 健康巡检失败时的错误日志信息;
|
||||
- 提高 Offset 信息获取的超时时间,降低并发过高时出现请求超时的概率;
|
||||
- 优化 Topic & Partition 元信息的更新策略,降低对 DB 连接的占用;
|
||||
- 优化 Sonar 代码扫码问题;
|
||||
- 优化分区 Offset 指标的采集;
|
||||
- 优化前端图表相关组件逻辑;
|
||||
- 优化产品主题色;
|
||||
- Consumer 列表刷新按钮新增 hover 提示;
|
||||
- 优化配置 Topic 的消息大小时的测试弹框体验;
|
||||
- 优化 Overview 页面 TopN 查询的流程;
|
||||
|
||||
|
||||
**功能新增**
|
||||
- 新增页面无数据排查文档;
|
||||
- 增加 ES 索引删除的功能;
|
||||
- 支持拆分API服务和Job服务部署;
|
||||
|
||||
|
||||
**Kafka Connect Beta版 (v3.2.0版本新增发布)**
|
||||
- Connect 集群的纳管;
|
||||
- Connector 的增删改查;
|
||||
- Connect 集群 & Connector 的指标大盘;
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.1.0
|
||||
|
||||
**Bug修复**
|
||||
- 修复重置 Group Offset 的提示信息中,缺少Dead状态也可进行重置的描述;
|
||||
- 修复新建 Topic 后,立即查看 Topic Messages 信息时,会提示 Topic 不存在的问题;
|
||||
- 修复副本变更时,优先副本选举未被正常触发执行的问题;
|
||||
- 修复 git 目录不存在时,打包不能正常进行的问题;
|
||||
- 修复 KRaft 模式的 Kafka 集群,JMX PORT 显示 -1 的问题;
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化Cluster、Broker、Topic、Group的健康分为健康状态;
|
||||
- 去除健康巡检配置中的权重信息;
|
||||
- 错误提示页面展示优化;
|
||||
- 前端打包编译依赖默认使用 taobao 镜像;
|
||||
- 重新设计优化导航栏的 icon ;
|
||||
|
||||
|
||||
**新增**
|
||||
- 个人头像下拉信息中,新增产品版本信息;
|
||||
- 多集群列表页面,新增集群健康状态分布信息;
|
||||
|
||||
|
||||
**Kafka ZK 部分 (v3.1.0版本正式发布)**
|
||||
- 新增 ZK 集群的指标大盘信息;
|
||||
- 新增 ZK 集群的服务状态概览信息;
|
||||
- 新增 ZK 集群的服务节点列表信息;
|
||||
- 新增 Kafka 在 ZK 的存储数据查看功能;
|
||||
- 新增 ZK 的健康巡检及健康状态计算;
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.0.1
|
||||
|
||||
**Bug修复**
|
||||
- 修复重置 Group Offset 时,提示信息中缺少 Dead 状态也可进行重置的信息;
|
||||
- 修复 Ldap 某个属性不存在时,会直接抛出空指针导致登陆失败的问题;
|
||||
- 修复集群 Topic 列表页,健康分详情信息中,检查时间展示错误的问题;
|
||||
- 修复更新健康检查结果时,出现死锁的问题;
|
||||
- 修复 Replica 索引模版错误的问题;
|
||||
- 修复 FAQ 文档中的错误链接;
|
||||
- 修复 Broker 的 TopN 指标不存在时,页面数据不展示的问题;
|
||||
- 修复 Group 详情页,图表时间范围选择不生效的问题;
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 集群 Group 列表按照 Group 维度进行展示;
|
||||
- 优化避免因 ES 中该指标不存在,导致日志中出现大量空指针的问题;
|
||||
- 优化全局 Message & Notification 展示效果;
|
||||
- 优化 Topic 扩分区名称 & 描述展示;
|
||||
|
||||
|
||||
**新增**
|
||||
- Broker 列表页面,新增 JMX 是否成功连接的信息;
|
||||
|
||||
|
||||
**ZK 部分(未完全发布)**
|
||||
- 后端补充 Kafka ZK 指标采集,Kafka ZK 信息获取相关功能;
|
||||
- 增加本地缓存,避免同一采集周期内 ZK 指标重复采集;
|
||||
- 增加 ZK 节点采集失败跳过策略,避免不断对存在问题的节点不断尝试;
|
||||
- 修复 zkAvgLatency 指标转 Long 时抛出异常问题;
|
||||
- 修复 ks_km_zookeeper 表中,role 字段类型错误问题;
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0
|
||||
|
||||
**Bug修复**
|
||||
- 修复 Group 指标防重复采集不生效问题
|
||||
- 修复自动创建 ES 索引模版失败问题
|
||||
- 修复 Group+Topic 列表中存在已删除Topic的问题
|
||||
- 修复使用 MySQL-8 ,因兼容问题, start_time 信息为 NULL 时,会导致创建任务失败的问题
|
||||
- 修复 Group 信息表更新时,出现死锁的问题
|
||||
- 修复图表补点逻辑与图表时间范围不适配的问题
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 按照资源类别,拆分健康巡检任务
|
||||
- 优化 Group 详情页的指标为实时获取
|
||||
- 图表拖拽排序支持用户级存储
|
||||
- 多集群列表 ZK 信息展示兼容无 ZK 情况
|
||||
- Topic 详情消息预览支持复制功能
|
||||
- 部分内容大数字支持千位分割符展示
|
||||
|
||||
|
||||
**新增**
|
||||
- 集群信息中,新增 Zookeeper 客户端配置字段
|
||||
- 集群信息中,新增 Kafka 集群运行模式字段
|
||||
- 新增 docker-compose 的部署方式
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.3
|
||||
|
||||
**文档**
|
||||
- FAQ 补充权限识别失败问题的说明
|
||||
- 同步更新文档,保持与官网一致
|
||||
|
||||
|
||||
**Bug修复**
|
||||
- Offset 信息获取时,过滤掉无 Leader 的分区
|
||||
- 升级 oshi-core 版本至 5.6.1 版本,修复 Windows 系统获取系统指标失败问题
|
||||
- 修复 JMX 连接被关闭后,未进行重建的问题
|
||||
- 修复因 DB 中 Broker 信息不存在导致 TotalLogSize 指标获取时抛空指针问题
|
||||
- 修复 dml-logi.sql 中,SQL 注释错误的问题
|
||||
- 修复 startup.sh 中,识别操作系统类型错误的问题
|
||||
- 修复配置管理页面删除配置失败的问题
|
||||
- 修复系统管理应用文件引用路径
|
||||
- 修复 Topic Messages 详情提示信息点击跳转 404 的问题
|
||||
- 修复扩副本时,当前副本数不显示问题
|
||||
|
||||
|
||||
**体验优化**
|
||||
- Topic-Messages 页面,增加返回数据的排序以及按照Earliest/Latest的获取方式
|
||||
- 优化 GroupOffsetResetEnum 类名为 OffsetTypeEnum,使得类名含义更准确
|
||||
- 移动 KafkaZKDAO 类,及 Kafka Znode 实体类的位置,使得 Kafka Zookeeper DAO 更加内聚及便于识别
|
||||
- 后端补充 Overview 页面指标排序的功能
|
||||
- 前端 Webpack 配置优化
|
||||
- Cluster Overview 图表取消放大展示功能
|
||||
- 列表页增加手动刷新功能
|
||||
- 接入/编辑集群,优化 JMX-PORT,Version 信息的回显,优化JMX信息的展示
|
||||
- 提高登录页面图片展示清晰度
|
||||
- 部分样式和文案优化
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.2
|
||||
|
||||
**文档**
|
||||
- 新增登录系统对接文档
|
||||
- 优化前端工程打包构建部分文档说明
|
||||
- FAQ补充KnowStreaming连接特定JMX IP的说明
|
||||
|
||||
|
||||
**Bug修复**
|
||||
- 修复logi_security_oplog表字段过短,导致删除Topic等操作无法记录的问题
|
||||
- 修复ES查询时,抛java.lang.NumberFormatException: For input string: "{"value":0,"relation":"eq"}" 问题
|
||||
- 修复LogStartOffset和LogEndOffset指标单位错误问题
|
||||
- 修复进行副本变更时,旧副本数为NULL的问题
|
||||
- 修复集群Group列表,在第二页搜索时,搜索时返回的分页信息错误问题
|
||||
- 修复重置Offset时,返回的错误信息提示不一致的问题
|
||||
- 修复集群查看,系统查看,LoadRebalance等页面权限点缺失问题
|
||||
- 修复查询不存在的Topic时,错误信息提示不明显的问题
|
||||
- 修复Windows用户打包前端工程报错的问题
|
||||
- package-lock.json锁定前端依赖版本号,修复因依赖自动升级导致打包失败等问题
|
||||
- 系统管理子应用,补充后端返回的Code码拦截,解决后端接口返回报错不展示的问题
|
||||
- 修复用户登出后,依旧可以访问系统的问题
|
||||
- 修复巡检任务配置时,数值显示错误的问题
|
||||
- 修复Broker/Topic Overview 图表和图表详情问题
|
||||
- 修复Job扩缩副本任务明细数据错误的问题
|
||||
- 修复重置Offset时,分区ID,Offset数值无限制问题
|
||||
- 修复扩缩/迁移副本时,无法选中Kafka系统Topic的问题
|
||||
- 修复Topic的Config页面,编辑表单时不能正确回显当前值的问题
|
||||
- 修复Broker Card返回数据后依旧展示加载态的问题
|
||||
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化默认用户密码为 admin/admin
|
||||
- 缩短新增集群后,集群信息加载的耗时
|
||||
- 集群Broker列表,增加Controller角色信息
|
||||
- 副本变更任务结束后,增加进行优先副本选举的操作
|
||||
- Task模块任务分为Metrics、Common、Metadata三类任务,每类任务配备独立线程池,减少对Job模块的线程池,以及不同类任务之间的相互影响
|
||||
- 删除代码中存在的多余无用文件
|
||||
- 自动新增ES索引模版及近7天索引,减少用户搭建时需要做的事项
|
||||
- 优化前端工程打包流程
|
||||
- 优化登录页文案,页面左侧栏内容,单集群详情样式,Topic列表趋势图等
|
||||
- 首次进入Broker/Topic图表详情时,进行预缓存数据从而优化体验
|
||||
- 优化Topic详情Partition Tab的展示
|
||||
- 多集群列表页增加编辑功能
|
||||
- 优化副本变更时,迁移时间支持分钟级别粒度
|
||||
- logi-security版本升级至2.10.13
|
||||
- logi-elasticsearch-client版本升级至1.0.24
|
||||
|
||||
|
||||
**能力提升**
|
||||
- 支持Ldap登录认证
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.1
|
||||
|
||||
**文档**
|
||||
- 新增Task模块说明文档
|
||||
- FAQ补充 `Specified key was too long; max key length is 767 bytes ` 错误说明
|
||||
- FAQ补充 `出现ESIndexNotFoundException报错` 错误说明
|
||||
|
||||
|
||||
**Bug修复**
|
||||
- 修复 Consumer 点击 Stop 未停止检索的问题
|
||||
- 修复创建/编辑角色权限报错问题
|
||||
- 修复多集群管理/单集群详情均衡卡片状态错误问题
|
||||
- 修复版本列表未排序问题
|
||||
- 修复Raft集群Controller信息不断记录问题
|
||||
- 修复部分版本消费组描述信息获取失败问题
|
||||
- 修复分区Offset获取失败的日志中,缺少Topic名称信息问题
|
||||
- 修复GitHub图地址错误,及图裂问题
|
||||
- 修复Broker默认使用的地址和注释不一致问题
|
||||
- 修复 Consumer 列表分页不生效问题
|
||||
- 修复操作记录表operation_methods字段缺少默认值问题
|
||||
- 修复集群均衡表中move_broker_list字段无效的问题
|
||||
- 修复KafkaUser、KafkaACL信息获取时,日志一直重复提示不支持问题
|
||||
- 修复指标缺失时,曲线出现掉底的问题
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化前端构建时间和打包体积,增加依赖打包的分包策略
|
||||
- 优化产品样式和文案展示
|
||||
- 优化ES客户端数为可配置
|
||||
- 优化日志中大量出现的MySQL Key冲突日志
|
||||
|
||||
|
||||
**能力提升**
|
||||
- 增加周期任务,用于主动创建缺少的ES模版及索引的能力,减少额外的脚本操作
|
||||
- 增加JMX连接的Broker地址可选择的能力
|
||||
|
||||
---
|
||||
|
||||
## v3.0.0-beta.0
|
||||
|
||||
**1、多集群管理**
|
||||
|
||||
- 增加健康监测体系、关键组件&指标 GUI 展示
|
||||
- 增加 2.8.x 以上 Kafka 集群接入,覆盖 0.10.x-3.x
|
||||
- 删除逻辑集群、共享集群、Region 概念
|
||||
|
||||
**2、Cluster 管理**
|
||||
|
||||
- 增加集群概览信息、集群配置变更记录
|
||||
- 增加 Cluster 健康分,健康检查规则支持自定义配置
|
||||
- 增加 Cluster 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Cluster 层 I/O、Disk 的 Load Reblance 功能,支持定时均衡任务(企业版)
|
||||
- 删除限流、鉴权功能
|
||||
- 删除 APPID 概念
|
||||
|
||||
**3、Broker 管理**
|
||||
|
||||
- 增加 Broker 健康分
|
||||
- 增加 Broker 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Broker 参数配置功能,需重启生效
|
||||
- 增加 Controller 变更记录
|
||||
- 增加 Broker Datalogs 记录
|
||||
- 删除 Leader Rebalance 功能
|
||||
- 删除 Broker 优先副本选举
|
||||
|
||||
**4、Topic 管理**
|
||||
|
||||
- 增加 Topic 健康分
|
||||
- 增加 Topic 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Topic 参数配置功能,可实时生效
|
||||
- 增加 Topic 批量迁移、Topic 批量扩缩副本功能
|
||||
- 增加查看系统 Topic 功能
|
||||
- 优化 Partition 分布的 GUI 展示
|
||||
- 优化 Topic Message 数据采样
|
||||
- 删除 Topic 过期概念
|
||||
- 删除 Topic 申请配额功能
|
||||
|
||||
**5、Consumer 管理**
|
||||
|
||||
- 优化了 ConsumerGroup 展示形式,增加 Consumer Lag 的 GUI 展示
|
||||
|
||||
**6、ACL 管理**
|
||||
|
||||
- 增加原生 ACL GUI 配置功能,可配置生产、消费、自定义多种组合权限
|
||||
- 增加 KafkaUser 功能,可自定义新增 KafkaUser
|
||||
|
||||
**7、消息测试(企业版)**
|
||||
|
||||
- 增加生产者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
|
||||
- 增加消费者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
|
||||
|
||||
**8、Job**
|
||||
|
||||
- 优化 Job 模块,支持任务进度管理
|
||||
|
||||
**9、系统管理**
|
||||
|
||||
- 优化用户、角色管理体系,支持自定义角色配置页面及操作权限
|
||||
- 优化审计日志信息
|
||||
- 删除多租户体系
|
||||
- 删除工单流程
|
||||
|
||||
---
|
||||
|
||||
## v2.6.0
|
||||
|
||||
版本上线时间:2022-01-24
|
||||
|
||||
### 能力提升
|
||||
- 增加简单回退工具类
|
||||
|
||||
### 体验优化
|
||||
- 补充周期任务说明文档
|
||||
- 补充集群安装部署使用说明文档
|
||||
- 升级Swagger、SpringFramework、SpringBoot、EChats版本
|
||||
- 优化Task模块的日志输出
|
||||
- 优化cron表达式解析失败后退出无任何日志提示问题
|
||||
- Ldap用户接入时,增加部门及邮箱信息等
|
||||
- 对Jmx模块,增加连接失败后的回退机制及错误日志优化
|
||||
- 增加线程池、客户端池可配置
|
||||
- 删除无用的jmx_prometheus_javaagent-0.14.0.jar
|
||||
- 优化迁移任务名称
|
||||
- 优化创建Region时,Region容量信息不能立即被更新问题
|
||||
- 引入lombok
|
||||
- 更新视频教程
|
||||
- 优化kcm_script.sh脚本中的LogiKM地址为可通过程序传入
|
||||
- 第三方接口及网关接口,增加是否跳过登录的开关
|
||||
- extends模块相关配置调整为非必须在application.yml中配置
|
||||
|
||||
### bug修复
|
||||
- 修复批量往DB写入空指标数组时报SQL语法异常的问题
|
||||
- 修复网关增加配置及修改配置时,version不变化问题
|
||||
- 修复集群列表页,提示框遮挡问题
|
||||
- 修复对高版本Broker元信息协议解析失败的问题
|
||||
- 修复Dockerfile执行时提示缺少application.yml文件的问题
|
||||
- 修复逻辑集群更新时,会报空指针的问题
|
||||
|
||||
|
||||
## v2.5.0
|
||||
|
||||
版本上线时间:2021-07-10
|
||||
|
||||
### 体验优化
|
||||
- 更改产品名为LogiKM
|
||||
- 更新产品图标
|
||||
|
||||
|
||||
## v2.4.1+
|
||||
|
||||
版本上线时间:2021-05-21
|
||||
|
||||
### 能力提升
|
||||
- 增加直接增加权限和配额的接口(v2.4.1)
|
||||
- 增加接口调用可绕过登录的功能(v2.4.1)
|
||||
|
||||
### 体验优化
|
||||
- Tomcat 版本提升至8.5.66(v2.4.2)
|
||||
- op接口优化,拆分util接口为topic、leader两类接口(v2.4.1)
|
||||
- 简化Gateway配置的Key长度(v2.4.1)
|
||||
|
||||
### bug修复
|
||||
- 修复页面展示版本错误问题(v2.4.2)
|
||||
|
||||
|
||||
## v2.4.0
|
||||
|
||||
版本上线时间:2021-05-18
|
||||
|
||||
|
||||
### 能力提升
|
||||
|
||||
- 增加App与Topic自动化审批开关
|
||||
- Broker元信息中增加Rack信息
|
||||
- 升级MySQL 驱动,支持MySQL 8+
|
||||
- 增加操作记录查询界面
|
||||
|
||||
### 体验优化
|
||||
|
||||
- FAQ告警组说明优化
|
||||
- 用户手册共享及 独享集群概念优化
|
||||
- 用户管理界面,前端限制用户删除自己
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复op-util类中创建Topic失败的接口
|
||||
- 周期同步Topic到DB的任务修复,将Topic列表查询从缓存调整为直接查DB
|
||||
- 应用下线审批失败的功能修复,将权限为0(无权限)的数据进行过滤
|
||||
- 修复登录及权限绕过的漏洞
|
||||
- 修复研发角色展示接入集群、暂停监控等按钮的问题
|
||||
|
||||
|
||||
## v2.3.0
|
||||
|
||||
版本上线时间:2021-02-08
|
||||
|
||||
|
||||
### 能力提升
|
||||
|
||||
- 新增支持docker化部署
|
||||
- 可指定Broker作为候选controller
|
||||
- 可新增并管理网关配置
|
||||
- 可获取消费组状态
|
||||
- 增加集群的JMX认证
|
||||
|
||||
### 体验优化
|
||||
|
||||
- 优化编辑用户角色、修改密码的流程
|
||||
- 新增consumerID的搜索功能
|
||||
- 优化“Topic连接信息”、“消费组重置消费偏移”、“修改Topic保存时间”的文案提示
|
||||
- 在相应位置增加《资源申请文档》链接
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复Broker监控图表时间轴展示错误的问题
|
||||
- 修复创建夜莺监控告警规则时,使用的告警周期的单位不正确的问题
|
||||
|
||||
|
||||
|
||||
## v2.2.0
|
||||
|
||||
版本上线时间:2021-01-25
|
||||
|
||||
|
||||
|
||||
### 能力提升
|
||||
|
||||
- 优化工单批量操作流程
|
||||
- 增加获取Topic75分位/99分位的实时耗时数据
|
||||
- 增加定时任务,可将无主未落DB的Topic定期写入DB
|
||||
|
||||
### 体验优化
|
||||
|
||||
- 在相应位置增加《集群接入文档》链接
|
||||
- 优化物理集群、逻辑集群含义
|
||||
- 在Topic详情页、Topic扩分区操作弹窗增加展示Topic所属Region的信息
|
||||
- 优化Topic审批时,Topic数据保存时间的配置流程
|
||||
- 优化Topic/应用申请、审批时的错误提示文案
|
||||
- 优化Topic数据采样的操作项文案
|
||||
- 优化运维人员删除Topic时的提示文案
|
||||
- 优化运维人员删除Region的删除逻辑与提示文案
|
||||
- 优化运维人员删除逻辑集群的提示文案
|
||||
- 优化上传集群配置文件时的文件类型限制条件
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复填写应用名称时校验特殊字符出错的问题
|
||||
- 修复普通用户越权访问应用详情的问题
|
||||
- 修复由于Kafka版本升级,导致的数据压缩格式无法获取的问题
|
||||
- 修复删除逻辑集群或Topic之后,界面依旧展示的问题
|
||||
- 修复进行Leader rebalance操作时执行结果重复提示的问题
|
||||
|
||||
|
||||
## v2.1.0
|
||||
|
||||
版本上线时间:2020-12-19
|
||||
|
||||
|
||||
|
||||
### 体验优化
|
||||
|
||||
- 优化页面加载时的背景样式
|
||||
- 优化普通用户申请Topic权限的流程
|
||||
- 优化Topic申请配额、申请分区的权限限制
|
||||
- 优化取消Topic权限的文案提示
|
||||
- 优化申请配额表单的表单项名称
|
||||
- 优化重置消费偏移的操作流程
|
||||
- 优化创建Topic迁移任务的表单内容
|
||||
- 优化Topic扩分区操作的弹窗界面样式
|
||||
- 优化集群Broker监控可视化图表样式
|
||||
- 优化创建逻辑集群的表单内容
|
||||
- 优化集群安全协议的提示文案
|
||||
|
||||
### bug修复
|
||||
|
||||
- 修复偶发性重置消费偏移失败的问题
|
||||
|
||||
|
||||
|
||||
|
||||
1036
bin/init_es_template.sh
Normal file
16
bin/shutdown.sh
Normal file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
# shutdown.sh -- stop the running ks-km (KnowStreaming manager) process.
#
# Finds the java process that was launched from this installation's libs
# directory and sends it SIGTERM for a graceful shutdown.

# Resolve this installation's libs directory; abort if it is missing.
cd "$(dirname "$0")"/../libs || exit 1
target_dir=$(pwd)

# Locate the PID of the ks-km java process started from this directory.
# `grep -v grep` excludes the grep commands themselves from the match.
pid=$(ps ax | grep -i 'ks-km' | grep "${target_dir}" | grep java | grep -v grep | awk '{print $1}')
if [ -z "$pid" ]; then
    echo "No ks-km running."
    # NOTE: original used `exit -1`, which is non-portable (POSIX sh rejects
    # it; bash wraps it to 255) -- use an explicit positive status instead.
    exit 1
fi

echo "The ks-km (${pid}) is running..."

# Default SIGTERM lets the JVM run its shutdown hooks.
# ${pid} is intentionally unquoted: if several PIDs matched, word
# splitting passes each one to kill.
kill ${pid}

echo "Send shutdown request to ks-km (${pid}) OK"
|
||||
82
bin/startup.sh
Normal file
@@ -0,0 +1,82 @@
|
||||
#!/bin/bash
# startup.sh -- launch the ks-km (KnowStreaming manager) web server.
# Resolves JAVA_HOME, assembles the JVM options and starts the jar in the
# background; stdout/stderr are appended to ${BASE_DIR}/logs/start.out.
# (Shebang added: the script uses bash-only `[[ ]]` but had none.)

# Print an error message and abort the startup.
error_exit ()
{
    echo "ERROR: $1 !!"
    exit 1
}

#===========================================================================================
# Locate a usable JDK: try common fallback locations when $JAVA_HOME is
# unset or does not contain bin/java.
#===========================================================================================
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME="$HOME/jdk/java"
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME

if [ -z "$JAVA_HOME" ]; then
    if [ "Darwin" = "$(uname -s)" ]; then
        # macOS: ask the system for the default JDK home.
        if [ -x '/usr/libexec/java_home' ]; then
            export JAVA_HOME=$(/usr/libexec/java_home)
        elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
            export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
        fi
    else
        # Other systems: derive JAVA_HOME from the javac found on PATH.
        JAVA_PATH=$(dirname "$(readlink -f "$(which javac)")")
        if [ -n "$JAVA_PATH" ]; then
            export JAVA_HOME=$(dirname "$JAVA_PATH" 2>/dev/null)
        fi
    fi
    if [ -z "$JAVA_HOME" ]; then
        error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
    fi
fi

export WEB_SERVER="ks-km"
export JAVA_HOME
export JAVA="$JAVA_HOME/bin/java"
export BASE_DIR=$(cd "$(dirname "$0")"/..; pwd)
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/

#===========================================================================================
# JVM Configuration
#===========================================================================================

JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"

# JDK 9+ removed the legacy GC-logging flags and java.ext.dirs, so pick the
# GC-log options matching the detected major version.
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]]; then
    JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400"
else
    JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
    JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi

JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/libs/${WEB_SERVER}.jar"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"

# Ensure the log directory exists (-p: no error if it already does).
if [ ! -d "${BASE_DIR}/logs" ]; then
    mkdir -p "${BASE_DIR}/logs"
fi

echo "$JAVA ${JAVA_OPT}"

# check the start.out log output file
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
    touch "${BASE_DIR}/logs/start.out"
fi

# start
# Write the launch banner synchronously. The original backgrounded this
# echo with '&', racing its truncating '>' redirect against the nohup
# append below, which could clobber the server's first output lines.
echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > "${BASE_DIR}/logs/start.out" 2>&1

# Launch the server detached. ${JAVA_OPT} must stay unquoted so word
# splitting turns it into individual JVM arguments.
nohup $JAVA ${JAVA_OPT} >> "${BASE_DIR}/logs/start.out" 2>&1 &

echo "${WEB_SERVER} is starting,you can check the ${BASE_DIR}/logs/start.out"
|
||||
|
Before Width: | Height: | Size: 73 KiB |
|
Before Width: | Height: | Size: 20 KiB |
|
Before Width: | Height: | Size: 7.4 KiB |
111
docs/contribute_guide/assets/分支管理.drawio
Normal file
@@ -0,0 +1,111 @@
|
||||
<mxfile host="65bd71144e">
|
||||
<diagram id="vxzhwhZdNVAY19FZ4dgb" name="Page-1">
|
||||
<mxGraphModel dx="1194" dy="733" grid="0" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1169" pageHeight="827" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0"/>
|
||||
<mxCell id="1" parent="0"/>
|
||||
<mxCell id="4" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=none;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="16">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="200" y="540" as="targetPoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="7" style="edgeStyle=none;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;strokeColor=#33FF33;strokeWidth=2;" edge="1" parent="1" source="2">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="360" y="240" as="targetPoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="5" style="edgeStyle=none;html=1;startArrow=none;strokeColor=#33FF33;strokeWidth=2;" edge="1" parent="1">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="200" y="400" as="targetPoint"/>
|
||||
<mxPoint x="360" y="360" as="sourcePoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="3" value="C3" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#FF8000;strokeWidth=2;" vertex="1" parent="1">
|
||||
<mxGeometry x="340" y="280" width="40" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="18" style="edgeStyle=none;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;endArrow=none;endFill=0;strokeColor=#FF8000;strokeWidth=2;" edge="1" parent="1" source="8" target="3">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="8" value="fix_928" style="rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="320" y="40" width="80" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="9" value="github_master" style="rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;" vertex="1" parent="1">
|
||||
<mxGeometry x="160" y="40" width="80" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="10" value="" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;endArrow=classic;startArrow=none;endFill=1;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="11" target="2">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="200" y="640" as="targetPoint"/>
|
||||
<mxPoint x="200" y="80" as="sourcePoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="2" value="C2" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#6666FF;strokeWidth=2;" vertex="1" parent="1">
|
||||
<mxGeometry x="180" y="200" width="40" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="12" value="" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;endArrow=classic;endFill=1;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="9" target="11">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="200" y="200" as="targetPoint"/>
|
||||
<mxPoint x="200" y="80" as="sourcePoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="11" value="C1" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#6666FF;strokeWidth=2;" vertex="1" parent="1">
|
||||
<mxGeometry x="180" y="120" width="40" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="23" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;exitPerimeter=0;endArrow=none;endFill=0;strokeColor=#FF8000;strokeWidth=2;" edge="1" parent="1" source="3">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="360" y="360" as="targetPoint"/>
|
||||
<mxPoint x="360" y="400" as="sourcePoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="17" value="" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=none;endArrow=none;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="2" target="16">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="200" y="640" as="targetPoint"/>
|
||||
<mxPoint x="200" y="240" as="sourcePoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="16" value="C4" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#6666FF;strokeWidth=2;" vertex="1" parent="1">
|
||||
<mxGeometry x="180" y="440" width="40" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="22" value="Tag-v3.2.0" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;fillColor=none;strokeColor=none;" vertex="1" parent="1">
|
||||
<mxGeometry x="100" y="120" width="80" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="24" value="Tag-v3.2.1" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;fillColor=none;strokeColor=none;" vertex="1" parent="1">
|
||||
<mxGeometry x="100" y="440" width="80" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="27" value="切换到主分支:git checkout github_master" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="520" y="90" width="240" height="30" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="34" style="edgeStyle=none;html=1;exitX=0;exitY=0;exitDx=0;exitDy=0;entryX=0.855;entryY=0.145;entryDx=0;entryDy=0;entryPerimeter=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="28" target="2">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="28" value="主分支拉最新代码:git pull" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="520" y="120" width="160" height="30" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="35" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="29">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="270" y="225" as="targetPoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="29" value="基于主分支拉新分支:git checkout -b fix_928" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="520" y="210" width="250" height="30" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="37" style="edgeStyle=none;html=1;exitX=0;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;entryPerimeter=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="30" target="3">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="30" value="提交代码:git commit -m "[Optimize]优化xxx问题(#928)"" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="520" y="270" width="320" height="30" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="31" value="提交到自己远端仓库:git push --set-upstream origin fix_928" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="520" y="300" width="334" height="30" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="38" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="32">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="280" y="380" as="targetPoint"/>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="32" value="GitHub页面发起Pull Request请求,管理员合入主仓库" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
|
||||
<mxGeometry x="520" y="360" width="300" height="30" as="geometry"/>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
BIN
docs/contribute_guide/assets/分支管理.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
docs/contribute_guide/assets/环境初始化.jpg
Normal file
|
After Width: | Height: | Size: 180 KiB |
BIN
docs/contribute_guide/assets/申请合并.jpg
Normal file
|
After Width: | Height: | Size: 80 KiB |
BIN
docs/contribute_guide/assets/问题认领.jpg
Normal file
|
After Width: | Height: | Size: 631 KiB |
1
docs/contribute_guide/代码规范.md
Normal file
@@ -0,0 +1 @@
|
||||
TODO.
|
||||
100
docs/contribute_guide/贡献名单.md
Normal file
@@ -0,0 +1,100 @@
|
||||
# 贡献名单
|
||||
|
||||
- [贡献名单](#贡献名单)
|
||||
- [1、贡献者角色](#1贡献者角色)
|
||||
- [1.1、Maintainer](#11maintainer)
|
||||
- [1.2、Committer](#12committer)
|
||||
- [1.3、Contributor](#13contributor)
|
||||
- [2、贡献者名单](#2贡献者名单)
|
||||
|
||||
|
||||
## 1、贡献者角色
|
||||
|
||||
KnowStreaming 开发者包含 Maintainer、Committer、Contributor 三种角色,每种角色的标准定义如下。
|
||||
|
||||
### 1.1、Maintainer
|
||||
|
||||
Maintainer 是对 KnowStreaming 项目的演进和发展做出显著贡献的个人。具体包含以下的标准:
|
||||
|
||||
- 完成多个关键模块或者工程的设计与开发,是项目的核心开发人员;
|
||||
- 持续的投入和激情,能够积极参与社区、官网、issue、PR 等项目相关事项的维护;
|
||||
- 在社区中具有有目共睹的影响力,能够代表 KnowStreaming 参加重要的社区会议和活动;
|
||||
- 具有培养 Committer 和 Contributor 的意识和能力;
|
||||
|
||||
### 1.2、Committer
|
||||
|
||||
Committer 是具有 KnowStreaming 仓库写权限的个人,包含以下的标准:
|
||||
|
||||
- 能够在长时间内做持续贡献 issue、PR 的个人;
|
||||
- 参与 issue 列表的维护及重要 feature 的讨论;
|
||||
- 参与 code review;
|
||||
|
||||
### 1.3、Contributor
|
||||
|
||||
Contributor 是对 KnowStreaming 项目有贡献的个人,标准为:
|
||||
|
||||
- 提交过 PR 并被合并;
|
||||
|
||||
---
|
||||
|
||||
## 2、贡献者名单
|
||||
|
||||
开源贡献者名单(不定期更新)
|
||||
|
||||
在名单内,但是没有收到贡献者礼品的同学,可以联系:szzdzhp001
|
||||
|
||||
| 姓名 | Github | 角色 | 公司 |
|
||||
| ------------------- | ---------------------------------------------------------- | ----------- | -------- |
|
||||
| 张亮 | [@zhangliangboy](https://github.com/zhangliangboy) | Maintainer | 滴滴出行 |
|
||||
| 谢鹏 | [@PenceXie](https://github.com/PenceXie) | Maintainer | 滴滴出行 |
|
||||
| 赵情融 | [@zqrferrari](https://github.com/zqrferrari) | Maintainer | 滴滴出行 |
|
||||
| 石臻臻 | [@shirenchuang](https://github.com/shirenchuang) | Maintainer | 滴滴出行 |
|
||||
| 曾巧 | [@ZQKC](https://github.com/ZQKC) | Maintainer | 滴滴出行 |
|
||||
| 孙超 | [@lucasun](https://github.com/lucasun) | Maintainer | 滴滴出行 |
|
||||
| 洪华驰 | [@brodiehong](https://github.com/brodiehong) | Maintainer | 滴滴出行 |
|
||||
| 许喆 | [@potaaaaaato](https://github.com/potaaaaaato) | Committer | 滴滴出行 |
|
||||
| 郭宇航 | [@GraceWalk](https://github.com/GraceWalk) | Committer | 滴滴出行 |
|
||||
| 李伟 | [@velee](https://github.com/velee) | Committer | 滴滴出行 |
|
||||
| 张占昌 | [@zzccctv](https://github.com/zzccctv) | Committer | 滴滴出行 |
|
||||
| 王东方 | [@wangdongfang-aden](https://github.com/wangdongfang-aden) | Committer | 滴滴出行 |
|
||||
| 王耀波 | [@WYAOBO](https://github.com/WYAOBO) | Committer | 滴滴出行 |
|
||||
| 赵寅锐 | [@ZHAOYINRUI](https://github.com/ZHAOYINRUI) | Maintainer | 字节跳动 |
|
||||
| haoqi123 | [@haoqi123](https://github.com/haoqi123) | Contributor | 前程无忧 |
|
||||
| chaixiaoxue | [@chaixiaoxue](https://github.com/chaixiaoxue) | Contributor | SYNNEX |
|
||||
| 陆晗 | [@luhea](https://github.com/luhea) | Contributor | 竞技世界 |
|
||||
| Mengqi777 | [@Mengqi777](https://github.com/Mengqi777) | Contributor | 腾讯 |
|
||||
| ruanliang-hualun | [@ruanliang-hualun](https://github.com/ruanliang-hualun) | Contributor | 网易 |
|
||||
| 17hao | [@17hao](https://github.com/17hao) | Contributor | |
|
||||
| Huyueeer | [@Huyueeer](https://github.com/Huyueeer) | Contributor | INVENTEC |
|
||||
| lomodays207 | [@lomodays207](https://github.com/lomodays207) | Contributor | 建信金科 |
|
||||
| Super .Wein(星痕) | [@superspeedone](https://github.com/superspeedone) | Contributor | 韵达 |
|
||||
| Hongten | [@Hongten](https://github.com/Hongten) | Contributor | Shopee |
|
||||
| 徐正熙 | [@hyper-xx](https://github.com/hyper-xx) | Contributor | 滴滴出行 |
|
||||
| RichardZhengkay | [@RichardZhengkay](https://github.com/RichardZhengkay) | Contributor | 趣街 |
|
||||
| 罐子里的茶 | [@gzldc](https://github.com/gzldc) | Contributor | 道富 |
|
||||
| 陈忠玉 | [@paula](https://github.com/chenzhongyu11) | Contributor | 平安产险 |
|
||||
| 杨光 | [@yangvipguang](https://github.com/yangvipguang) | Contributor | |
|
||||
| 王亚聪 | [@wangyacongi](https://github.com/wangyacongi) | Contributor |
|
||||
| Yang Jing | [@yangbajing](https://github.com/yangbajing) | Contributor | |
|
||||
| 刘新元 Liu XinYuan | [@Liu-XinYuan](https://github.com/Liu-XinYuan) | Contributor | |
|
||||
| Joker | [@JokerQueue](https://github.com/JokerQueue) | Contributor | 丰巢 |
|
||||
| Eason Lau | [@Liubey](https://github.com/Liubey) | Contributor | |
|
||||
| hailanxin | [@hailanxin](https://github.com/hailanxin) | Contributor | |
|
||||
| Qi Zhang | [@zzzhangqi](https://github.com/zzzhangqi) | Contributor | 好雨科技 |
|
||||
| fengxsong | [@fengxsong](https://github.com/fengxsong) | Contributor | |
|
||||
| 谢晓东 | [@Strangevy](https://github.com/Strangevy) | Contributor | 花生日记 |
|
||||
| ZhaoXinlong | [@ZhaoXinlong](https://github.com/ZhaoXinlong) | Contributor | |
|
||||
| xuehaipeng | [@xuehaipeng](https://github.com/xuehaipeng) | Contributor | |
|
||||
| 孔令续 | [@mrazkong](https://github.com/mrazkong) | Contributor | |
|
||||
| pierre xiong | [@pierre94](https://github.com/pierre94) | Contributor | |
|
||||
| PengShuaixin | [@PengShuaixin](https://github.com/PengShuaixin) | Contributor | |
|
||||
| 梁壮 | [@lz](https://github.com/silent-night-no-trace) | Contributor | |
|
||||
| 张晓寅 | [@ahu0605](https://github.com/ahu0605) | Contributor | 电信数智 |
|
||||
| 黄海婷 | [@Huanghaiting](https://github.com/Huanghaiting) | Contributor | 云徙科技 |
|
||||
| 任祥德 | [@RenChauncy](https://github.com/RenChauncy) | Contributor | 探马企服 |
|
||||
| 胡圣林 | [@slhu997](https://github.com/slhu997) | Contributor | |
|
||||
| 史泽颖 | [@shizeying](https://github.com/shizeying) | Contributor | |
|
||||
| 王玉博 | [@Wyb7290](https://github.com/Wyb7290) | Committer | |
|
||||
| 伍璇 | [@Luckywustone](https://github.com/Luckywustone) | Contributor ||
|
||||
| 邓苑 | [@CatherineDY](https://github.com/CatherineDY) | Contributor ||
|
||||
| 封琼凤 | [@fengqiongfeng](https://github.com/fengqiongfeng) | Committer ||
|
||||
168
docs/contribute_guide/贡献指南.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# 贡献指南
|
||||
|
||||
- [贡献指南](#贡献指南)
|
||||
- [1、行为准则](#1行为准则)
|
||||
- [2、仓库规范](#2仓库规范)
|
||||
- [2.1、Issue 规范](#21issue-规范)
|
||||
- [2.2、Commit-Log 规范](#22commit-log-规范)
|
||||
- [2.3、Pull-Request 规范](#23pull-request-规范)
|
||||
- [3、操作示例](#3操作示例)
|
||||
- [3.1、初始化环境](#31初始化环境)
|
||||
- [3.2、认领问题](#32认领问题)
|
||||
- [3.3、处理问题 \& 提交解决](#33处理问题--提交解决)
|
||||
- [3.4、请求合并](#34请求合并)
|
||||
- [4、常见问题](#4常见问题)
|
||||
- [4.1、如何将多个 Commit-Log 合并为一个?](#41如何将多个-commit-log-合并为一个)
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
欢迎 👏🏻 👏🏻 👏🏻 来到 `KnowStreaming`。本文档是关于如何为 `KnowStreaming` 做出贡献的指南。如果您发现不正确或遗漏的内容, 请留下您的意见/建议。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 1、行为准则
|
||||
|
||||
请务必阅读并遵守我们的:[行为准则](https://github.com/didi/KnowStreaming/blob/master/CODE_OF_CONDUCT.md)。
|
||||
|
||||
|
||||
## 2、仓库规范
|
||||
|
||||
### 2.1、Issue 规范
|
||||
|
||||
按要求,在 [创建Issue](https://github.com/didi/KnowStreaming/issues/new/choose) 中创建ISSUE即可。
|
||||
|
||||
需要重点说明的是:
|
||||
- 提供出现问题的环境信息,包括使用的系统,使用的KS版本等;
|
||||
- 提供出现问题的复现方式;
|
||||
|
||||
|
||||
### 2.2、Commit-Log 规范
|
||||
|
||||
`Commit-Log` 包含三部分 `Header`、`Body`、`Footer`。其中 `Header` 是必须的,格式固定,`Body` 在变更有必要详细解释时使用。
|
||||
|
||||
|
||||
**1、`Header` 规范**
|
||||
|
||||
`Header` 格式为 `[Type]Message`, 主要有三部分组成,分别是`Type`、`Message`,
|
||||
|
||||
- `Type`:说明这个提交是哪一个类型的,比如有 Bugfix、Feature、Optimize等;
|
||||
- `Message`:说明提交的信息,比如修复xx问题;
|
||||
|
||||
|
||||
实际例子:[`[Bugfix]修复新接入的集群,Controller-Host不显示的问题`](https://github.com/didi/KnowStreaming/pull/933/commits)
|
||||
|
||||
|
||||
|
||||
**2、`Body` 规范**
|
||||
|
||||
一般不需要,如果解决了较复杂问题,或者代码较多,需要 `Body` 说清楚解决的问题,解决的思路等信息。
|
||||
|
||||
---
|
||||
|
||||
**3、实际例子**
|
||||
|
||||
```
|
||||
[Optimize]优化 MySQL & ES 测试容器的初始化
|
||||
|
||||
主要的变更
|
||||
1、knowstreaming/knowstreaming-manager 容器;
|
||||
2、knowstreaming/knowstreaming-mysql 容器调整为使用 mysql:5.7 容器;
|
||||
3、初始化 mysql:5.7 容器后,增加初始化 MySQL 表及数据的动作;
|
||||
|
||||
被影响的变更:
|
||||
1、移动 km-dist/init/sql 下的MySQL初始化脚本至 km-persistence/src/main/resource/sql 下,以便项目测试时加载到所需的初始化 SQL;
|
||||
2、删除无用的 km-dist/init/template 目录;
|
||||
3、因为 km-dist/init/sql 和 km-dist/init/template 目录的调整,因此也调整 ReleaseKnowStreaming.xml 内的文件内容;
|
||||
```
|
||||
|
||||
|
||||
**TODO : 后续有兴趣的同学,可以考虑引入 Git 的 Hook 进行更好的 Commit-Log 的管理。**
|
||||
|
||||
|
||||
### 2.3、Pull-Request 规范
|
||||
|
||||
详细见:[PULL-REQUEST 模版](../../.github/PULL_REQUEST_TEMPLATE.md)
|
||||
|
||||
需要重点说明的是:
|
||||
|
||||
- <font color=red > 任何 PR 都必须与有效 ISSUE 相关联。否则, PR 将被拒绝;</font>
|
||||
- <font color=red> 一个分支只修改一件事,一个 PR 只修改一件事;</font>
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 3、操作示例
|
||||
|
||||
本节主要介绍对 `KnowStreaming` 进行代码贡献时,相关的操作方式及操作命令。
|
||||
|
||||
名词说明:
|
||||
- 主仓库:https://github.com/didi/KnowStreaming 这个仓库为主仓库。
|
||||
- 分仓库:Fork 到自己账号下的 KnowStreaming 仓库为分仓库;
|
||||
|
||||
|
||||
### 3.1、初始化环境
|
||||
|
||||
1. `Fork KnowStreaming` 主仓库至自己账号下,见 https://github.com/didi/KnowStreaming 地址右上角的 `Fork` 按钮;
|
||||
2. 克隆分仓库至本地:`git clone git@github.com:xxxxxxx/KnowStreaming.git`,该仓库的简写名通常是`origin`;
|
||||
3. 添加主仓库至本地:`git remote add upstream https://github.com/didi/KnowStreaming`,`upstream`是主仓库在本地的简写名,可以随意命名,前后保持一致即可;
|
||||
4. 拉取主仓库代码:`git fetch upstream`;
|
||||
5. 拉取分仓库代码:`git fetch origin`;
|
||||
6. 将主仓库的`master`分支,拉取到本地并命名为`github_master`:`git checkout -b github_master upstream/master`;
|
||||
|
||||
最后,我们来看一下初始化完成之后的大致效果,具体如下图所示:
|
||||

|
||||
|
||||
|
||||
至此,我们的环境就初始化好了。后续,`github_master` 分支就是主仓库的`master`分支,我们可以使用`git pull`拉取该分支的最新代码,还可以使用`git checkout -b xxx`拉取我们想要的分支。
|
||||
|
||||
|
||||
|
||||
### 3.2、认领问题
|
||||
|
||||
在文末评论说明自己要处理该问题即可,具体如下图所示:
|
||||
|
||||

|
||||
|
||||
|
||||
### 3.3、处理问题 & 提交解决
|
||||
|
||||
本节主要介绍一下处理问题 & 提交解决过程中的分支管理,具体如下图所示:
|
||||
|
||||

|
||||
|
||||
1. 切换到主分支:`git checkout github_master`;
|
||||
2. 主分支拉最新代码:`git pull`;
|
||||
3. 基于主分支拉新分支:`git checkout -b fix_928`;
|
||||
4. 提交代码,按照 commit 的规范进行提交,例如:`git commit -m "[Optimize]优化xxx问题"`;
|
||||
5. 提交到自己远端仓库:`git push --set-upstream origin fix_928`;
|
||||
6. `GitHub` 页面发起 `Pull Request` 请求,管理员合入主仓库。这部分详细见下一节;
|
||||
|
||||
|
||||
### 3.4、请求合并
|
||||
|
||||
代码在提交到 `GitHub` 分仓库之后,就可以在 `GitHub` 的网站创建 `Pull Request`,申请将代码合入主仓库了。 `Pull Request` 具体见下图所示:
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
[Pull Request 创建的例子](https://github.com/didi/KnowStreaming/pull/945)
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 4、常见问题
|
||||
|
||||
### 4.1、如何将多个 Commit-Log 合并为一个?
|
||||
|
||||
可以不需要将多个commit合并为一个,如果要合并,可以使用 `git rebase -i` 命令进行解决。
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,575 +0,0 @@
|
||||
--
|
||||
-- Table structure for table `account`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `account`;
|
||||
CREATE TABLE `account` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`username` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '用户名',
|
||||
`password` varchar(128) NOT NULL DEFAULT '' COMMENT '密码',
|
||||
`role` tinyint(8) NOT NULL DEFAULT '0' COMMENT '角色类型, 0:普通用户 1:研发 2:运维',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '0标识使用中,-1标识已废弃',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_username` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='账号表';
|
||||
INSERT INTO account(username, password, role) VALUES ('admin', '21232f297a57a5a743894a0e4a801fc3', 2);
|
||||
|
||||
--
|
||||
-- Table structure for table `app`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `app`;
|
||||
CREATE TABLE `app` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`app_id` varchar(128) NOT NULL DEFAULT '' COMMENT '应用id',
|
||||
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '应用名称',
|
||||
`password` varchar(256) NOT NULL DEFAULT '' COMMENT '应用密码',
|
||||
`type` int(11) NOT NULL DEFAULT '0' COMMENT '类型, 0:普通用户, 1:超级用户',
|
||||
`applicant` varchar(64) NOT NULL DEFAULT '' COMMENT '申请人',
|
||||
`principals` text COMMENT '应用负责人',
|
||||
`description` text COMMENT '应用描述',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_name` (`name`),
|
||||
UNIQUE KEY `uniq_app_id` (`app_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='应用信息';
|
||||
INSERT INTO app(app_id, name, password, type, applicant, principals, description) VALUES ('km-admin-tmp', 'km-admin-tmp', '123456', 0, 'admin', 'admin', '临时应用');
|
||||
|
||||
|
||||
--
|
||||
-- Table structure for table `authority`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `authority`;
|
||||
CREATE TABLE `authority` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`app_id` varchar(128) NOT NULL DEFAULT '' COMMENT '应用id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`access` int(11) NOT NULL DEFAULT '0' COMMENT '0:无权限, 1:读, 2:写, 3:读写',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_app_id_cluster_id_topic_name` (`app_id`,`cluster_id`,`topic_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='权限信息(kafka-manager)';
|
||||
|
||||
--
|
||||
-- Table structure for table `broker`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `broker`;
|
||||
CREATE TABLE `broker` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerid',
|
||||
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'broker主机名',
|
||||
`port` int(16) NOT NULL DEFAULT '-1' COMMENT 'broker端口',
|
||||
`timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT '启动时间',
|
||||
`max_avg_bytes_in` bigint(20) NOT NULL DEFAULT '-1' COMMENT '峰值的均值流量',
|
||||
`version` varchar(128) NOT NULL DEFAULT '' COMMENT 'broker版本',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 0有效,-1无效',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_broker_id` (`cluster_id`,`broker_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='broker信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `broker_metrics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `broker_metrics`;
|
||||
CREATE TABLE `broker_metrics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerid',
|
||||
`metrics` text COMMENT '指标',
|
||||
`messages_in` double(53,2) NOT NULL DEFAULT '0.00' COMMENT '每秒消息数流入',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_cluster_id_broker_id_gmt_create` (`cluster_id`,`broker_id`,`gmt_create`),
|
||||
KEY `idx_gmt_create` (`gmt_create`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='broker-metric信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `cluster`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `cluster`;
|
||||
CREATE TABLE `cluster` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '集群id',
|
||||
`cluster_name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群名称',
|
||||
`zookeeper` varchar(512) NOT NULL DEFAULT '' COMMENT 'zk地址',
|
||||
`bootstrap_servers` varchar(512) NOT NULL DEFAULT '' COMMENT 'server地址',
|
||||
`kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本',
|
||||
`security_properties` text COMMENT '安全认证参数',
|
||||
`status` tinyint(4) NOT NULL DEFAULT '1' COMMENT ' 监控标记, 0表示未监控, 1表示监控中',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_name` (`cluster_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='cluster信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `cluster_metrics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `cluster_metrics`;
|
||||
CREATE TABLE `cluster_metrics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`metrics` text COMMENT '指标',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_cluster_id_gmt_create` (`cluster_id`,`gmt_create`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='clustermetrics信息';
|
||||
|
||||
--
|
||||
-- Table structure for table `cluster_tasks`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `cluster_tasks`;
|
||||
CREATE TABLE `cluster_tasks` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`uuid` varchar(128) NOT NULL DEFAULT '' COMMENT '任务UUID',
|
||||
`cluster_id` bigint(128) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`task_type` varchar(128) NOT NULL DEFAULT '' COMMENT '任务类型',
|
||||
`kafka_package` text COMMENT 'kafka包',
|
||||
`kafka_package_md5` varchar(128) NOT NULL DEFAULT '' COMMENT 'kafka包的md5',
|
||||
`server_properties` text COMMENT 'kafkaserver配置',
|
||||
`server_properties_md5` varchar(128) NOT NULL DEFAULT '' COMMENT '配置文件的md5',
|
||||
`agent_task_id` bigint(128) NOT NULL DEFAULT '-1' COMMENT '任务id',
|
||||
`agent_rollback_task_id` bigint(128) NOT NULL DEFAULT '-1' COMMENT '回滚任务id',
|
||||
`host_list` text COMMENT '升级的主机',
|
||||
`pause_host_list` text COMMENT '暂停点',
|
||||
`rollback_host_list` text COMMENT '回滚机器列表',
|
||||
`rollback_pause_host_list` text COMMENT '回滚暂停机器列表',
|
||||
`operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`task_status` int(11) NOT NULL DEFAULT '0' COMMENT '任务状态',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群任务(集群升级部署)';
|
||||
|
||||
--
|
||||
-- Table structure for table `config`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `config`;
|
||||
CREATE TABLE `config` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`config_key` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '配置key',
|
||||
`config_value` text COMMENT '配置value',
|
||||
`config_description` text COMMENT '备注说明',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '0标识使用中,-1标识已废弃',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_config_key` (`config_key`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='配置表';
|
||||
|
||||
--
|
||||
-- Table structure for table `controller`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `controller`;
|
||||
CREATE TABLE `controller` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerid',
|
||||
`host` varchar(256) NOT NULL DEFAULT '' COMMENT '主机名',
|
||||
`timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'controller变更时间',
|
||||
`version` int(16) NOT NULL DEFAULT '-1' COMMENT 'controller格式版本',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_broker_id_timestamp` (`cluster_id`,`broker_id`,`timestamp`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='controller记录表';
|
||||
|
||||
--
|
||||
-- Table structure for table `gateway_config`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `gateway_config`;
|
||||
CREATE TABLE `gateway_config` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`type` varchar(128) NOT NULL DEFAULT '' COMMENT '配置类型',
|
||||
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '配置名称',
|
||||
`value` text COMMENT '配置值',
|
||||
`version` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT '版本信息',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_type_name` (`type`,`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='gateway配置';
|
||||
|
||||
--
|
||||
-- Table structure for table `heartbeat`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `heartbeat`;
|
||||
CREATE TABLE `heartbeat` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`ip` varchar(128) NOT NULL DEFAULT '' COMMENT '主机ip',
|
||||
`hostname` varchar(256) NOT NULL DEFAULT '' COMMENT '主机名',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_ip` (`ip`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='心跳信息';
|
||||
|
||||
--
|
||||
-- Table structure for table `kafka_acl`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `kafka_acl`;
|
||||
CREATE TABLE `kafka_acl` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`app_id` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '用户id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`access` int(11) NOT NULL DEFAULT '0' COMMENT '0:无权限, 1:读, 2:写, 3:读写',
|
||||
`operation` int(11) NOT NULL DEFAULT '0' COMMENT '0:创建, 1:更新 2:删除, 以最新的一条数据为准',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='权限信息(kafka-broker)';
|
||||
|
||||
--
|
||||
-- Table structure for table `kafka_bill`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `kafka_bill`;
|
||||
CREATE TABLE `kafka_bill` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`principal` varchar(64) NOT NULL DEFAULT '' COMMENT '负责人',
|
||||
`quota` double(53,2) NOT NULL DEFAULT '0.00' COMMENT '配额, 单位mb/s',
|
||||
`cost` double(53,2) NOT NULL DEFAULT '0.00' COMMENT '成本, 单位元',
|
||||
`cost_type` int(16) NOT NULL DEFAULT '0' COMMENT '成本类型, 0:共享集群, 1:独享集群, 2:独立集群',
|
||||
`gmt_day` varchar(64) NOT NULL DEFAULT '' COMMENT '计价的日期, 例如2019-02-02的计价结果',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_topic_name_gmt_day` (`cluster_id`,`topic_name`,`gmt_day`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='kafka账单';
|
||||
|
||||
--
|
||||
-- Table structure for table `kafka_file`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `kafka_file`;
|
||||
CREATE TABLE `kafka_file` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`storage_name` varchar(128) NOT NULL DEFAULT '' COMMENT '存储位置',
|
||||
`file_name` varchar(128) NOT NULL DEFAULT '' COMMENT '文件名',
|
||||
`file_md5` varchar(256) NOT NULL DEFAULT '' COMMENT '文件md5',
|
||||
`file_type` int(16) NOT NULL DEFAULT '-1' COMMENT '0:kafka压缩包, 1:kafkaserver配置',
|
||||
`description` text COMMENT '备注信息',
|
||||
`operator` varchar(64) NOT NULL DEFAULT '' COMMENT '创建用户',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态, 0:正常, -1:删除',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_file_name_storage_name` (`cluster_id`,`file_name`,`storage_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='文件管理';
|
||||
|
||||
--
|
||||
-- Table structure for table `kafka_user`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `kafka_user`;
|
||||
CREATE TABLE `kafka_user` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`app_id` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '应用id',
|
||||
`password` varchar(256) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '密码',
|
||||
`user_type` int(11) NOT NULL DEFAULT '0' COMMENT '0:普通用户, 1:超级用户',
|
||||
`operation` int(11) NOT NULL DEFAULT '0' COMMENT '0:创建, 1:更新 2:删除, 以最新一条的记录为准',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='kafka用户表';
|
||||
|
||||
--
|
||||
-- Table structure for table `logical_cluster`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `logical_cluster`;
|
||||
CREATE TABLE `logical_cluster` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
|
||||
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
|
||||
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
|
||||
`description` text COMMENT '备注说明',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `monitor_rule`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `monitor_rule`;
|
||||
CREATE TABLE `monitor_rule` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '告警名称',
|
||||
`strategy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '监控id',
|
||||
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'appid',
|
||||
`operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='监控规则';
|
||||
|
||||
--
|
||||
-- Table structure for table `operate_record`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `operate_record`;
|
||||
CREATE TABLE `operate_record` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`module_id` int(16) NOT NULL DEFAULT '-1' COMMENT '模块类型, 0:topic, 1:应用, 2:配额, 3:权限, 4:集群, -1:未知',
|
||||
`operate_id` int(16) NOT NULL DEFAULT '-1' COMMENT '操作类型, 0:新增, 1:删除, 2:修改',
|
||||
`resource` varchar(256) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称、app名称',
|
||||
`content` text COMMENT '操作内容',
|
||||
`operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_module_id_operate_id_operator` (`module_id`,`operate_id`,`operator`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='操作记录';
|
||||
|
||||
--
|
||||
-- Table structure for table `reassign_task`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `reassign_task`;
|
||||
CREATE TABLE `reassign_task` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`task_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '任务ID',
|
||||
`name` varchar(256) NOT NULL DEFAULT '' COMMENT '任务名称',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'Topic名称',
|
||||
`partitions` text COMMENT '分区',
|
||||
`reassignment_json` text COMMENT '任务参数',
|
||||
`real_throttle` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值',
|
||||
`max_throttle` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流上限',
|
||||
`min_throttle` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流下限',
|
||||
`begin_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '开始时间',
|
||||
`operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`description` varchar(256) NOT NULL DEFAULT '' COMMENT '备注说明',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间',
|
||||
`original_retention_time` bigint(20) NOT NULL DEFAULT '86400000' COMMENT 'Topic存储时间',
|
||||
`reassign_retention_time` bigint(20) NOT NULL DEFAULT '86400000' COMMENT '迁移时的存储时间',
|
||||
`src_brokers` text COMMENT '源Broker',
|
||||
`dest_brokers` text COMMENT '目标Broker',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic迁移信息';
|
||||
|
||||
--
|
||||
-- Table structure for table `region`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `region`;
|
||||
CREATE TABLE `region` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`name` varchar(192) NOT NULL DEFAULT '' COMMENT 'region名称',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`broker_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'broker列表',
|
||||
`capacity` bigint(20) NOT NULL DEFAULT '0' COMMENT '容量(B/s)',
|
||||
`real_used` bigint(20) NOT NULL DEFAULT '0' COMMENT '实际使用量(B/s)',
|
||||
`estimate_used` bigint(20) NOT NULL DEFAULT '0' COMMENT '预估使用量(B/s)',
|
||||
`description` text COMMENT '备注说明',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态,0正常,1已满',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='region信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic`;
|
||||
CREATE TABLE `topic` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'topic所属appid',
|
||||
`peak_bytes_in` bigint(20) NOT NULL DEFAULT '0' COMMENT '峰值流量',
|
||||
`description` text COMMENT '备注信息',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_topic_name` (`cluster_id`,`topic_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_app_metrics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_app_metrics`;
|
||||
CREATE TABLE `topic_app_metrics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'appid',
|
||||
`metrics` text COMMENT '指标',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_cluster_id_topic_name_app_id_gmt_create` (`cluster_id`,`topic_name`,`app_id`,`gmt_create`),
|
||||
KEY `idx_gmt_create` (`gmt_create`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic app metrics';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_connections`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_connections`;
|
||||
CREATE TABLE `topic_connections` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '应用id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`type` varchar(16) NOT NULL DEFAULT '' COMMENT 'producer or consumer',
|
||||
`ip` varchar(32) NOT NULL DEFAULT '' COMMENT 'ip地址',
|
||||
`client_version` varchar(8) NOT NULL DEFAULT '' COMMENT '客户端版本',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_app_id_cluster_id_topic_name_type_ip_client_version` (`app_id`,`cluster_id`,`topic_name`,`type`,`ip`,`client_version`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic连接信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_expired`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_expired`;
|
||||
CREATE TABLE `topic_expired` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`produce_connection_num` bigint(20) NOT NULL DEFAULT '0' COMMENT '发送连接数',
|
||||
`fetch_connection_num` bigint(20) NOT NULL DEFAULT '0' COMMENT '消费连接数',
|
||||
`expired_day` bigint(20) NOT NULL DEFAULT '0' COMMENT '过期天数',
|
||||
`gmt_retain` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '保留截止时间',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '-1:可下线, 0:过期待通知, 1+:已通知待反馈',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_topic_name` (`cluster_id`,`topic_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic过期信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_metrics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_metrics`;
|
||||
CREATE TABLE `topic_metrics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`metrics` text COMMENT '指标数据JSON',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_cluster_id_topic_name_gmt_create` (`cluster_id`,`topic_name`,`gmt_create`),
|
||||
KEY `idx_gmt_create` (`gmt_create`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topicmetrics表';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_report`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_report`;
|
||||
CREATE TABLE `topic_report` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '开始上报时间',
|
||||
`end_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '结束上报时间',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_topic_name` (`cluster_id`,`topic_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='开启jmx采集的topic';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_request_time_metrics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_request_time_metrics`;
|
||||
CREATE TABLE `topic_request_time_metrics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`metrics` text COMMENT '指标',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_cluster_id_topic_name_gmt_create` (`cluster_id`,`topic_name`,`gmt_create`),
|
||||
KEY `idx_gmt_create` (`gmt_create`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic请求耗时信息';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_statistics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_statistics`;
|
||||
CREATE TABLE `topic_statistics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称',
|
||||
`offset_sum` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'offset和',
|
||||
`max_avg_bytes_in` double(53,2) NOT NULL DEFAULT '-1.00' COMMENT '峰值的均值流量',
|
||||
`gmt_day` varchar(64) NOT NULL DEFAULT '' COMMENT '日期2020-03-30的形式',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`max_avg_messages_in` double(53,2) NOT NULL DEFAULT '-1.00' COMMENT '峰值的均值消息条数',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_topic_name_gmt_day` (`cluster_id`,`topic_name`,`gmt_day`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic统计信息表';
|
||||
|
||||
--
|
||||
-- Table structure for table `topic_throttled_metrics`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `topic_throttled_metrics`;
|
||||
CREATE TABLE `topic_throttled_metrics` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic name',
|
||||
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'app',
|
||||
`produce_throttled` tinyint(8) NOT NULL DEFAULT '0' COMMENT '是否是生产耗时',
|
||||
`fetch_throttled` tinyint(8) NOT NULL DEFAULT '0' COMMENT '是否是消费耗时',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_cluster_id_topic_name_app_id` (`cluster_id`,`topic_name`,`app_id`),
|
||||
KEY `idx_gmt_create` (`gmt_create`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic限流信息';
|
||||
|
||||
--
|
||||
-- Table structure for table `work_order`
|
||||
--
|
||||
|
||||
-- DROP TABLE IF EXISTS `work_order`;
|
||||
CREATE TABLE `work_order` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`type` int(16) NOT NULL DEFAULT '-1' COMMENT '工单类型',
|
||||
`title` varchar(512) NOT NULL DEFAULT '' COMMENT '工单标题',
|
||||
`applicant` varchar(64) NOT NULL DEFAULT '' COMMENT '申请人',
|
||||
`description` text COMMENT '备注信息',
|
||||
`approver` varchar(64) NOT NULL DEFAULT '' COMMENT '审批人',
|
||||
`gmt_handle` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '审批时间',
|
||||
`opinion` varchar(256) NOT NULL DEFAULT '' COMMENT '审批信息',
|
||||
`extensions` text COMMENT '扩展信息',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消',
|
||||
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='工单表';
|
||||
264
docs/dev_guide/Task模块简介.md
Normal file
@@ -0,0 +1,264 @@
|
||||
# Task模块简介
|
||||
|
||||
## 1、Task简介
|
||||
|
||||
在 KnowStreaming 中(下面简称KS),Task模块主要是用于执行一些周期任务,包括Cluster、Broker、Topic等指标的定时采集,集群元数据定时更新至DB,集群状态的健康巡检等。在KS中,与Task模块相关的代码,我们都统一存放在km-task模块中。
|
||||
|
||||
Task模块是基于 LogiCommon 中的Logi-Job组件实现的任务周期执行,Logi-Job 的功能类似 XXL-Job,它是 XXL-Job 在 KnowStreaming 的内嵌实现,主要用于简化 KnowStreaming 的部署。
|
||||
Logi-Job 的任务总共有两种执行模式,分别是:
|
||||
|
||||
+ 广播模式:同一KS集群下,同一任务周期中,所有KS主机都会执行该定时任务。
|
||||
+ 抢占模式:同一KS集群下,同一任务周期中,仅有某一台KS主机会执行该任务。
|
||||
|
||||
KS集群范围定义:连接同一个DB,且application.yml中的spring.logi-job.app-name的名称一样的KS主机为同一KS集群。
|
||||
|
||||
## 2、使用指南
|
||||
|
||||
Task模块基于Logi-Job的广播模式与抢占模式,分别实现了任务的抢占执行、重复执行以及均衡执行,他们之间的差别是:
|
||||
|
||||
+ 抢占执行:同一个KS集群,同一个任务执行周期中,仅有一台KS主机执行该任务;
|
||||
+ 重复执行:同一个KS集群,同一个任务执行周期中,所有KS主机都执行该任务。比如3台KS主机,3个Kafka集群,此时每台KS主机都会去采集这3个Kafka集群的指标;
|
||||
+ 均衡执行:同一个KS集群,同一个任务执行周期中,每台KS主机仅执行该任务的一部分,所有的KS主机共同协作完成了任务。比如3台KS主机,3个Kafka集群,稳定运行情况下,每台KS主机将仅采集1个Kafka集群的指标,3台KS主机共同完成3个Kafka集群指标的采集。
|
||||
|
||||
下面我们看一下具体例子。
|
||||
|
||||
### 2.1、抢占模式——抢占执行
|
||||
|
||||
功能说明:
|
||||
|
||||
+ 同一个KS集群,同一个任务执行周期中,仅有一台KS主机执行该任务。
|
||||
|
||||
代码例子:
|
||||
|
||||
```java
|
||||
// 1、实现Job接口,重写execute方法;
|
||||
// 2、在类上添加@Task注解,并且配置好信息,指定为随机抢占模式;
|
||||
// 效果:KS集群中,每5秒,会有一台KS主机输出 "测试定时任务运行中";
|
||||
@Task(name = "TestJob",
|
||||
description = "测试定时任务",
|
||||
cron = "*/5 * * * * ?",
|
||||
autoRegister = true,
|
||||
consensual = ConsensualEnum.RANDOM, // 这里一定要设置为RANDOM
|
||||
timeout = 6 * 60)
|
||||
public class TestJob implements Job {
|
||||
|
||||
@Override
|
||||
public TaskResult execute(JobContext jobContext) throws Exception {
|
||||
|
||||
System.out.println("测试定时任务运行中");
|
||||
return new TaskResult();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 2.2、广播模式——重复执行
|
||||
|
||||
功能说明:
|
||||
|
||||
+ 同一个KS集群,同一个任务执行周期中,所有KS主机都执行该任务。比如3台KS主机,3个Kafka集群,此时每台KS主机都会去重复采集这3个Kafka集群的指标。
|
||||
|
||||
代码例子:
|
||||
|
||||
```java
|
||||
// 1、实现Job接口,重写execute方法;
|
||||
// 2、在类上添加@Task注解,并且配置好信息,指定为广播模式;
|
||||
// 效果:KS集群中,每5秒,每台KS主机都会输出 "测试定时任务运行中";
|
||||
@Task(name = "TestJob",
|
||||
description = "测试定时任务",
|
||||
cron = "*/5 * * * * ?",
|
||||
autoRegister = true,
|
||||
consensual = ConsensualEnum.BROADCAST, // 这里一定要设置为BROADCAST
|
||||
timeout = 6 * 60)
|
||||
public class TestJob implements Job {
|
||||
|
||||
@Override
|
||||
public TaskResult execute(JobContext jobContext) throws Exception {
|
||||
|
||||
System.out.println("测试定时任务运行中");
|
||||
return new TaskResult();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 2.3、广播模式——均衡执行
|
||||
|
||||
功能说明:
|
||||
|
||||
+ 同一个KS集群,同一个任务执行周期中,每台KS主机仅执行该任务的一部分,所有的KS主机共同协作完成了任务。比如3台KS主机,3个Kafka集群,稳定运行情况下,每台KS主机将仅采集1个Kafka集群的指标,3台KS主机共同完成3个Kafka集群指标的采集。
|
||||
|
||||
代码例子:
|
||||
|
||||
+ 该模式有点特殊,是KS基于Logi-Job的广播模式,做的一个扩展,以下为一个使用例子:
|
||||
|
||||
```java
|
||||
// 1、继承AbstractClusterPhyDispatchTask,实现processSubTask方法;
|
||||
// 2、在类上添加@Task注解,并且配置好信息,指定为广播模式;
|
||||
// 效果:在本样例中,每隔1分钟ks会将所有的kafka集群列表在ks集群主机内均衡拆分,每台主机会将分发到自身的Kafka集群依次执行processSubTask方法,实现KS集群的任务协同处理。
|
||||
@Task(name = "kmJobTask",
|
||||
description = "km job 模块调度执行任务",
|
||||
cron = "0 0/1 * * * ? *",
|
||||
autoRegister = true,
|
||||
consensual = ConsensualEnum.BROADCAST,
|
||||
timeout = 6 * 60)
|
||||
public class KMJobTask extends AbstractClusterPhyDispatchTask {
|
||||
|
||||
@Autowired
|
||||
private JobService jobService;
|
||||
|
||||
@Override
|
||||
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
|
||||
jobService.scheduleJobByClusterId(clusterPhy.getId());
|
||||
return TaskResult.SUCCESS;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## 3、原理简介
|
||||
|
||||
### 3.1、Task注解说明
|
||||
|
||||
```java
|
||||
public @interface Task {
|
||||
String name() default ""; //任务名称
|
||||
String description() default ""; //任务描述
|
||||
String owner() default "system"; //拥有者
|
||||
String cron() default ""; //定时执行的时间策略
|
||||
int retryTimes() default 0; //失败以后所能重试的最大次数
|
||||
long timeout() default 0; //在超时时间里重试
|
||||
//是否自动注册任务到数据库中
|
||||
//如果设置为false,需要手动去数据库km_task表注册定时任务信息。数据库记录和@Task注解缺一不可
|
||||
boolean autoRegister() default false;
|
||||
//执行模式:广播、随机抢占
|
||||
//广播模式:同一集群下的所有服务器都会执行该定时任务
|
||||
//随机抢占模式:同一集群下随机一台服务器执行该任务
|
||||
ConsensualEnum consensual() default ConsensualEnum.RANDOM;
|
||||
}
|
||||
```
|
||||
|
||||
### 3.2、数据库表介绍
|
||||
|
||||
+ logi_task:记录项目中的定时任务信息,一个定时任务对应一条记录。
|
||||
+ logi_job:具体任务执行信息。
|
||||
+ logi_job_log:定时任务的执行日志。
|
||||
+ logi_worker:记录机器信息,实现集群控制。
|
||||
|
||||
### 3.3、均衡执行简介
|
||||
|
||||
#### 3.3.1、类关系图
|
||||
|
||||
这里以KMJobTask为例,简单介绍KM中的定时任务实现逻辑。
|
||||
|
||||

|
||||
|
||||
+ Job:使用logi组件实现定时任务,必须实现该接口。
|
||||
+ Comparable & EntifyIdInterface:比较接口,实现任务的排序逻辑。
|
||||
+ AbstractDispatchTask:实现广播模式下,任务的均衡分发。
|
||||
+ AbstractClusterPhyDispatchTask:对分发到当前服务器的集群列表进行枚举。
|
||||
+ KMJobTask:实现对单个集群的定时任务处理。
|
||||
|
||||
#### 3.3.2、关键类代码
|
||||
|
||||
+ **AbstractDispatchTask类**
|
||||
|
||||
```java
|
||||
// 实现Job接口的抽象类,进行任务的负载均衡执行
|
||||
public abstract class AbstractDispatchTask<E extends Comparable & EntifyIdInterface> implements Job {
|
||||
|
||||
// 罗列所有的任务
|
||||
protected abstract List<E> listAllTasks();
|
||||
|
||||
// 执行被分配给该KS主机的任务
|
||||
protected abstract TaskResult processTask(List<E> subTaskList, long triggerTimeUnitMs);
|
||||
|
||||
// 被Logi-Job触发执行该方法
|
||||
// 该方法进行任务的分配
|
||||
@Override
|
||||
public TaskResult execute(JobContext jobContext) {
|
||||
try {
|
||||
|
||||
long triggerTimeUnitMs = System.currentTimeMillis();
|
||||
|
||||
// 获取所有的任务
|
||||
List<E> allTaskList = this.listAllTasks();
|
||||
|
||||
// 计算当前KS机器需要执行的任务
|
||||
List<E> subTaskList = this.selectTask(allTaskList, jobContext.getAllWorkerCodes(), jobContext.getCurrentWorkerCode());
|
||||
|
||||
// 进行任务处理
|
||||
return this.processTask(subTaskList, triggerTimeUnitMs);
|
||||
} catch (Exception e) {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
+ **AbstractClusterPhyDispatchTask类**
|
||||
|
||||
```java
|
||||
// 继承AbstractDispatchTask的抽象类,对Kafka集群进行负载均衡执行
|
||||
public abstract class AbstractClusterPhyDispatchTask extends AbstractDispatchTask<ClusterPhy> {
|
||||
|
||||
// 执行被分配的任务,具体由子类实现
|
||||
protected abstract TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception;
|
||||
|
||||
// 返回所有的Kafka集群
|
||||
@Override
|
||||
public List<ClusterPhy> listAllTasks() {
|
||||
return clusterPhyService.listAllClusters();
|
||||
}
|
||||
|
||||
// 执行被分配给该KS主机的Kafka集群任务
|
||||
@Override
|
||||
public TaskResult processTask(List<ClusterPhy> subTaskList, long triggerTimeUnitMs) { // ... }
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
+ **KMJobTask类**
|
||||
|
||||
```java
|
||||
// 加上@Task注解,并配置任务执行信息
|
||||
@Task(name = "kmJobTask",
|
||||
description = "km job 模块调度执行任务",
|
||||
cron = "0 0/1 * * * ? *",
|
||||
autoRegister = true,
|
||||
consensual = ConsensualEnum.BROADCAST,
|
||||
timeout = 6 * 60)
|
||||
// 继承AbstractClusterPhyDispatchTask类
|
||||
public class KMJobTask extends AbstractClusterPhyDispatchTask {
|
||||
|
||||
@Autowired
|
||||
private JobService jobService;
|
||||
|
||||
// 执行该Kafka集群的Job模块的任务
|
||||
@Override
|
||||
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
|
||||
jobService.scheduleJobByClusterId(clusterPhy.getId());
|
||||
return TaskResult.SUCCESS;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 3.3.3、均衡执行总结
|
||||
|
||||
均衡执行的实现原理总结起来就是以下几点:
|
||||
|
||||
+ Logi-Job设置为广播模式,触发所有的KS主机执行任务;
|
||||
+ 每台KS主机,被触发执行后,按照统一的规则,对任务列表,KS集群主机列表进行排序。然后按照顺序将任务列表均衡的分配给排序后的KS集群主机。KS集群稳定运行情况下,这一步保证了每台KS主机之间分配到的任务列表不重复,不丢失。
|
||||
+ 最后每台KS主机,执行被分配到的任务。
|
||||
|
||||
## 4、注意事项
|
||||
|
||||
+ 不能100%保证任务在一个周期内,且仅且执行一次,可能出现重复执行或丢失的情况,所以必须严格是且仅且执行一次的任务,不建议基于Logi-Job进行任务控制。
|
||||
+ 尽量让Logi-Job仅负责任务的触发,后续的执行建议放到自己创建的线程池中进行。
|
||||
BIN
docs/dev_guide/assets/support_kerberos_zk/need_modify_code.png
Normal file
|
After Width: | Height: | Size: 63 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/success_1.png
Normal file
|
After Width: | Height: | Size: 306 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/success_2.png
Normal file
|
After Width: | Height: | Size: 306 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/watch_user_acl.png
Normal file
|
After Width: | Height: | Size: 17 KiB |
43
docs/dev_guide/多版本兼容方案.md
Normal file
@@ -0,0 +1,43 @@
|
||||
|
||||
## 4.2、Kafka 多版本兼容方案
|
||||
|
||||
  当前 KnowStreaming 支持纳管多个版本的 kafka 集群,由于不同版本的 kafka 在指标采集、接口查询、行为操作上有些不一致,因此 KnowStreaming 需要一套机制来解决多 kafka 版本的纳管兼容性问题。
|
||||
|
||||
### 4.2.1、整体思路
|
||||
|
||||
  由于需要纳管多个 kafka 版本,而且未来还可能会纳管非 kafka 官方的版本,kafka 的版本号会存在着多种情况,所以首先要明确一个核心思想:KnowStreaming 提供尽可能多的纳管能力,但是不提供无限的纳管能力,每一个版本的 KnowStreaming 只纳管其自身声明的 kafka 版本,后续随着 KnowStreaming 自身版本的迭代,会逐步支持更多 kafka 版本的纳管接入。
|
||||
|
||||
### 4.2.2、构建版本兼容列表
|
||||
|
||||
  每一个版本的 KnowStreaming 都声明一个自身支持纳管的 kafka 版本列表,并且对 kafka 的版本号进行归一化处理,后续所有 KnowStreaming 对不同 kafka 集群的操作都和这个集群对应的版本号严格相关。
|
||||
|
||||
  KnowStreaming 对外提供自身所支持的 kafka 版本兼容列表,用以声明自身支持的版本范围。
|
||||
|
||||
  在集群接入过程中,如果希望接入当前 KnowStreaming 不支持的 kafka 版本的集群,KnowStreaming 建议选择相近的版本号接入。
|
||||
|
||||
### 4.2.3、构建版本兼容性字典
|
||||
|
||||
  在构建了 KnowStreaming 支持的 kafka 版本列表的基础上,KnowStreaming 在实现过程中,还会声明自身支持的所有兼容性,构建兼容性字典。
|
||||
|
||||
  当前 KnowStreaming 支持的 kafka 版本兼容性字典包括三个维度:
|
||||
|
||||
- 指标采集:同一个指标在不同 kafka 版本下可能获取的方式不一样,不同版本的 kafka 可能会有不同的指标,因此对于指标采集的处理需要构建兼容性字典。
|
||||
- kafka api:同一个 kafka 的操作处理的方式在不同 kafka 版本下可能存在不一致,如:topic 的创建,因此 KnowStreaming 针对不同 kafka-api 的处理需要构建兼容性字典。
|
||||
- 平台操作:KnowStreaming 在接入不同版本的 kafka 集群的时候,在平台页面上会根据不同的 kafka 版本进行不同的展示与操作控制,因此也需要构建兼容性字典。
|
||||
|
||||
兼容性字典的核心设计字段如下:
|
||||
|
||||
| 兼容性维度 | 兼容项名称 | 最小 Kafka 版本号(归一化) | 最大 Kafka 版本号(归一化) | 处理器 |
|
||||
| ---------- | ---------- | --------------------------- | --------------------------- | ------ |
|
||||
|
||||
KS-KM 根据其需要纳管的 kafka 版本,按照上述三个维度构建了完善的兼容性字典。
|
||||
|
||||
### 4.2.4、兼容性问题
|
||||
|
||||
  KS-KM 的每个版本针对需要纳管的 kafka 版本列表,事先分析各个版本的差异性和产品需求,同时 KS-KM 构建了一套专门处理兼容性的服务,来进行兼容性的注册、字典构建、处理器分发等操作,其中版本兼容性处理器是来具体处理不同 kafka 版本差异性的地方。
|
||||
|
||||

|
||||
|
||||
  如上图所示,KS-KM 的 topic 服务在面对不同 kafka 版本时,其 topic 的创建、删除、扩容由于 kafka 版本自身的差异,导致 KnowStreaming 的处理也不一样,所以需要根据不同的 kafka 版本来实现不同的兼容性处理器,同时向 KnowStreaming 的兼容服务进行兼容性的注册,构建兼容性字典,后续在 KnowStreaming 的运行过程中,针对不同的 kafka 版本即可分发到不同的处理器中执行。
|
||||
|
||||
  后续随着 KnowStreaming 产品的发展,如果有新的兼容性的地方需要增加,只需要实现新版本的处理器,增加注册项即可。
|
||||
152
docs/dev_guide/指标说明.md
Normal file
@@ -0,0 +1,152 @@
|
||||
## 3.3、指标说明
|
||||
|
||||
当前 KnowStreaming 支持针对 kafka 集群的多维度指标的采集和展示,同时也支持多个 kafka 版本的指标进行兼容,以下是 KnowStreaming 支持的指标说明。
|
||||
|
||||
现在对当前 KnowStreaming 支持的指标从指标名称、指标单位、指标说明、kafka 版本、企业/开源版指标 五个维度进行说明。
|
||||
|
||||
### 3.3.1、Cluster 指标
|
||||
|
||||
| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 |
|
||||
| ------------------------- | -------- |--------------------------------| ---------------- | --------------- |
|
||||
| HealthScore | 分 | 集群总体的健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed | 个 | 集群总体健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal | 个 | 集群总体健康检查总数 | 全部版本 | 开源版 |
|
||||
| HealthScore_Topics | 分 | 集群 Topics 的健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed_Topics | 个 | 集群 Topics 健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal_Topics | 个 | 集群 Topics 健康检查总数 | 全部版本 | 开源版 |
|
||||
| HealthScore_Brokers | 分 | 集群 Brokers 的健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed_Brokers | 个 | 集群 Brokers 健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal_Brokers | 个 | 集群 Brokers 健康检查总数 | 全部版本 | 开源版 |
|
||||
| HealthScore_Groups | 分 | 集群 Groups 的健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed_Groups | 个 | 集群 Groups 健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal_Groups | 个 | 集群 Groups 健康检查总数 | 全部版本 | 开源版 |
|
||||
| HealthScore_Cluster | 分 | 集群自身的健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed_Cluster | 个 | 集群自身健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal_Cluster | 个 | 集群自身健康检查总数 | 全部版本 | 开源版 |
|
||||
| TotalRequestQueueSize | 个 | 集群中总的请求队列数 | 全部版本 | 开源版 |
|
||||
| TotalResponseQueueSize | 个 | 集群中总的响应队列数 | 全部版本 | 开源版 |
|
||||
| EventQueueSize | 个 | 集群中 Controller 的 EventQueue 大小 | 2.0.0 及以上版本 | 开源版 |
|
||||
| ActiveControllerCount | 个 | 集群中存活的 Controller 数 | 全部版本 | 开源版 |
|
||||
| TotalProduceRequests | 个 | 集群中的 Produce 每秒请求数 | 全部版本 | 开源版 |
|
||||
| TotalLogSize | byte | 集群总的已使用的磁盘大小 | 全部版本 | 开源版 |
|
||||
| ConnectionsCount | 个 | 集群的连接(Connections)个数 | 全部版本 | 开源版 |
|
||||
| Zookeepers | 个 | 集群中存活的 zk 节点个数 | 全部版本 | 开源版 |
|
||||
| ZookeepersAvailable | 是/否 | ZK 地址是否合法 | 全部版本 | 开源版 |
|
||||
| Brokers | 个 | 集群的 broker 的总数 | 全部版本 | 开源版 |
|
||||
| BrokersAlive | 个 | 集群的 broker 的存活数 | 全部版本 | 开源版 |
|
||||
| BrokersNotAlive | 个 | 集群的 broker 的未存活数 | 全部版本 | 开源版 |
|
||||
| Replicas | 个 | 集群中 Replica 的总数 | 全部版本 | 开源版 |
|
||||
| Topics | 个 | 集群中 Topic 的总数 | 全部版本 | 开源版 |
|
||||
| Partitions | 个 | 集群的 Partitions 总数 | 全部版本 | 开源版 |
|
||||
| PartitionNoLeader | 个 | 集群中的 PartitionNoLeader 总数 | 全部版本 | 开源版 |
|
||||
| PartitionMinISR_S | 个 | 集群中的小于 PartitionMinISR 总数 | 全部版本 | 开源版 |
|
||||
| PartitionMinISR_E | 个 | 集群中的等于 PartitionMinISR 总数 | 全部版本 | 开源版 |
|
||||
| PartitionURP | 个 | 集群中的未同步的 Partition 总数 | 全部版本 | 开源版 |
|
||||
| MessagesIn | 条/s | 集群每秒消息写入条数 | 全部版本 | 开源版 |
|
||||
| Messages | 条 | 集群总的消息条数 | 全部版本 | 开源版 |
|
||||
| LeaderMessages | 条 | 集群中 leader 总的消息条数 | 全部版本 | 开源版 |
|
||||
| BytesIn | byte/s | 集群的每秒写入字节数 | 全部版本 | 开源版 |
|
||||
| BytesIn_min_5 | byte/s | 集群的每秒写入字节数,5 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesIn_min_15 | byte/s | 集群的每秒写入字节数,15 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesOut | byte/s | 集群的每秒流出字节数 | 全部版本 | 开源版 |
|
||||
| BytesOut_min_5 | byte/s | 集群的每秒流出字节数,5 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesOut_min_15 | byte/s | 集群的每秒流出字节数,15 分钟均值 | 全部版本 | 开源版 |
|
||||
| Groups | 个 | 集群中 Group 的总数 | 全部版本 | 开源版 |
|
||||
| GroupActives | 个 | 集群中 ActiveGroup 的总数 | 全部版本 | 开源版 |
|
||||
| GroupEmptys | 个 | 集群中 EmptyGroup 的总数 | 全部版本 | 开源版 |
|
||||
| GroupRebalances | 个 | 集群中 RebalanceGroup 的总数 | 全部版本 | 开源版 |
|
||||
| GroupDeads | 个 | 集群中 DeadGroup 的总数 | 全部版本 | 开源版 |
|
||||
| Alive | 是/否 | 集群是否存活,1:存活;0:没有存活 | 全部版本 | 开源版 |
|
||||
| AclEnable | 是/否 | 集群是否开启 Acl,1:是;0:否 | 全部版本 | 开源版 |
|
||||
| Acls | 个 | ACL 数 | 全部版本 | 开源版 |
|
||||
| AclUsers | 个 | ACL-KafkaUser 数 | 全部版本 | 开源版 |
|
||||
| AclTopics | 个 | ACL-Topic 数 | 全部版本 | 开源版 |
|
||||
| AclGroups | 个 | ACL-Group 数 | 全部版本 | 开源版 |
|
||||
| Jobs | 个 | 集群任务总数 | 全部版本 | 开源版 |
|
||||
| JobsRunning | 个 | 集群 running 任务总数 | 全部版本 | 开源版 |
|
||||
| JobsWaiting | 个 | 集群 waiting 任务总数 | 全部版本 | 开源版 |
|
||||
| JobsSuccess | 个 | 集群 success 任务总数 | 全部版本 | 开源版 |
|
||||
| JobsFailed | 个 | 集群 failed 任务总数 | 全部版本 | 开源版 |
|
||||
| LoadReBalanceEnable | 是/否 | 是否开启均衡, 1:是;0:否 | 全部版本 | 企业版 |
|
||||
| LoadReBalanceCpu | 是/否 | CPU 是否均衡, 1:是;0:否 | 全部版本 | 企业版 |
|
||||
| LoadReBalanceNwIn | 是/否 | BytesIn 是否均衡, 1:是;0:否 | 全部版本 | 企业版 |
|
||||
| LoadReBalanceNwOut | 是/否 | BytesOut 是否均衡, 1:是;0:否 | 全部版本 | 企业版 |
|
||||
| LoadReBalanceDisk | 是/否 | Disk 是否均衡, 1:是;0:否 | 全部版本 | 企业版 |
|
||||
|
||||
### 3.3.2、Broker 指标
|
||||
|
||||
| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 |
|
||||
| ----------------------- | -------- | ------------------------------------- | ---------- | --------------- |
|
||||
| HealthScore | 分 | Broker 健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed | 个 | Broker 健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal | 个 | Broker 健康检查总数 | 全部版本 | 开源版 |
|
||||
| TotalRequestQueueSize | 个 | Broker 的请求队列大小 | 全部版本 | 开源版 |
|
||||
| TotalResponseQueueSize | 个 | Broker 的应答队列大小 | 全部版本 | 开源版 |
|
||||
| ReplicationBytesIn | byte/s | Broker 的副本流入流量 | 全部版本 | 开源版 |
|
||||
| ReplicationBytesOut | byte/s | Broker 的副本流出流量 | 全部版本 | 开源版 |
|
||||
| MessagesIn | 条/s | Broker 的每秒消息流入条数 | 全部版本 | 开源版 |
|
||||
| TotalProduceRequests | 个/s | Broker 上 Produce 的每秒请求数 | 全部版本 | 开源版 |
|
||||
| NetworkProcessorAvgIdle | % | Broker 的网络处理器的空闲百分比 | 全部版本 | 开源版 |
|
||||
| RequestHandlerAvgIdle | % | Broker 上请求处理器的空闲百分比 | 全部版本 | 开源版 |
|
||||
| PartitionURP | 个 | Broker 上的未同步的副本的个数 | 全部版本 | 开源版 |
|
||||
| ConnectionsCount | 个 | Broker 上网络链接的个数 | 全部版本 | 开源版 |
|
||||
| BytesIn | byte/s | Broker 的每秒数据写入量 | 全部版本 | 开源版 |
|
||||
| BytesIn_min_5 | byte/s | Broker 的每秒数据写入量,5 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesIn_min_15 | byte/s | Broker 的每秒数据写入量,15 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesOut | byte/s | Broker 的每秒数据流出量 | 全部版本 | 开源版 |
|
||||
| BytesOut_min_5 | byte/s | Broker 的每秒数据流出量,5 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesOut_min_15 | byte/s | Broker 的每秒数据流出量,15 分钟均值 | 全部版本 | 开源版 |
|
||||
| ReassignmentBytesIn | byte/s | Broker 的每秒数据迁移写入量 | 全部版本 | 开源版 |
|
||||
| ReassignmentBytesOut | byte/s | Broker 的每秒数据迁移流出量 | 全部版本 | 开源版 |
|
||||
| Partitions | 个 | Broker 上的 Partition 个数 | 全部版本 | 开源版 |
|
||||
| PartitionsSkew | % | Broker 上的 Partitions 倾斜度 | 全部版本 | 开源版 |
|
||||
| Leaders | 个 | Broker 上的 Leaders 个数 | 全部版本 | 开源版 |
|
||||
| LeadersSkew | % | Broker 上的 Leaders 倾斜度 | 全部版本 | 开源版 |
|
||||
| LogSize | byte | Broker 上的消息容量大小 | 全部版本 | 开源版 |
|
||||
| Alive | 是/否 | Broker 是否存活,1:存活;0:没有存活 | 全部版本 | 开源版 |
|
||||
|
||||
### 3.3.3、Topic 指标
|
||||
|
||||
| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 |
|
||||
| --------------------- | -------- | ------------------------------------- | ---------- | --------------- |
|
||||
| HealthScore | 分 | 健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed | 个 | 健康项检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal | 个 | 健康项检查总数 | 全部版本 | 开源版 |
|
||||
| TotalProduceRequests | 条/s | Topic 的 TotalProduceRequests | 全部版本 | 开源版 |
|
||||
| BytesRejected | 个/s | Topic 的每秒写入拒绝量 | 全部版本 | 开源版 |
|
||||
| FailedFetchRequests | 个/s | Topic 的 FailedFetchRequests | 全部版本 | 开源版 |
|
||||
| FailedProduceRequests | 个/s | Topic 的 FailedProduceRequests | 全部版本 | 开源版 |
|
||||
| ReplicationCount | 个 | Topic 总的副本数 | 全部版本 | 开源版 |
|
||||
| Messages | 条 | Topic 总的消息数 | 全部版本 | 开源版 |
|
||||
| MessagesIn | 条/s | Topic 每秒消息条数 | 全部版本 | 开源版 |
|
||||
| BytesIn | byte/s | Topic 每秒消息写入字节数 | 全部版本 | 开源版 |
|
||||
| BytesIn_min_5 | byte/s | Topic 每秒消息写入字节数,5 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesIn_min_15 | byte/s | Topic 每秒消息写入字节数,15 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesOut | byte/s | Topic 每秒消息流出字节数 | 全部版本 | 开源版 |
|
||||
| BytesOut_min_5 | byte/s | Topic 每秒消息流出字节数,5 分钟均值 | 全部版本 | 开源版 |
|
||||
| BytesOut_min_15 | byte/s | Topic 每秒消息流出字节数,15 分钟均值 | 全部版本 | 开源版 |
|
||||
| LogSize | byte | Topic 的大小 | 全部版本 | 开源版 |
|
||||
| PartitionURP | 个 | Topic 未同步的副本数 | 全部版本 | 开源版 |
|
||||
|
||||
### 3.3.4、Partition 指标
|
||||
|
||||
| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 |
|
||||
| -------------- | -------- | ----------------------------------------- | ---------- | --------------- |
|
||||
| LogEndOffset | 条 | Partition 中 leader 副本的 LogEndOffset | 全部版本 | 开源版 |
|
||||
| LogStartOffset | 条 | Partition 中 leader 副本的 LogStartOffset | 全部版本 | 开源版 |
|
||||
| Messages | 条 | Partition 总的消息数 | 全部版本 | 开源版 |
|
||||
| BytesIn | byte/s | Partition 的每秒消息流入字节数 | 全部版本 | 开源版 |
|
||||
| BytesOut | byte/s | Partition 的每秒消息流出字节数 | 全部版本 | 开源版 |
|
||||
| LogSize | byte | Partition 的大小 | 全部版本 | 开源版 |
|
||||
|
||||
### 3.3.5、Group 指标
|
||||
|
||||
| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 |
|
||||
| ----------------- | -------- | -------------------------- | ---------- | --------------- |
|
||||
| HealthScore | 分 | 健康分 | 全部版本 | 开源版 |
|
||||
| HealthCheckPassed | 个 | 健康检查通过数 | 全部版本 | 开源版 |
|
||||
| HealthCheckTotal | 个 | 健康检查总数 | 全部版本 | 开源版 |
|
||||
| OffsetConsumed | 条 | Consumer 的 CommitedOffset | 全部版本 | 开源版 |
|
||||
| LogEndOffset | 条 | Consumer 的 LogEndOffset | 全部版本 | 开源版 |
|
||||
| Lag | 条 | Group 消费者的 Lag 数 | 全部版本 | 开源版 |
|
||||
| State | 个 | Group 组的状态 | 全部版本 | 开源版 |
|
||||
180
docs/dev_guide/接入ZK带认证Kafka集群.md
Normal file
@@ -0,0 +1,180 @@
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
# 接入 ZK 带认证的 Kafka 集群
|
||||
|
||||
- [接入 ZK 带认证的 Kafka 集群](#接入-zk-带认证的-kafka-集群)
|
||||
- [1、简要说明](#1简要说明)
|
||||
- [2、支持 Digest-MD5 认证](#2支持-digest-md5-认证)
|
||||
- [3、支持 Kerberos 认证](#3支持-kerberos-认证)
|
||||
|
||||
|
||||
|
||||
## 1、简要说明
|
||||
|
||||
- 1、当前 KnowStreaming 暂无页面可以直接配置 ZK 的认证信息,但是 KnowStreaming 的后端预留了 MySQL 的字段用于存储 ZK 的认证信息,用户可通过将认证信息存储至该字段,从而达到支持接入 ZK 带认证的 Kafka 集群。
|
||||
|
||||
|
||||
- 2、该字段位于 MySQL 库 ks_km_physical_cluster 表中的 zk_properties 字段,该字段的格式是:
|
||||
```json
|
||||
{
|
||||
"openSecure": false, # 是否开启认证,开启时配置为true
|
||||
"sessionTimeoutUnitMs": 15000, # session超时时间
|
||||
"requestTimeoutUnitMs": 5000, # request超时时间
|
||||
"otherProps": { # 其他配置,认证信息主要配置在该位置
|
||||
"zookeeper.sasl.clientconfig": "kafkaClusterZK1" # 例子,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- 3、实际生效的代码位置
|
||||
```java
|
||||
// 代码位置:https://github.com/didi/KnowStreaming/blob/master/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminZKClient.java
|
||||
|
||||
kafkaZkClient = KafkaZkClient.apply(
|
||||
clusterPhy.getZookeeper(),
|
||||
zkConfig.getOpenSecure(), // 是否开启认证,开启时配置为true
|
||||
zkConfig.getSessionTimeoutUnitMs(), // session超时时间
|
||||
zkConfig.getRequestTimeoutUnitMs(), // request超时时间
|
||||
5,
|
||||
Time.SYSTEM,
|
||||
"KS-ZK-ClusterPhyId-" + clusterPhyId,
|
||||
"KS-ZK-SessionExpireListener-clusterPhyId-" + clusterPhyId,
|
||||
Option.apply("KS-ZK-ClusterPhyId-" + clusterPhyId),
|
||||
Option.apply(this.getZKConfig(clusterPhyId, zkConfig.getOtherProps())) // 其他配置,认证信息主要配置在该位置
|
||||
);
|
||||
```
|
||||
|
||||
- 4、SQL例子
|
||||
```sql
|
||||
update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK1" } }' where id=集群1的ID;
|
||||
```
|
||||
|
||||
|
||||
- 5、zk_properties 字段不能覆盖所有的场景,所以实际使用过程中还可能需要在此基础之上,进行其他的调整。比如,`Digest-MD5 认证` 和 `Kerberos 认证` 都还需要修改启动脚本等。后续看能否通过修改 ZK 客户端的源码,使得 ZK 认证的相关配置能和 Kafka 认证的配置一样方便。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 2、支持 Digest-MD5 认证
|
||||
|
||||
1. 假设你有两个 Kafka 集群, 对应两个 ZK 集群;
|
||||
2. 两个 ZK 集群的认证信息如下所示
|
||||
|
||||
```bash
|
||||
# ZK1集群的认证信息,这里的 kafkaClusterZK1 可以是随意的名称,只需要和后续数据库的配置对应上即可。
|
||||
kafkaClusterZK1 {
|
||||
org.apache.zookeeper.server.auth.DigestLoginModule required
|
||||
username="zk1"
|
||||
password="zk1-passwd";
|
||||
};
|
||||
|
||||
# ZK2集群的认证信息,这里的 kafkaClusterZK2 可以是随意的名称,只需要和后续数据库的配置对应上即可。
|
||||
kafkaClusterZK2 {
|
||||
org.apache.zookeeper.server.auth.DigestLoginModule required
|
||||
username="zk2"
|
||||
password="zk2-passwd";
|
||||
};
|
||||
```
|
||||
|
||||
3. 将这两个ZK集群的认证信息存储到 `/xxx/zk_client_jaas.conf` 文件中,文件中的内容如下所示:
|
||||
|
||||
```bash
|
||||
kafkaClusterZK1 {
|
||||
org.apache.zookeeper.server.auth.DigestLoginModule required
|
||||
username="zk1"
|
||||
password="zk1-passwd";
|
||||
};
|
||||
|
||||
kafkaClusterZK2 {
|
||||
org.apache.zookeeper.server.auth.DigestLoginModule required
|
||||
username="zk2"
|
||||
password="zk2-passwd";
|
||||
};
|
||||
|
||||
```
|
||||
|
||||
4. 修改 KnowStreaming 的启动脚本
|
||||
|
||||
```bash
|
||||
# `KnowStreaming/bin/startup.sh` 中的 47 行的 JAVA_OPT 中追加如下设置
|
||||
|
||||
-Djava.security.auth.login.config=/xxx/zk_client_jaas.conf
|
||||
```
|
||||
|
||||
5. 修改 KnowStreaming 的表数据
|
||||
|
||||
```sql
|
||||
# 这里的 kafkaClusterZK1 要和 /xxx/zk_client_jaas.conf 中的对应上
|
||||
update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK1" } }' where id=集群1的ID;
|
||||
|
||||
update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK2" } }' where id=集群2的ID;
|
||||
```
|
||||
|
||||
6. 重启 KnowStreaming
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 3、支持 Kerberos 认证
|
||||
|
||||
**第一步:查看用户在ZK的ACL**
|
||||
|
||||
假设我们使用的用户是 `kafka` 这个用户。
|
||||
|
||||
- 1、查看 server.properties 的配置的 zookeeper.connect 的地址;
|
||||
- 2、使用 `zkCli.sh -server zookeeper.connect的地址` 登录到ZK页面;
|
||||
- 3、ZK页面上,执行命令 `getAcl /kafka` 查看 `kafka` 用户的权限;
|
||||
|
||||
此时,我们可以看到如下信息:
|
||||

|
||||
|
||||
`kafka` 用户需要的权限是 `cdrwa`。如果用户没有 `cdrwa` 权限的话,需要创建用户并授权,授权命令为:`setAcl`
|
||||
|
||||
|
||||
**第二步:创建Kerberos的keytab并修改 KnowStreaming 主机**
|
||||
|
||||
- 1、在 Kerberos 的域中创建 `kafka/_HOST` 的 `keytab`,并导出。例如:`kafka/dbs-kafka-test-8-53`;
|
||||
- 2、导出 keytab 后上传到安装 KS 的机器的 `/etc/keytab` 下;
|
||||
- 3、在 KS 机器上,执行 `kinit -kt zookeeper.keytab kafka/dbs-kafka-test-8-53` 看是否能进行 `Kerberos` 登录;
|
||||
- 4、可以登录后,配置 `/opt/zookeeper.jaas` 文件,例子如下:
|
||||
```bash
|
||||
Client {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=false
|
||||
serviceName="zookeeper"
|
||||
keyTab="/etc/keytab/zookeeper.keytab"
|
||||
principal="kafka/dbs-kafka-test-8-53@XXX.XXX.XXX";
|
||||
};
|
||||
```
|
||||
- 5、需要配置 `KDC-Server` 对 `KnowStreaming` 的机器开通防火墙,并在KS的机器 `/etc/host/` 配置 `kdc-server` 的 `hostname`。并将 `krb5.conf` 导入到 `/etc` 下;
|
||||
|
||||
|
||||
**第三步:修改 KnowStreaming 的配置**
|
||||
|
||||
- 1、修改数据库,开启ZK的认证
|
||||
```sql
|
||||
update ks_km_physical_cluster set zk_properties='{ "openSecure": true }' where id=集群1的ID;
|
||||
```
|
||||
|
||||
- 2、在 `KnowStreaming/bin/startup.sh` 中的47行的JAVA_OPT中追加如下设置
|
||||
```bash
|
||||
-Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/zookeeper.jaas
|
||||
```
|
||||
|
||||
- 3、重启KS集群后在 start.out 中看到如下信息,则证明Kerberos配置成功;
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
|
||||
**第四步:补充说明**
|
||||
|
||||
- 1、多Kafka集群如果用的是一样的Kerberos域的话,只需在每个`ZK`中给`kafka`用户配置`cdrwa`权限即可,这样集群初始化的时候`zkclient`都是可以认证的;
|
||||
- 2、多个Kerberos域暂时未适配;
|
||||
90
docs/dev_guide/本地源码启动手册.md
Normal file
@@ -0,0 +1,90 @@
|
||||
## 6.1、本地源码启动手册
|
||||
|
||||
### 6.1.1、打包方式
|
||||
|
||||
`Know Streaming` 采用前后端分离的开发模式,使用 Maven 对项目进行统一的构建管理。maven 在打包构建过程中,会将前后端代码一并打包生成最终的安装包。
|
||||
|
||||
`Know Streaming` 除了使用安装包启动之外,还可以通过本地源码启动完整的带前端页面的项目,下面我们正式开始介绍本地源码如何启动 `Know Streaming`。
|
||||
|
||||
### 6.1.2、环境要求
|
||||
|
||||
**系统支持**
|
||||
|
||||
`windows7+`、`Linux`、`Mac`
|
||||
|
||||
**环境依赖**
|
||||
|
||||
- Maven 3.6.3
|
||||
- Node v12.20.0
|
||||
- Java 8+
|
||||
- MySQL 5.7
|
||||
- Idea
|
||||
- Elasticsearch 7.6
|
||||
- Git
|
||||
|
||||
### 6.1.3、环境初始化
|
||||
|
||||
安装好环境信息之后,还需要初始化 MySQL 与 Elasticsearch 信息,包括:
|
||||
|
||||
- 初始化 MySQL 表及数据
|
||||
- 初始化 Elasticsearch 索引
|
||||
|
||||
具体见:[单机部署手册](../install_guide/单机部署手册.md) 中的最后一步,部署 KnowStreaming 服务中的初始化相关工作。
|
||||
|
||||
### 6.1.4、本地启动
|
||||
|
||||
**第一步:本地打包**
|
||||
|
||||
执行 `mvn install` 可对项目进行前后端同时进行打包,通过该命令,除了可以对后端进行打包之外,还可以将前端相关的静态资源文件也一并打包出来。
|
||||
|
||||
**第二步:修改配置**
|
||||
|
||||
```yaml
|
||||
# 修改 km-rest/src/main/resources/application.yml 中相关的配置
|
||||
|
||||
# 修改MySQL的配置,中间省略了一些非必需修改的配置
|
||||
spring:
|
||||
datasource:
|
||||
know-streaming:
|
||||
jdbc-url: 修改为实际MYSQL地址
|
||||
username: 修改为实际MYSQL用户名
|
||||
password: 修改为实际MYSQL密码
|
||||
logi-job:
|
||||
jdbc-url: 修改为实际MYSQL地址
|
||||
username: 修改为实际MYSQL用户名
|
||||
password: 修改为实际MYSQL密码
|
||||
logi-security:
|
||||
jdbc-url: 修改为实际MYSQL地址
|
||||
username: 修改为实际MYSQL用户名
|
||||
password: 修改为实际MYSQL密码
|
||||
|
||||
# 修改ES的配置,中间省略了一些非必需修改的配置
|
||||
es.client.address: 修改为实际ES地址
|
||||
```
|
||||
|
||||
**第三步:配置 IDEA**
|
||||
|
||||
`Know Streaming`的 Main 方法在:
|
||||
|
||||
```java
|
||||
km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/KnowStreaming.java
|
||||
```
|
||||
|
||||
IDEA 更多具体的配置如下图所示:
|
||||
|
||||
<p align="center">
|
||||
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_BW1RzgEMh4n6L4dL4ncl" width = "512" height = "318" div align=center />
|
||||
</p>
|
||||
|
||||
**第四步:启动项目**
|
||||
|
||||
最后就是启动项目,在本地 console 中输出了 `KnowStreaming-KM started` 则表示我们已经成功启动 `Know Streaming` 了。
|
||||
|
||||
### 6.1.5、本地访问
|
||||
|
||||
`Know Streaming` 启动之后,可以访问一些信息,包括:
|
||||
|
||||
- 产品页面:http://localhost:8080 ,默认账号密码:`admin` / `admin2022_` 进行登录。`v3.0.0-beta.2`版本开始,默认账号密码为`admin` / `admin`;
|
||||
- 接口地址:http://localhost:8080/swagger-ui.html 查看后端提供的相关接口。
|
||||
|
||||
更多信息,详见:[KnowStreaming 官网](https://knowstreaming.com/)
|
||||
199
docs/dev_guide/登录系统对接.md
Normal file
@@ -0,0 +1,199 @@
|
||||
|
||||
|
||||

|
||||
|
||||
## 登录系统对接
|
||||
|
||||
[KnowStreaming](https://github.com/didi/KnowStreaming)(以下简称KS) 除了实现基于本地MySQL的用户登录认证方式外,还已经实现了基于Ldap的登录认证。
|
||||
|
||||
但是,登录认证系统并非仅此两种。因此,为了具有更好的拓展性,KS具有自定义登陆认证逻辑,快速对接已有系统的特性。
|
||||
|
||||
在KS中,我们将登陆认证相关的一些文件放在[km-extends](https://github.com/didi/KnowStreaming/tree/master/km-extends)模块下的[km-account](https://github.com/didi/KnowStreaming/tree/master/km-extends/km-account)模块里。
|
||||
|
||||
本文将介绍KS如何快速对接自有的用户登录认证系统。
|
||||
|
||||
### 对接步骤
|
||||
|
||||
- 创建一个登陆认证类,实现[LogiCommon](https://github.com/didi/LogiCommon)的LoginExtend接口;
|
||||
- 将[application.yml](https://github.com/didi/KnowStreaming/blob/master/km-rest/src/main/resources/application.yml)中的spring.logi-security.login-extend-bean-name字段改为登陆认证类的bean名称;
|
||||
|
||||
```Java
|
||||
//LoginExtend 接口
|
||||
public interface LoginExtend {
|
||||
|
||||
/**
|
||||
* 验证登录信息,同时记住登录状态
|
||||
*/
|
||||
UserBriefVO verifyLogin(AccountLoginDTO var1, HttpServletRequest var2, HttpServletResponse var3) throws LogiSecurityException;
|
||||
|
||||
/**
|
||||
* 登出接口,清除登录状态
|
||||
*/
|
||||
Result<Boolean> logout(HttpServletRequest var1, HttpServletResponse var2);
|
||||
|
||||
/**
|
||||
* 检查是否已经登录
|
||||
*/
|
||||
boolean interceptorCheck(HttpServletRequest var1, HttpServletResponse var2, String var3, List<String> var4) throws IOException;
|
||||
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 对接例子
|
||||
|
||||
我们以Ldap对接为例,说明KS如何对接登录认证系统。
|
||||
|
||||
+ 编写[LdapLoginServiceImpl](https://github.com/didi/KnowStreaming/blob/master/km-extends/km-account/src/main/java/com/xiaojukeji/know/streaming/km/account/login/ldap/LdapLoginServiceImpl.java)类,实现LoginExtend接口。
|
||||
+ 设置[application.yml](https://github.com/didi/KnowStreaming/blob/master/km-rest/src/main/resources/application.yml)中的spring.logi-security.login-extend-bean-name=ksLdapLoginService。
|
||||
|
||||
完成上述两步即可实现KS对接Ldap认证登陆。
|
||||
|
||||
```Java
|
||||
@Service("ksLdapLoginService")
|
||||
public class LdapLoginServiceImpl implements LoginExtend {
|
||||
|
||||
|
||||
@Override
|
||||
public UserBriefVO verifyLogin(AccountLoginDTO loginDTO,
|
||||
HttpServletRequest request,
|
||||
HttpServletResponse response) throws LogiSecurityException {
|
||||
String decodePasswd = AESUtils.decrypt(loginDTO.getPw());
|
||||
|
||||
// 去LDAP验证账密
|
||||
LdapPrincipal ldapAttrsInfo = ldapAuthentication.authenticate(loginDTO.getUserName(), decodePasswd);
|
||||
if (ldapAttrsInfo == null) {
|
||||
// 用户不存在,正常来说如果有问题,上一步会直接抛出异常
|
||||
throw new LogiSecurityException(ResultCode.USER_NOT_EXISTS);
|
||||
}
|
||||
|
||||
// 进行业务相关操作
|
||||
|
||||
// 记录登录状态,Ldap因为无法记录登录状态,因此由KnowStreaming进行记录
|
||||
initLoginContext(request, response, loginDTO.getUserName(), user.getId());
|
||||
return CopyBeanUtil.copy(user, UserBriefVO.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result<Boolean> logout(HttpServletRequest request, HttpServletResponse response) {
|
||||
|
||||
//清理cookie和session
|
||||
|
||||
return Result.buildSucc(Boolean.TRUE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean interceptorCheck(HttpServletRequest request, HttpServletResponse response, String requestMappingValue, List<String> whiteMappingValues) throws IOException {
|
||||
|
||||
// 检查是否已经登录
|
||||
String userName = HttpRequestUtil.getOperator(request);
|
||||
if (StringUtils.isEmpty(userName)) {
|
||||
// 未登录,则进行登出
|
||||
logout(request, response);
|
||||
return Boolean.FALSE;
|
||||
}
|
||||
|
||||
return Boolean.TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 实现原理
|
||||
|
||||
因为登陆和登出整体实现逻辑是一致的,所以我们以登陆逻辑为例进行介绍。
|
||||
|
||||
+ 登陆原理
|
||||
|
||||
登陆走的是[LogiCommon](https://github.com/didi/LogiCommon)自带的LoginController。
|
||||
|
||||
```java
|
||||
@RestController
|
||||
public class LoginController {
|
||||
|
||||
|
||||
//登陆接口
|
||||
@PostMapping({"/login"})
|
||||
public Result<UserBriefVO> login(HttpServletRequest request, HttpServletResponse response, @RequestBody AccountLoginDTO loginDTO) {
|
||||
try {
|
||||
//登陆认证
|
||||
UserBriefVO userBriefVO = this.loginService.verifyLogin(loginDTO, request, response);
|
||||
return Result.success(userBriefVO);
|
||||
|
||||
} catch (LogiSecurityException var5) {
|
||||
return Result.fail(var5);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
而登陆操作是调用LoginServiceImpl类来实现,但是具体由哪个登陆认证类来执行登陆操作却由loginExtendBeanTool来指定。
|
||||
|
||||
```java
|
||||
//LoginServiceImpl类
|
||||
@Service
|
||||
public class LoginServiceImpl implements LoginService {
|
||||
|
||||
//实现登陆操作,但是具体哪个登陆类由loginExtendBeanTool来管理
|
||||
public UserBriefVO verifyLogin(AccountLoginDTO loginDTO, HttpServletRequest request, HttpServletResponse response) throws LogiSecurityException {
|
||||
|
||||
return this.loginExtendBeanTool.getLoginExtendImpl().verifyLogin(loginDTO, request, response);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
而loginExtendBeanTool类会优先去查找用户指定的登陆认证类,如果失败则调用默认的登陆认证函数。
|
||||
|
||||
```java
|
||||
//LoginExtendBeanTool类
|
||||
@Component("logiSecurityLoginExtendBeanTool")
|
||||
public class LoginExtendBeanTool {
|
||||
|
||||
public LoginExtend getLoginExtendImpl() {
|
||||
LoginExtend loginExtend;
|
||||
//先调用用户指定登陆类,如果失败则调用系统默认登陆认证
|
||||
try {
|
||||
//调用的类由spring.logi-security.login-extend-bean-name指定
|
||||
loginExtend = this.getCustomLoginExtendImplBean();
|
||||
} catch (UnsupportedOperationException var3) {
|
||||
loginExtend = this.getDefaultLoginExtendImplBean();
|
||||
}
|
||||
|
||||
return loginExtend;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
+ 认证原理
|
||||
|
||||
认证的实现则比较简单,向Spring中注册我们的拦截器PermissionInterceptor。
|
||||
|
||||
拦截器会调用LoginServiceImpl类的拦截方法,LoginServiceImpl后续处理逻辑就和前面登陆是一致的。
|
||||
|
||||
```java
|
||||
public class PermissionInterceptor implements HandlerInterceptor {
|
||||
|
||||
|
||||
/**
|
||||
* 拦截预处理
|
||||
* @return boolean false:拦截, 不向下执行, true:放行
|
||||
*/
|
||||
@Override
|
||||
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
|
||||
|
||||
//免登录相关校验,如果验证通过,提前返回
|
||||
|
||||
//走拦截函数,进行普通用户验证
|
||||
return loginService.interceptorCheck(request, response, classRequestMappingValue, whiteMappingValues);
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
276
docs/dev_guide/解决连接JMX失败.md
Normal file
@@ -0,0 +1,276 @@
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
## 2、解决连接 JMX 失败
|
||||
|
||||
- [2、解决连接 JMX 失败](#2解决连接-jmx-失败)
|
||||
- [2.1、正异常现象](#21正异常现象)
|
||||
- [2.2、异因一:JMX未开启](#22异因一jmx未开启)
|
||||
- [2.2.1、异常现象](#221异常现象)
|
||||
- [2.2.2、解决方案](#222解决方案)
|
||||
  - [2.3、异因二:JMX配置错误](#23异原二jmx配置错误)
|
||||
- [2.3.1、异常现象](#231异常现象)
|
||||
- [2.3.2、解决方案](#232解决方案)
|
||||
- [2.4、异因三:JMX开启SSL](#24异因三jmx开启ssl)
|
||||
- [2.4.1、异常现象](#241异常现象)
|
||||
- [2.4.2、解决方案](#242解决方案)
|
||||
- [2.5、异因四:连接了错误IP](#25异因四连接了错误ip)
|
||||
- [2.5.1、异常现象](#251异常现象)
|
||||
- [2.5.2、解决方案](#252解决方案)
|
||||
- [2.6、异因五:连接了错误端口](#26异因五连接了错误端口)
|
||||
- [2.6.1、异常现象](#261异常现象)
|
||||
- [2.6.2、解决方案](#262解决方案)
|
||||
|
||||
|
||||
背景:Kafka 通过 JMX 服务进行运行指标的暴露,因此 `KnowStreaming` 会主动连接 Kafka 的 JMX 服务进行指标采集。如果我们发现页面缺少指标,那么可能原因之一是 Kafka 的 JMX 端口配置的有问题导致指标获取失败,进而页面没有数据。
|
||||
|
||||
|
||||
### 2.1、正异常现象
|
||||
|
||||
**1、异常现象**
|
||||
|
||||
Broker 列表的 JMX PORT 列出现红色感叹号,则表示 JMX 连接存在异常。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_MLlLCfAktne4X6MBtBUd width="90%">
|
||||
|
||||
|
||||
**2、正常现象**
|
||||
|
||||
Broker 列表的 JMX PORT 列出现绿色,则表示 JMX 连接正常。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_ymtDTCiDlzfrmSCez2lx width="90%">
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### 2.2、异因一:JMX未开启
|
||||
|
||||
#### 2.2.1、异常现象
|
||||
|
||||
broker列表的JMX Port值为-1,对应Broker的JMX未开启。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_E1PD8tPsMeR2zYLFBFAu width="90%">
|
||||
|
||||
#### 2.2.2、解决方案
|
||||
|
||||
开启JMX,开启流程如下:
|
||||
|
||||
1、修改kafka的bin目录下面的:`kafka-server-start.sh`文件
|
||||
|
||||
```bash
|
||||
# 在这个下面增加JMX端口的配置
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
|
||||
export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999
|
||||
fi
|
||||
```
|
||||
|
||||
|
||||
2、修改kafka的bin目录下面的:`kafka-run-class.sh`文件
|
||||
|
||||
```bash
|
||||
# JMX settings
|
||||
if [ -z "$KAFKA_JMX_OPTS" ]; then
|
||||
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=当前机器的IP"
|
||||
fi
|
||||
|
||||
# JMX port to use
|
||||
if [ $JMX_PORT ]; then
|
||||
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
|
||||
fi
|
||||
```
|
||||
|
||||
|
||||
|
||||
3、重启Kafka-Broker。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### 2.3、异原二:JMX配置错误
|
||||
|
||||
#### 2.3.1、异常现象
|
||||
|
||||
错误日志:
|
||||
|
||||
```log
|
||||
# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。
|
||||
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is:
|
||||
|
||||
# 错误二:错误提示的是127.0.0.1这个IP,这个是机器的hostname配置的可能有问题。
|
||||
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is:
|
||||
```
|
||||
|
||||
#### 2.3.2、解决方案
|
||||
|
||||
开启JMX,开启流程如下:
|
||||
|
||||
1、修改kafka的bin目录下面的:`kafka-server-start.sh`文件
|
||||
|
||||
```bash
|
||||
# 在这个下面增加JMX端口的配置
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
|
||||
export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999
|
||||
fi
|
||||
```
|
||||
|
||||
2、修改kafka的bin目录下面的:`kafka-run-class.sh`文件
|
||||
|
||||
```bash
|
||||
# JMX settings
|
||||
if [ -z "$KAFKA_JMX_OPTS" ]; then
|
||||
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=当前机器的IP"
|
||||
fi
|
||||
|
||||
# JMX port to use
|
||||
if [ $JMX_PORT ]; then
|
||||
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
|
||||
fi
|
||||
```
|
||||
|
||||
3、重启Kafka-Broker。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### 2.4、异因三:JMX开启SSL
|
||||
|
||||
#### 2.4.1、异常现象
|
||||
|
||||
```log
|
||||
# 连接JMX的日志中,出现SSL认证失败的相关日志。TODO:欢迎补充具体日志案例。
|
||||
```
|
||||
|
||||
#### 2.4.2、解决方案
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_kNyCi8H9wtHSRkWurB6S width="50%">
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 2.5、异因四:连接了错误IP
|
||||
|
||||
#### 2.5.1、异常现象
|
||||
|
||||
Broker 配置了内外网,而JMX在配置时,可能配置了内网IP或者外网IP,此时`KnowStreaming` 需要连接到特定网络的IP才可以进行访问。
|
||||
|
||||
比如:Broker在ZK的存储结构如下所示,我们期望连接到 `endpoints` 中标记为 `INTERNAL` 的地址,但是 `KnowStreaming` 却连接了 `EXTERNAL` 的地址。
|
||||
|
||||
```json
|
||||
{
|
||||
"listener_security_protocol_map": {
|
||||
"EXTERNAL": "SASL_PLAINTEXT",
|
||||
"INTERNAL": "SASL_PLAINTEXT"
|
||||
},
|
||||
"endpoints": [
|
||||
"EXTERNAL://192.168.0.1:7092",
|
||||
"INTERNAL://192.168.0.2:7093"
|
||||
],
|
||||
"jmx_port": 8099,
|
||||
"host": "192.168.0.1",
|
||||
"timestamp": "1627289710439",
|
||||
"port": -1,
|
||||
"version": 4
|
||||
}
|
||||
```
|
||||
|
||||
#### 2.5.2、解决方案
|
||||
|
||||
可以手动往`ks_km_physical_cluster`表的`jmx_properties`字段增加一个`useWhichEndpoint`字段,从而控制 `KnowStreaming` 连接到特定的JMX IP及PORT。
|
||||
|
||||
`jmx_properties`格式:
|
||||
|
||||
```json
|
||||
{
|
||||
"maxConn": 100, // KM对单台Broker的最大JMX连接数
|
||||
"username": "xxxxx", //用户名,可以不填写
|
||||
"password": "xxxx", // 密码,可以不填写
|
||||
"openSSL": true, //开启SSL, true表示开启ssl, false表示关闭
|
||||
"useWhichEndpoint": "EXTERNAL" //指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
SQL例子:
|
||||
|
||||
```sql
|
||||
UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "useWhichEndpoint": "xxx"}' where id={xxx};
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### 2.6、异因五:连接了错误端口
|
||||
|
||||
3.3.0 以上版本,或者是 master 分支最新代码,才具备该能力。
|
||||
|
||||
#### 2.6.1、异常现象
|
||||
|
||||
在 AWS 或者是容器上的 Kafka-Broker,使用同一个IP,但是外部服务想要去连接 JMX 端口时,需要进行映射。因此 KnowStreaming 如果直接连接 ZK 上获取到的 JMX 端口,会连接失败,因此需要具备连接端口可配置的能力。
|
||||
|
||||
TODO:补充具体的日志。
|
||||
|
||||
|
||||
#### 2.6.2、解决方案
|
||||
|
||||
可以手动往`ks_km_physical_cluster`表的`jmx_properties`字段增加一个`specifiedJmxPortList`字段,从而控制 `KnowStreaming` 连接到特定的JMX PORT。
|
||||
|
||||
`jmx_properties`格式:
|
||||
```json
|
||||
{
|
||||
"jmxPort": 2445, // 最低优先级使用的jmx端口
|
||||
"maxConn": 100, // KM对单台Broker的最大JMX连接数
|
||||
"username": "xxxxx", //用户名,可以不填写
|
||||
"password": "xxxx", // 密码,可以不填写
|
||||
"openSSL": true, //开启SSL, true表示开启ssl, false表示关闭
|
||||
"useWhichEndpoint": "EXTERNAL", //指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址
|
||||
"specifiedJmxPortList": [ // 配置最高优先使用的jmx端口
|
||||
{
|
||||
"serverId": "1", // kafka-broker的brokerId, 注意这个是字符串类型,字符串类型的原因是要兼容connect的jmx端口的连接
|
||||
"jmxPort": 1234 // 该 broker 所连接的jmx端口
|
||||
},
|
||||
{
|
||||
"serverId": "2",
|
||||
"jmxPort": 1234
|
||||
        }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
SQL例子:
|
||||
|
||||
```sql
|
||||
UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "specifiedJmxPortList": [{"serverId": "1", "jmxPort": 1234}] }' where id={xxx};
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
183
docs/dev_guide/页面无数据排查手册.md
Normal file
@@ -0,0 +1,183 @@
|
||||

|
||||
|
||||
# 页面无数据排查手册
|
||||
|
||||
- [页面无数据排查手册](#页面无数据排查手册)
|
||||
- [1、集群接入错误](#1集群接入错误)
|
||||
- [1.1、异常现象](#11异常现象)
|
||||
- [1.2、解决方案](#12解决方案)
|
||||
- [1.3、正常情况](#13正常情况)
|
||||
- [2、JMX连接失败](#2jmx连接失败)
|
||||
- [3、ElasticSearch问题](#3elasticsearch问题)
|
||||
- [3.1、异因一:缺少索引](#31异因一缺少索引)
|
||||
- [3.1.1、异常现象](#311异常现象)
|
||||
- [3.1.2、解决方案](#312解决方案)
|
||||
- [3.2、异因二:索引模板错误](#32异因二索引模板错误)
|
||||
- [3.2.1、异常现象](#321异常现象)
|
||||
- [3.2.2、解决方案](#322解决方案)
|
||||
- [3.3、异因三:集群Shard满](#33异因三集群shard满)
|
||||
- [3.3.1、异常现象](#331异常现象)
|
||||
- [3.3.2、解决方案](#332解决方案)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 1、集群接入错误
|
||||
|
||||
### 1.1、异常现象
|
||||
|
||||
如下图所示,集群非空时,大概率为地址配置错误导致。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_BRiXBvqYFK2dxSF1aqgZ width="80%">
|
||||
|
||||
|
||||
|
||||
### 1.2、解决方案
|
||||
|
||||
接入集群时,依据提示的错误,进行相应的解决。例如:
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_Yn4LhV8aeSEKX1zrrkUi width="50%">
|
||||
|
||||
### 1.3、正常情况
|
||||
|
||||
接入集群时,页面信息都自动正常出现,没有提示错误。
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 2、JMX连接失败
|
||||
|
||||
背景:Kafka 通过 JMX 服务进行运行指标的暴露,因此 `KnowStreaming` 会主动连接 Kafka 的 JMX 服务进行指标采集。如果我们发现页面缺少指标,那么可能原因之一是 Kafka 的 JMX 端口配置的有问题导致指标获取失败,进而页面没有数据。
|
||||
|
||||
|
||||
具体见同目录下的文档:[解决连接JMX失败](./%E8%A7%A3%E5%86%B3%E8%BF%9E%E6%8E%A5JMX%E5%A4%B1%E8%B4%A5.md)
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## 3、ElasticSearch问题
|
||||
|
||||
**背景:**
|
||||
`KnowStreaming` 将从 Kafka 中采集到的指标存储到 ES 中,如果 ES 存在问题,则也可能会导致页面出现无数据的情况。
|
||||
|
||||
**日志:**
|
||||
`KnowStreaming` 读写 ES 相关日志,在 `logs/es/es.log` 中!
|
||||
|
||||
|
||||
**注意:**
|
||||
mac系统在执行curl指令时,可能报zsh错误。可参考以下操作。
|
||||
|
||||
```bash
|
||||
1.进入.zshrc 文件 vim ~/.zshrc
|
||||
2.在.zshrc中加入 setopt no_nomatch
|
||||
3.更新配置 source ~/.zshrc
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3.1、异因一:缺少索引
|
||||
|
||||
#### 3.1.1、异常现象
|
||||
|
||||
报错信息
|
||||
|
||||
```log
|
||||
# 日志位置 logs/es/es.log
|
||||
com.didiglobal.logi.elasticsearch.client.model.exception.ESIndexNotFoundException: method [GET], host[http://127.0.0.1:9200], URI [/ks_kafka_broker_metric_2022-10-21,ks_kafka_broker_metric_2022-10-22/_search], status line [HTTP/1.1 404 Not Found]
|
||||
```
|
||||
|
||||
|
||||
`curl http://{ES的IP地址}:{ES的端口号}/_cat/indices/ks_kafka*` 查看KS索引列表,发现没有索引。
|
||||
|
||||
#### 3.1.2、解决方案
|
||||
|
||||
执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 3.2、异因二:索引模板错误
|
||||
|
||||
#### 3.2.1、异常现象
|
||||
|
||||
多集群列表有数据,集群详情页图标无数据。查询KS索引模板列表,发现不存在。
|
||||
|
||||
```bash
|
||||
curl {ES的IP地址}:{ES的端口号}/_cat/templates/ks_kafka*?v&h=name
|
||||
```
|
||||
|
||||
正常KS模板如下图所示。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_l79bPYSci9wr6KFwZDA6 width="90%">
|
||||
|
||||
|
||||
|
||||
#### 3.2.2、解决方案
|
||||
|
||||
删除KS索引模板和索引
|
||||
|
||||
```bash
|
||||
curl -XDELETE {ES的IP地址}:{ES的端口号}/ks_kafka*
|
||||
curl -XDELETE {ES的IP地址}:{ES的端口号}/_template/ks_kafka*
|
||||
```
|
||||
|
||||
执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 3.3、异因三:集群Shard满
|
||||
|
||||
#### 3.3.1、异常现象
|
||||
|
||||
报错信息
|
||||
|
||||
```log
|
||||
# 日志位置 logs/es/es.log
|
||||
|
||||
{"error":{"root_cause":[{"type":"validation_exception","reason":"Validation Failed: 1: this action would add [4] total shards, but this cluster currently has [1000]/[1000] maximum shards open;"}],"type":"validation_exception","reason":"Validation Failed: 1: this action would add [4] total shards, but this cluster currently has [1000]/[1000] maximum shards open;"},"status":400}
|
||||
```
|
||||
|
||||
尝试手动创建索引失败。
|
||||
|
||||
```bash
|
||||
#创建ks_kafka_cluster_metric_test索引的指令
|
||||
curl -s -XPUT http://{ES的IP地址}:{ES的端口号}/ks_kafka_cluster_metric_test
|
||||
```
|
||||
|
||||
|
||||
#### 3.3.2、解决方案
|
||||
|
||||
ES索引的默认分片数量为1000,达到数量以后,索引创建失败。
|
||||
|
||||
+ 扩大ES索引数量上限,执行指令
|
||||
|
||||
```
|
||||
curl -XPUT -H"content-type:application/json" http://{ES的IP地址}:{ES的端口号}/_cluster/settings -d '
|
||||
{
|
||||
"persistent": {
|
||||
"cluster": {
|
||||
"max_shards_per_node":{索引上限,默认为1000, 测试时可以将其调整为10000}
|
||||
}
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来补全索引。
|
||||
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||
|
||||
---
|
||||
|
||||
# 安装手册
|
||||
|
||||
|
||||
## 环境依赖
|
||||
|
||||
- `Maven 3.2+`(后端打包依赖)
|
||||
- `node 10+`(前端打包依赖)
|
||||
- `Java 8+`(运行环境需要)
|
||||
- `MySQL`(数据存储)
|
||||
|
||||
---
|
||||
|
||||
## 环境初始化
|
||||
|
||||
执行[create_mysql_table.sql](./create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`kafka_manager`。
|
||||
|
||||
```
|
||||
# 示例:
|
||||
mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 打包
|
||||
|
||||
```bash
|
||||
|
||||
# 一次性打包
|
||||
cd ..
|
||||
mvn install
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 启动
|
||||
|
||||
```
|
||||
# application.yml 是配置文件
|
||||
|
||||
cp web/src/main/resources/application.yml web/target/
|
||||
cd web/target/
|
||||
nohup java -jar kafka-manager-web-2.0.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
|
||||
```
|
||||
|
||||
## 使用
|
||||
|
||||
本地启动的话,访问`http://localhost:8080`,输入帐号及密码进行登录。更多参考:[kafka-manager使用手册](./user_cn_guide.md)
|
||||
|
||||
417
docs/install_guide/单机部署手册.md
Normal file
@@ -0,0 +1,417 @@
|
||||
## 2.1、单机部署
|
||||
|
||||
**风险提示**
|
||||
|
||||
⚠️ 脚本全自动安装,会将所部署机器上的 MySQL、JDK、ES 等进行删除重装,请注意原有服务丢失风险。
|
||||
|
||||
### 2.1.1、安装说明
|
||||
|
||||
- 以 `v3.0.0-beta.1` 版本为例进行部署;
|
||||
- 以 CentOS-7 为例,系统基础配置要求 4C-8G;
|
||||
- 部署完成后,可通过浏览器:`IP:PORT` 进行访问,默认端口是 `8080`,系统默认账号密码: `admin` / `admin2022_`。
|
||||
- `v3.0.0-beta.2`版本开始,默认账号密码为`admin` / `admin`;
|
||||
- 本文为单机部署,如需分布式部署,[请联系我们](https://knowstreaming.com/support-center)
|
||||
|
||||
**软件依赖**
|
||||
|
||||
| 软件名 | 版本要求 | 默认端口 |
|
||||
| ------------- | ------------ | -------- |
|
||||
| MySQL | v5.7 或 v8.0 | 3306 |
|
||||
| ElasticSearch | v7.6+ | 8060 |
|
||||
| JDK | v8+ | - |
|
||||
| CentOS | v6+ | - |
|
||||
| Ubuntu | v16+ | - |
|
||||
|
||||
|
||||
|
||||
### 2.1.2、脚本部署
|
||||
|
||||
**在线安装**
|
||||
|
||||
```bash
|
||||
# 在服务器中下载安装脚本, 该脚本中会在当前目录下,重新安装MySQL。重装后的mysql密码存放在当前目录的mysql.password文件中。
|
||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/deploy_KnowStreaming-3.0.0-beta.1.sh
|
||||
|
||||
# 执行脚本
|
||||
sh deploy_KnowStreaming.sh
|
||||
|
||||
# 访问地址
|
||||
127.0.0.1:8080
|
||||
```
|
||||
|
||||
**离线安装**
|
||||
|
||||
```bash
|
||||
# 将安装包下载到本地且传输到目标服务器
|
||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.1-offline.tar.gz
|
||||
|
||||
# 解压安装包
|
||||
tar -zxf KnowStreaming-3.0.0-beta.1-offline.tar.gz
|
||||
|
||||
# 执行安装脚本
|
||||
sh deploy_KnowStreaming-offline.sh
|
||||
|
||||
# 访问地址
|
||||
127.0.0.1:8080
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 2.1.3、容器部署
|
||||
|
||||
#### 2.1.3.1、Helm
|
||||
|
||||
**环境依赖**
|
||||
|
||||
- Kubernetes >= 1.14 ,Helm >= 2.17.0
|
||||
|
||||
- 默认依赖全部安装,ElasticSearch(3 节点集群模式) + MySQL(单机) + KnowStreaming-manager + KnowStreaming-ui
|
||||
|
||||
- 使用已有的 ElasticSearch(7.6.x) 和 MySQL(5.7) 只需调整 values.yaml 部分参数即可
|
||||
|
||||
**安装命令**
|
||||
|
||||
```bash
|
||||
# 相关镜像在Docker Hub都可以下载
|
||||
# 快速安装(NAMESPACE需要更改为已存在的,安装启动需要几分钟初始化请稍等~)
|
||||
helm install -n [NAMESPACE] [NAME] http://download.knowstreaming.com/charts/knowstreaming-manager-0.1.5.tgz
|
||||
|
||||
# 获取KnowStreaming前端ui的service. 默认nodeport方式.
|
||||
# (http://nodeIP:nodeport,默认用户名密码:admin/admin2022_)
|
||||
# `v3.0.0-beta.2`版本开始(helm chart包版本0.1.4开始),默认账号密码为`admin` / `admin`;
|
||||
|
||||
# 添加仓库
|
||||
helm repo add knowstreaming http://download.knowstreaming.com/charts
|
||||
|
||||
# 拉取最新版本
|
||||
helm pull knowstreaming/knowstreaming-manager
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### 2.1.3.2、Docker Compose
|
||||
**环境依赖**
|
||||
|
||||
- [Docker](https://docs.docker.com/engine/install/)
|
||||
- [Docker Compose](https://docs.docker.com/compose/install/)
|
||||
|
||||
|
||||
**安装命令**
|
||||
```bash
|
||||
# `v3.0.0-beta.2`版本开始(docker镜像为0.2.0版本开始),默认账号密码为`admin` / `admin`;
|
||||
# https://hub.docker.com/u/knowstreaming 在此处寻找最新镜像版本
|
||||
# mysql与es可以使用自己搭建的服务,调整对应配置即可
|
||||
|
||||
# 复制docker-compose.yml到指定位置后执行下方命令即可启动
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
**验证安装**
|
||||
```shell
|
||||
docker-compose ps
|
||||
# 验证启动 - 状态为 UP 则表示成功
|
||||
Name Command State Ports
|
||||
----------------------------------------------------------------------------------------------------
|
||||
elasticsearch-single /usr/local/bin/docker-entr ... Up 9200/tcp, 9300/tcp
|
||||
knowstreaming-init /bin/bash /es_template_cre ... Up
|
||||
knowstreaming-manager /bin/sh /ks-start.sh Up 80/tcp
|
||||
knowstreaming-mysql /entrypoint.sh mysqld Up (health: starting) 3306/tcp, 33060/tcp
|
||||
knowstreaming-ui /docker-entrypoint.sh ngin ... Up 0.0.0.0:80->80/tcp
|
||||
|
||||
# 稍等一分钟左右 knowstreaming-init 会退出,表示es初始化完成,可以访问页面
|
||||
Name Command State Ports
|
||||
-------------------------------------------------------------------------------------------
|
||||
knowstreaming-init /bin/bash /es_template_cre ... Exit 0
|
||||
knowstreaming-mysql /entrypoint.sh mysqld Up (healthy) 3306/tcp, 33060/tcp
|
||||
```
|
||||
|
||||
**访问**
|
||||
```http request
|
||||
http://127.0.0.1:80/
|
||||
```
|
||||
|
||||
|
||||
**docker-compose.yml**
|
||||
```yml
|
||||
version: "2"
|
||||
services:
|
||||
# *不要调整knowstreaming-manager服务名称,ui中会用到
|
||||
knowstreaming-manager:
|
||||
image: knowstreaming/knowstreaming-manager:latest
|
||||
container_name: knowstreaming-manager
|
||||
privileged: true
|
||||
restart: always
|
||||
depends_on:
|
||||
- elasticsearch-single
|
||||
- knowstreaming-mysql
|
||||
expose:
|
||||
- 80
|
||||
command:
|
||||
- /bin/sh
|
||||
- /ks-start.sh
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
# mysql服务地址
|
||||
SERVER_MYSQL_ADDRESS: knowstreaming-mysql:3306
|
||||
# mysql数据库名
|
||||
SERVER_MYSQL_DB: know_streaming
|
||||
# mysql用户名
|
||||
SERVER_MYSQL_USER: root
|
||||
# mysql用户密码
|
||||
SERVER_MYSQL_PASSWORD: admin2022_
|
||||
# es服务地址
|
||||
SERVER_ES_ADDRESS: elasticsearch-single:9200
|
||||
# 服务JVM参数
|
||||
JAVA_OPTS: -Xmx1g -Xms1g
|
||||
# 对于kafka中ADVERTISED_LISTENERS填写的hostname可以通过该方式完成
|
||||
# extra_hosts:
|
||||
# - "hostname:x.x.x.x"
|
||||
# 服务日志路径
|
||||
# volumes:
|
||||
# - /ks/manage/log:/logs
|
||||
knowstreaming-ui:
|
||||
image: knowstreaming/knowstreaming-ui:latest
|
||||
container_name: knowstreaming-ui
|
||||
restart: always
|
||||
ports:
|
||||
- '80:80'
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
depends_on:
|
||||
- knowstreaming-manager
|
||||
# extra_hosts:
|
||||
# - "hostname:x.x.x.x"
|
||||
elasticsearch-single:
|
||||
image: docker.io/library/elasticsearch:7.6.2
|
||||
container_name: elasticsearch-single
|
||||
restart: always
|
||||
expose:
|
||||
- 9200
|
||||
- 9300
|
||||
# ports:
|
||||
# - '9200:9200'
|
||||
# - '9300:9300'
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
# es的JVM参数
|
||||
ES_JAVA_OPTS: -Xms512m -Xmx512m
|
||||
# 单节点配置,多节点集群参考 https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file
|
||||
discovery.type: single-node
|
||||
# 数据持久化路径
|
||||
# volumes:
|
||||
# - /ks/es/data:/usr/share/elasticsearch/data
|
||||
|
||||
# es初始化服务,与manager使用同一镜像
|
||||
# 首次启动es需初始化模版和索引,后续会自动创建
|
||||
knowstreaming-init:
|
||||
image: knowstreaming/knowstreaming-manager:latest
|
||||
container_name: knowstreaming-init
|
||||
depends_on:
|
||||
- elasticsearch-single
|
||||
command:
|
||||
- /bin/bash
|
||||
- /es_template_create.sh
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
# es服务地址
|
||||
SERVER_ES_ADDRESS: elasticsearch-single:9200
|
||||
|
||||
knowstreaming-mysql:
|
||||
image: knowstreaming/knowstreaming-mysql:latest
|
||||
container_name: knowstreaming-mysql
|
||||
restart: always
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
# root 用户密码
|
||||
MYSQL_ROOT_PASSWORD: admin2022_
|
||||
# 初始化时创建的数据库名称
|
||||
MYSQL_DATABASE: know_streaming
|
||||
# 通配所有host,可以访问远程
|
||||
MYSQL_ROOT_HOST: '%'
|
||||
expose:
|
||||
- 3306
|
||||
# ports:
|
||||
# - '3306:3306'
|
||||
# 数据持久化路径
|
||||
# volumes:
|
||||
# - /ks/mysql/data:/data/mysql
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 2.1.4、手动部署
|
||||
|
||||
**部署流程**
|
||||
|
||||
1. 安装 `JDK-11`、`MySQL`、`ElasticSearch` 等依赖服务
|
||||
2. 安装 KnowStreaming
|
||||
|
||||
|
||||
|
||||
#### 2.1.4.1、安装 MySQL 服务
|
||||
|
||||
**yum 方式安装**
|
||||
|
||||
```bash
|
||||
# 配置yum源
|
||||
wget https://dev.mysql.com/get/mysql57-community-release-el7-9.noarch.rpm
|
||||
rpm -ivh mysql57-community-release-el7-9.noarch.rpm
|
||||
|
||||
# 执行安装
|
||||
yum -y install mysql-server mysql-client
|
||||
|
||||
# 服务启动
|
||||
systemctl start mysqld
|
||||
|
||||
# 获取初始密码并修改
|
||||
old_pass=`grep 'temporary password' /var/log/mysqld.log | awk '{print $NF}' | tail -n 1`
|
||||
|
||||
mysql -NBe "alter user USER() identified by 'Didi_km_678';" --connect-expired-password -uroot -p$old_pass
|
||||
```
|
||||
|
||||
**rpm 方式安装**
|
||||
|
||||
```bash
|
||||
# 下载安装包
|
||||
wget https://s3-gzpu.didistatic.com/knowsearch/mysql5.7.tar.gz
|
||||
|
||||
# 解压到指定目录
|
||||
tar -zxf mysql5.7.tar.gz -C /tmp/
|
||||
|
||||
# 执行安装
|
||||
yum -y localinstall /tmp/libaio-*.rpm /tmp/mysql-*.rpm
|
||||
|
||||
# 服务启动
|
||||
systemctl start mysqld
|
||||
|
||||
|
||||
# 获取初始密码并修改
|
||||
old_pass=`grep 'temporary password' /var/log/mysqld.log | awk '{print $NF}' | tail -n 1`
|
||||
|
||||
mysql -NBe "alter user USER() identified by 'Didi_km_678';" --connect-expired-password -uroot -p$old_pass
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### 2.1.4.2、配置 JDK 环境
|
||||
|
||||
```bash
|
||||
# 下载安装包
|
||||
wget https://s3-gzpu.didistatic.com/pub/jdk11.tar.gz
|
||||
|
||||
# 解压到指定目录
|
||||
tar -zxf jdk11.tar.gz -C /usr/local/
|
||||
|
||||
# 更改目录名
|
||||
mv /usr/local/jdk-11.0.2 /usr/local/java11
|
||||
|
||||
# 添加到环境变量
|
||||
echo "export JAVA_HOME=/usr/local/java11" >> ~/.bashrc
|
||||
echo "export CLASSPATH=/usr/java/java11/lib" >> ~/.bashrc
|
||||
echo "export PATH=$JAVA_HOME/bin:$PATH:$HOME/bin" >> ~/.bashrc
|
||||
|
||||
source ~/.bashrc
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### 2.1.4.3、ElasticSearch 实例搭建
|
||||
|
||||
- ElasticSearch 用于存储平台采集的 Kafka 指标;
|
||||
- 以下安装示例为单节点模式,如需集群部署可以参考:[Elasticsearch 官方文档](https://www.elastic.co/guide/en/elasticsearch/reference/7.6/elasticsearch-intro.html)
|
||||
|
||||
```bash
|
||||
# 下载安装包
|
||||
wget https://s3-gzpu.didistatic.com/pub/elasticsearch.tar.gz
|
||||
|
||||
# 创建ES数据存储目录
|
||||
mkdir -p /data/es_data
|
||||
|
||||
# 创建ES所属用户
|
||||
useradd arius
|
||||
|
||||
# 配置用户的打开文件数
|
||||
echo "arius soft nofile 655350" >> /etc/security/limits.conf
|
||||
echo "arius hard nofile 655350" >> /etc/security/limits.conf
|
||||
echo "vm.max_map_count = 655360" >> /etc/sysctl.conf
|
||||
sysctl -p
|
||||
|
||||
# 解压安装包
|
||||
tar -zxf elasticsearch.tar.gz -C /data/
|
||||
|
||||
# 更改目录所属组
|
||||
chown -R arius:arius /data/
|
||||
|
||||
# 修改配置文件(参考以下配置)
|
||||
vim /data/elasticsearch/config/elasticsearch.yml
|
||||
cluster.name: km_es
|
||||
node.name: es-node1
|
||||
node.master: true
|
||||
node.data: true
|
||||
path.data: /data/es_data
|
||||
http.port: 8060
|
||||
discovery.seed_hosts: ["127.0.0.1:9300"]
|
||||
|
||||
# 修改内存配置
|
||||
vim /data/elasticsearch/config/jvm.options
|
||||
-Xms2g
|
||||
-Xmx2g
|
||||
|
||||
# 启动服务
|
||||
su - arius
|
||||
export JAVA_HOME=/usr/local/java11
|
||||
sh /data/elasticsearch/control.sh start
|
||||
|
||||
# 确认状态
|
||||
sh /data/elasticsearch/control.sh status
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### 2.1.4.4、KnowStreaming 实例搭建
|
||||
|
||||
```bash
|
||||
# 下载安装包
|
||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.1.tar.gz
|
||||
|
||||
# 解压安装包到指定目录
|
||||
tar -zxf KnowStreaming-3.0.0-beta.1.tar.gz -C /data/
|
||||
|
||||
# 修改启动脚本并加入systemd管理
|
||||
cd /data/KnowStreaming/
|
||||
|
||||
# 创建相应的库和导入初始化数据
|
||||
mysql -uroot -pDidi_km_678 -e "create database know_streaming;"
|
||||
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/ddl-ks-km.sql
|
||||
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/ddl-logi-job.sql
|
||||
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/ddl-logi-security.sql
|
||||
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-ks-km.sql
|
||||
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-logi.sql
|
||||
|
||||
# 创建elasticsearch初始化数据
|
||||
sh ./bin/init_es_template.sh
|
||||
|
||||
# 修改配置文件
|
||||
vim ./conf/application.yml
|
||||
|
||||
# 监听端口
|
||||
server:
|
||||
port: 8080 # web 服务端口
|
||||
tomcat:
|
||||
accept-count: 1000
|
||||
max-connections: 10000
|
||||
|
||||
# ES地址
|
||||
es.client.address: 127.0.0.1:8060
|
||||
|
||||
# 数据库配置(一共三处地方,修改正确的mysql地址和数据库名称以及用户名密码)
|
||||
jdbc-url: jdbc:mariadb://127.0.0.1:3306/know_streaming?.....
|
||||
username: root
|
||||
password: Didi_km_678
|
||||
|
||||
# 启动服务
|
||||
cd /data/KnowStreaming/bin/
|
||||
sh startup.sh
|
||||
```
|
||||
62
docs/install_guide/源码编译打包手册.md
Normal file
@@ -0,0 +1,62 @@
|
||||

|
||||
|
||||
# `Know Streaming` 源码编译打包手册
|
||||
|
||||
## 1、环境信息
|
||||
|
||||
**系统支持**
|
||||
|
||||
`windows7+`、`Linux`、`Mac`
|
||||
|
||||
**环境依赖**
|
||||
|
||||
- Maven 3.6.3 (后端)
|
||||
- Node v12.20.0/v14.17.3 (前端)
|
||||
- Java 8+ (后端)
|
||||
- Git
|
||||
|
||||
## 2、编译打包
|
||||
|
||||
整个工程中,除了`km-console`为前端模块之外,其他模块都是后端工程相关模块。
|
||||
|
||||
因此,如果前后端合并打包,则对整个工程进行打包;如果前端单独打包,则仅打包 `km-console` 中的代码;如果是仅需要后端打包,则在顶层 `pom.xml` 中去掉 `km-console`模块,然后进行打包。
|
||||
|
||||
具体见下面描述。
|
||||
|
||||
### 2.1、前后端合并打包
|
||||
|
||||
1. 下载源码;
|
||||
2. 进入 `KS-KM` 工程目录,执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 命令;
|
||||
3. 打包命令执行完成后,会在 `km-dist/target` 目录下面生成一个 `KnowStreaming-*.tar.gz` 的安装包。
|
||||
|
||||
### 2.2、前端单独打包
|
||||
|
||||
1. 下载源码;
|
||||
2. 跳转到 [前端打包构建文档](https://github.com/didi/KnowStreaming/blob/master/km-console/README.md) 按步骤进行。打包成功后,会在 `km-rest/src/main/resources` 目录下生成名为 `templates` 的前端静态资源包;
|
||||
3. 如果上一步过程中报错,请查看 [FAQ](https://github.com/didi/KnowStreaming/blob/master/docs/user_guide/faq.md) 第 8.10 条;
|
||||
|
||||
### 2.3、后端单独打包
|
||||
|
||||
1. 下载源码;
|
||||
2. 修改顶层 `pom.xml` ,去掉其中的 `km-console` 模块,如下所示;
|
||||
|
||||
```xml
|
||||
<modules>
|
||||
<!-- <module>km-console</module>-->
|
||||
<module>km-common</module>
|
||||
<module>km-persistence</module>
|
||||
<module>km-core</module>
|
||||
<module>km-biz</module>
|
||||
<module>km-extends/km-account</module>
|
||||
<module>km-extends/km-monitor</module>
|
||||
<module>km-extends/km-license</module>
|
||||
<module>km-extends/km-rebalance</module>
|
||||
<module>km-task</module>
|
||||
<module>km-collector</module>
|
||||
<module>km-rest</module>
|
||||
<module>km-dist</module>
|
||||
</modules>
|
||||
```
|
||||
|
||||
3. 执行 `mvn -U clean package -Dmaven.test.skip=true`命令;
|
||||
4. 执行完成之后会在 `KS-KM/km-rest/target` 目录下面生成一个 `ks-km.jar`,即为 KS 的后端部署的 Jar 包;也可以执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U`,生成的 tar 包同样仅有后端服务的功能;
|
||||
526
docs/install_guide/版本升级手册.md
Normal file
@@ -0,0 +1,526 @@
|
||||
## 6.2、版本升级手册
|
||||
|
||||
注意:
|
||||
- 如果想升级至具体版本,需要将你当前版本至你期望使用版本的变更统统执行一遍,然后才能正常使用。
|
||||
- 如果中间某个版本没有升级信息,则表示该版本直接替换安装包即可从前一个版本升级至当前版本。
|
||||
|
||||
### 升级至 `master` 版本
|
||||
|
||||
暂无
|
||||
|
||||
---
|
||||
|
||||
### 升级至 `3.4.0` 版本
|
||||
|
||||
**配置变更**
|
||||
|
||||
```yaml
|
||||
# 新增的配置
|
||||
request: # 请求相关的配置
|
||||
api-call: # api调用
|
||||
timeout-unit-ms: 8000 # 超时时间,默认8000毫秒
|
||||
```
|
||||
|
||||
**SQL 变更**
|
||||
```sql
|
||||
-- 多集群管理权限2023-06-27新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2026', 'Connector-新增', '1593', '1', '2', 'Connector-新增', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2028', 'Connector-编辑', '1593', '1', '2', 'Connector-编辑', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2030', 'Connector-删除', '1593', '1', '2', 'Connector-删除', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2032', 'Connector-重启', '1593', '1', '2', 'Connector-重启', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2034', 'Connector-暂停&恢复', '1593', '1', '2', 'Connector-暂停&恢复', '0', 'know-streaming');
|
||||
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2026', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2028', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2030', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2032', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2034', '0', 'know-streaming');
|
||||
|
||||
|
||||
-- 多集群管理权限2023-06-29新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2036', 'Security-ACL新增', '1593', '1', '2', 'Security-ACL新增', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2038', 'Security-ACL删除', '1593', '1', '2', 'Security-ACL删除', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2040', 'Security-User新增', '1593', '1', '2', 'Security-User新增', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2042', 'Security-User删除', '1593', '1', '2', 'Security-User删除', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2044', 'Security-User修改密码', '1593', '1', '2', 'Security-User修改密码', '0', 'know-streaming');
|
||||
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2036', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2038', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2040', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2042', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2044', '0', 'know-streaming');
|
||||
|
||||
|
||||
-- 多集群管理权限2023-07-06新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2046', 'Group-删除', '1593', '1', '2', 'Group-删除', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2048', 'GroupOffset-Topic纬度删除', '1593', '1', '2', 'GroupOffset-Topic纬度删除', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2050', 'GroupOffset-Partition纬度删除', '1593', '1', '2', 'GroupOffset-Partition纬度删除', '0', 'know-streaming');
|
||||
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming');
|
||||
|
||||
|
||||
-- 多集群管理权限2023-07-18新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2052', 'Security-User查看密码', '1593', '1', '2', 'Security-User查看密码', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2052', '0', 'know-streaming');
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 升级至 `3.3.0` 版本
|
||||
|
||||
**SQL 变更**
|
||||
```sql
|
||||
ALTER TABLE `logi_security_user`
|
||||
CHANGE COLUMN `phone` `phone` VARCHAR(20) NOT NULL DEFAULT '' COMMENT 'mobile' ;
|
||||
|
||||
ALTER TABLE ks_kc_connector ADD `heartbeat_connector_name` varchar(512) DEFAULT '' COMMENT '心跳检测connector名称';
|
||||
ALTER TABLE ks_kc_connector ADD `checkpoint_connector_name` varchar(512) DEFAULT '' COMMENT '进度确认connector名称';
|
||||
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_TOTAL_RECORD_ERRORS', '{\"value\" : 1}', 'MirrorMaker消息处理错误的次数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_REPLICATION_LATENCY_MS_MAX', '{\"value\" : 6000}', 'MirrorMaker消息复制最大延迟时间', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_UNASSIGNED_TASK_COUNT', '{\"value\" : 20}', 'MirrorMaker未被分配的任务数量', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_FAILED_TASK_COUNT', '{\"value\" : 10}', 'MirrorMaker失败状态的任务数量', 'admin');
|
||||
|
||||
|
||||
-- 多集群管理权限2023-01-05新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2012', 'Topic-新增Topic复制', '1593', '1', '2', 'Topic-新增Topic复制', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2014', 'Topic-详情-取消Topic复制', '1593', '1', '2', 'Topic-详情-取消Topic复制', '0', 'know-streaming');
|
||||
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2012', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2014', '0', 'know-streaming');
|
||||
|
||||
|
||||
-- 多集群管理权限2023-01-18新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2016', 'MM2-新增', '1593', '1', '2', 'MM2-新增', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2018', 'MM2-编辑', '1593', '1', '2', 'MM2-编辑', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2020', 'MM2-删除', '1593', '1', '2', 'MM2-删除', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2022', 'MM2-重启', '1593', '1', '2', 'MM2-重启', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2024', 'MM2-暂停&恢复', '1593', '1', '2', 'MM2-暂停&恢复', '0', 'know-streaming');
|
||||
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2016', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2018', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2020', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2022', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2024', '0', 'know-streaming');
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_ha_active_standby_relation`;
|
||||
CREATE TABLE `ks_ha_active_standby_relation` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`active_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '主集群ID',
|
||||
`standby_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '备集群ID',
|
||||
`res_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '资源名称',
|
||||
`res_type` int(11) NOT NULL DEFAULT '-1' COMMENT '资源类型,0:集群,1:镜像Topic,2:主备Topic',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_res` (`res_type`,`active_cluster_phy_id`,`standby_cluster_phy_id`,`res_name`),
|
||||
UNIQUE KEY `uniq_res_type_standby_cluster_res_name` (`res_type`,`standby_cluster_phy_id`,`res_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='HA主备关系表';
|
||||
|
||||
|
||||
-- 删除idx_cluster_phy_id 索引并新增idx_cluster_update_time索引
|
||||
ALTER TABLE `ks_km_kafka_change_record` DROP INDEX `idx_cluster_phy_id` ,
|
||||
ADD INDEX `idx_cluster_update_time` (`cluster_phy_id` ASC, `update_time` ASC);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 升级至 `3.2.0` 版本
|
||||
|
||||
**配置变更**
|
||||
|
||||
```yaml
|
||||
# 新增如下配置
|
||||
|
||||
spring:
|
||||
logi-job: # know-streaming 依赖的 logi-job 模块的数据库的配置,默认与 know-streaming 的数据库配置保持一致即可
|
||||
enable: true # true表示开启job任务, false表关闭。KS在部署上可以考虑部署两套服务,一套处理前端请求,一套执行job任务,此时可以通过该字段进行控制
|
||||
|
||||
# 线程池大小相关配置
|
||||
thread-pool:
|
||||
es:
|
||||
search: # es查询线程池
|
||||
thread-num: 20 # 线程池大小
|
||||
queue-size: 10000 # 队列大小
|
||||
|
||||
# 客户端池大小相关配置
|
||||
client-pool:
|
||||
kafka-admin:
|
||||
client-cnt: 1 # 每个Kafka集群创建的KafkaAdminClient数
|
||||
|
||||
# ES客户端配置
|
||||
es:
|
||||
index:
|
||||
expire: 15 # 索引过期天数,15表示超过15天的索引会被KS过期删除
|
||||
```
|
||||
|
||||
**SQL 变更**
|
||||
```sql
|
||||
DROP TABLE IF EXISTS `ks_kc_connect_cluster`;
|
||||
CREATE TABLE `ks_kc_connect_cluster` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'Connect集群ID',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群名称',
|
||||
`group_name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群Group名称',
|
||||
`cluster_url` varchar(1024) NOT NULL DEFAULT '' COMMENT '集群地址',
|
||||
`member_leader_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'URL地址',
|
||||
`version` varchar(64) NOT NULL DEFAULT '' COMMENT 'connect版本',
|
||||
`jmx_properties` text COMMENT 'JMX配置',
|
||||
`state` tinyint(4) NOT NULL DEFAULT '1' COMMENT '集群使用的消费组状态,也表示集群状态:-1 Unknown,0 ReBalance,1 Active,2 Dead,3 Empty',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '接入时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_id_group_name` (`id`,`group_name`),
|
||||
UNIQUE KEY `uniq_name_kafka_cluster` (`name`,`kafka_cluster_phy_id`),
|
||||
KEY `idx_kafka_cluster_phy_id` (`kafka_cluster_phy_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Connect集群信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_connector`;
|
||||
CREATE TABLE `ks_kc_connector` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect集群ID',
|
||||
`connector_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector名称',
|
||||
`connector_class_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector类',
|
||||
`connector_type` varchar(32) NOT NULL DEFAULT '' COMMENT 'Connector类型',
|
||||
`state` varchar(45) NOT NULL DEFAULT '' COMMENT '状态',
|
||||
`topics` text COMMENT '访问过的Topics',
|
||||
`task_count` int(11) NOT NULL DEFAULT '0' COMMENT '任务数',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_connect_cluster_id_connector_name` (`connect_cluster_id`,`connector_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Connector信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_worker`;
|
||||
CREATE TABLE `ks_kc_worker` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect集群ID',
|
||||
`member_id` varchar(512) NOT NULL DEFAULT '' COMMENT '成员ID',
|
||||
`host` varchar(128) NOT NULL DEFAULT '' COMMENT '主机名',
|
||||
`jmx_port` int(16) NOT NULL DEFAULT '-1' COMMENT 'Jmx端口',
|
||||
`url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'URL信息',
|
||||
`leader_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'leaderURL信息',
|
||||
`leader` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1是leader,0不是leader',
|
||||
`worker_id` varchar(128) NOT NULL COMMENT 'worker地址',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_member_id` (`connect_cluster_id`,`member_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='worker信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_worker_connector`;
|
||||
CREATE TABLE `ks_kc_worker_connector` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect集群ID',
|
||||
`connector_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector名称',
|
||||
`worker_member_id` varchar(256) NOT NULL DEFAULT '',
|
||||
`task_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'Task的ID',
|
||||
`state` varchar(128) DEFAULT NULL COMMENT '任务状态',
|
||||
`worker_id` varchar(128) DEFAULT NULL COMMENT 'worker信息',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_relation` (`connect_cluster_id`,`connector_name`,`task_id`,`worker_member_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Worker和Connector关系表';
|
||||
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECTOR_FAILED_TASK_COUNT', '{\"value\" : 1}', 'connector失败状态的任务数量', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECTOR_UNASSIGNED_TASK_COUNT', '{\"value\" : 1}', 'connector未被分配的任务数量', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE', '{\"value\" : 0.05}', 'Connect集群任务启动失败概率', 'admin');
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 升级至 `v3.1.0` 版本
|
||||
|
||||
```sql
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_BRAIN_SPLIT', '{ \"value\": 1} ', 'ZK 脑裂', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_OUTSTANDING_REQUESTS', '{ \"amount\": 100, \"ratio\":0.8} ', 'ZK Outstanding 请求堆积数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_WATCH_COUNT', '{ \"amount\": 100000, \"ratio\": 0.8 } ', 'ZK WatchCount 数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_ALIVE_CONNECTIONS', '{ \"amount\": 10000, \"ratio\": 0.8 } ', 'ZK 连接数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_APPROXIMATE_DATA_SIZE', '{ \"amount\": 524288000, \"ratio\": 0.8 } ', 'ZK 数据大小(Byte)', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_SENT_RATE', '{ \"amount\": 500000, \"ratio\": 0.8 } ', 'ZK 发包数', 'admin');
|
||||
|
||||
```
|
||||
|
||||
### 升级至 `v3.0.1` 版本
|
||||
|
||||
**ES 索引模版**
|
||||
```bash
|
||||
# 新增 ks_kafka_zookeeper_metric 索引模版。
|
||||
# 可通过再次执行 bin/init_es_template.sh 脚本,创建该索引模版。
|
||||
|
||||
# 索引模版内容
|
||||
PUT _template/ks_kafka_zookeeper_metric
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_zookeeper_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "10"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"AvgRequestLatency" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MinRequestLatency" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MaxRequestLatency" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"OutstandingRequests" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"NodeCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"WatchCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"NumAliveConnections" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PacketsReceived" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PacketsSent" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"EphemeralsCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"ApproximateDataSize" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"OpenFileDescriptorCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MaxFileDescriptorCount" : {
|
||||
"type" : "double"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"type" : "date"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
**SQL 变更**
|
||||
|
||||
```sql
|
||||
DROP TABLE IF EXISTS `ks_km_zookeeper`;
|
||||
CREATE TABLE `ks_km_zookeeper` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '物理集群ID',
|
||||
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'zookeeper主机名',
|
||||
`port` int(16) NOT NULL DEFAULT '-1' COMMENT 'zookeeper端口',
|
||||
`role` varchar(16) NOT NULL DEFAULT '' COMMENT '角色, leader follower observer',
|
||||
`version` varchar(128) NOT NULL DEFAULT '' COMMENT 'zookeeper版本',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1存活,0未存活,11存活但是4字命令使用不了',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_host_port` (`cluster_phy_id`,`host`, `port`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Zookeeper信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_group`;
|
||||
CREATE TABLE `ks_km_group` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`name` varchar(192) COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Group名称',
|
||||
`member_count` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '成员数',
|
||||
`topic_members` text CHARACTER SET utf8 COMMENT 'group消费的topic列表',
|
||||
`partition_assignor` varchar(255) CHARACTER SET utf8 NOT NULL COMMENT '分配策略',
|
||||
`coordinator_id` int(11) NOT NULL COMMENT 'group协调器brokerId',
|
||||
`type` int(11) NOT NULL COMMENT 'group类型 0:consumer 1:connector',
|
||||
`state` varchar(64) CHARACTER SET utf8 NOT NULL DEFAULT '' COMMENT '状态',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_name` (`cluster_phy_id`,`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Group信息表';
|
||||
|
||||
```
|
||||
|
||||
|
||||
### 升级至 `v3.0.0` 版本
|
||||
|
||||
**SQL 变更**
|
||||
|
||||
```sql
|
||||
ALTER TABLE `ks_km_physical_cluster`
|
||||
ADD COLUMN `zk_properties` TEXT NULL COMMENT 'ZK配置' AFTER `jmx_properties`;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 升级至 `v3.0.0-beta.2`版本
|
||||
|
||||
**配置变更**
|
||||
|
||||
```yaml
|
||||
|
||||
# 新增配置
|
||||
spring:
|
||||
logi-security: # know-streaming 依赖的 logi-security 模块的数据库的配置,默认与 know-streaming 的数据库配置保持一致即可
|
||||
login-extend-bean-name: logiSecurityDefaultLoginExtendImpl # 使用的登录系统Service的Bean名称,无需修改
|
||||
|
||||
# 线程池大小相关配置,在task模块中,新增了三类线程池,
|
||||
# 从而减少不同类型任务之间的相互影响,以及减少对logi-job内的线程池的影响
|
||||
thread-pool:
|
||||
task: # 任务模块的配置
|
||||
metrics: # metrics采集任务配置
|
||||
thread-num: 18 # metrics采集任务线程池核心线程数
|
||||
queue-size: 180 # metrics采集任务线程池队列大小
|
||||
metadata: # metadata同步任务配置
|
||||
thread-num: 27 # metadata同步任务线程池核心线程数
|
||||
queue-size: 270 # metadata同步任务线程池队列大小
|
||||
common: # 剩余其他任务配置
|
||||
thread-num: 15 # 剩余其他任务线程池核心线程数
|
||||
queue-size: 150 # 剩余其他任务线程池队列大小
|
||||
|
||||
# 删除配置,下列配置将不再使用
|
||||
thread-pool:
|
||||
task: # 任务模块的配置
|
||||
heaven: # 采集任务配置
|
||||
thread-num: 20 # 采集任务线程池核心线程数
|
||||
queue-size: 1000 # 采集任务线程池队列大小
|
||||
|
||||
```
|
||||
|
||||
**SQL 变更**
|
||||
|
||||
```sql
|
||||
-- 多集群管理权限2022-09-06新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2000', '多集群管理查看', '1593', '1', '2', '多集群管理查看', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2002', 'Topic-迁移副本', '1593', '1', '2', 'Topic-迁移副本', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2004', 'Topic-扩缩副本', '1593', '1', '2', 'Topic-扩缩副本', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2006', 'Cluster-LoadReBalance-周期均衡', '1593', '1', '2', 'Cluster-LoadReBalance-周期均衡', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2008', 'Cluster-LoadReBalance-立即均衡', '1593', '1', '2', 'Cluster-LoadReBalance-立即均衡', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2010', 'Cluster-LoadReBalance-设置集群规格', '1593', '1', '2', 'Cluster-LoadReBalance-设置集群规格', '0', 'know-streaming');
|
||||
|
||||
|
||||
-- 系统管理权限2022-09-06新增
|
||||
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('3000', '系统管理查看', '1595', '1', '2', '系统管理查看', '0', 'know-streaming');
|
||||
|
||||
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2000', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2002', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2004', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2006', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2008', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2010', '0', 'know-streaming');
|
||||
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '3000', '0', 'know-streaming');
|
||||
|
||||
-- 修改字段长度
|
||||
ALTER TABLE `logi_security_oplog`
|
||||
CHANGE COLUMN `operator_ip` `operator_ip` VARCHAR(64) NOT NULL COMMENT '操作者ip' ,
|
||||
CHANGE COLUMN `operator` `operator` VARCHAR(64) NULL DEFAULT NULL COMMENT '操作者账号' ,
|
||||
CHANGE COLUMN `operate_page` `operate_page` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '操作页面' ,
|
||||
CHANGE COLUMN `operate_type` `operate_type` VARCHAR(64) NOT NULL COMMENT '操作类型' ,
|
||||
CHANGE COLUMN `target_type` `target_type` VARCHAR(64) NOT NULL COMMENT '对象分类' ,
|
||||
CHANGE COLUMN `target` `target` VARCHAR(1024) NOT NULL COMMENT '操作对象' ,
|
||||
CHANGE COLUMN `operation_methods` `operation_methods` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '操作方式' ;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 升级至 `v3.0.0-beta.1`版本
|
||||
|
||||
**SQL 变更**
|
||||
|
||||
1、在`ks_km_broker`表增加了一个监听信息字段。
|
||||
2、为`logi_security_oplog`表 operation_methods 字段设置默认值''。
|
||||
因此需要执行下面的 sql 对数据库表进行更新。
|
||||
|
||||
```sql
|
||||
ALTER TABLE `ks_km_broker`
|
||||
ADD COLUMN `endpoint_map` VARCHAR(1024) NOT NULL DEFAULT '' COMMENT '监听信息' AFTER `update_time`;
|
||||
|
||||
ALTER TABLE `logi_security_oplog`
|
||||
ALTER COLUMN `operation_methods` set default '';
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `2.x`版本 升级至 `v3.0.0-beta.0`版本
|
||||
|
||||
**升级步骤:**
|
||||
|
||||
1. 依旧使用**`2.x 版本的 DB`**,在上面初始化 3.0.0 版本所需数据库表结构及数据;
|
||||
2. 将 2.x 版本中的集群,在 3.0.0 版本,手动逐一接入;
|
||||
3. 将 Topic 业务数据,迁移至 3.0.0 表中,详见下方 SQL;
|
||||
|
||||
**注意事项**
|
||||
|
||||
- 建议升级 3.0.0 版本过程中,保留 2.x 版本的使用,待 3.0.0 版本稳定使用后,再下线 2.x 版本;
|
||||
- 3.0.0 版本仅需要`集群信息`及`Topic的描述信息`。2.x 版本的 DB 的其他数据 3.0.0 版本都不需要;
|
||||
- 部署 3.0.0 版本之后,集群、Topic 等指标数据都为空,3.0.0 版本会周期进行采集,运行一段时间之后就会有该数据了,因此不会将 2.x 中的指标数据进行迁移;
|
||||
|
||||
**迁移数据**
|
||||
|
||||
```sql
|
||||
-- 迁移Topic的备注信息。
|
||||
-- 需在 3.0.0 部署完成后,再执行该SQL。
|
||||
-- 考虑到 2.x 版本中还存在增量数据,因此建议该SQL周期执行,使得增量数据也能被迁移至 3.0.0 版本中。
|
||||
|
||||
UPDATE ks_km_topic
|
||||
INNER JOIN
|
||||
(SELECT
|
||||
topic.cluster_id AS cluster_id,
|
||||
topic.topic_name AS topic_name,
|
||||
topic.description AS description
|
||||
FROM topic WHERE description != ''
|
||||
) AS t
|
||||
|
||||
ON ks_km_topic.cluster_phy_id = t.cluster_id
|
||||
AND ks_km_topic.topic_name = t.topic_name
|
||||
AND ks_km_topic.id > 0
|
||||
SET ks_km_topic.description = t.description;
|
||||
```
|
||||
@@ -1,39 +0,0 @@
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||
|
||||
---
|
||||
|
||||
# 集群接入
|
||||
|
||||
集群的接入总共需要三个步骤,分别是:
|
||||
1. 接入物理集群
|
||||
2. 创建Region
|
||||
3. 创建逻辑集群
|
||||
|
||||
备注:接入集群需要2、3两步是因为普通用户的视角下,看到的都是逻辑集群,如果没有2、3两步,那么普通用户看不到任何信息。
|
||||
|
||||
|
||||
## 1、接入物理集群
|
||||
|
||||

|
||||
|
||||
如上图所示,填写集群信息,然后点击确定,即可完成集群的接入。因为考虑到分布式部署,添加集群之后,需要稍等**`1分钟`**才可以在界面上看到集群的详细信息。
|
||||
|
||||
## 2、创建Region
|
||||
|
||||

|
||||
|
||||
如上图所示,填写Region信息,然后点击确定,即可完成Region的创建。
|
||||
|
||||
备注:Region即为Broker的集合,可以按照业务需要,将Broker归类,从而创建相应的Region。
|
||||
|
||||
## 3、创建逻辑集群
|
||||
|
||||

|
||||
|
||||
|
||||
如上图所示,填写逻辑集群信息,然后点击确定,即可完成逻辑集群的创建。
|
||||
|
Before Width: | Height: | Size: 261 KiB |
|
Before Width: | Height: | Size: 240 KiB |
|
Before Width: | Height: | Size: 195 KiB |
@@ -1,166 +0,0 @@
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||
|
||||
---
|
||||
|
||||
# kafka-manager 使用手册
|
||||
|
||||
管控平台主要有两种用户视角,分别为:
|
||||
|
||||
- 普通用户:站在使用Kafka的角度使用kafka-manager;
|
||||
- 管理员:站在使用与管理Kafka的角度在使用kafka-manager;
|
||||
|
||||
下面我们将从这两个用户的维度说明平台的功能及使用。
|
||||
|
||||
---
|
||||
|
||||
## 1. 普通用户篇
|
||||
|
||||
### 1.1 帐号获取及登录
|
||||
|
||||
- 询问管理员让其提供普通用户的帐号;
|
||||
- 输入帐号及密码,登录kafka-manager;
|
||||
|
||||
---
|
||||
|
||||
### 1.2 Topic申请
|
||||
- 步骤一:点击"Topic申请"按钮申请Topic;
|
||||
- 步骤二:填写申请信息;
|
||||
- 步骤三:等待运维人员或管理员审批;
|
||||
|
||||
**Topic申请完成:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 1.3 Topic信息查看
|
||||
|
||||
普通用户可查看的信息包括:
|
||||
|
||||
- 集群Topic列表及我收藏的Topic列表;
|
||||
- Topic基本信息(Topic创建及修改时间、Topic数据保存时间、Topic负责人等);
|
||||
- Topic分区信息;
|
||||
- Topic消费组信息及消费组消费详情;
|
||||
- Topic实时&历史流量信息;
|
||||
- Topic数据采样;
|
||||
|
||||
**Topic详情信息界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 1.4 Topic运维
|
||||
|
||||
普通用户可进行的Topic运维的操作包括:
|
||||
- 申请Topic扩容
|
||||
- 重置消费偏移;
|
||||
|
||||
**Topic重置消费偏移界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 1.5 告警配置
|
||||
|
||||
kafka-manager告警配置中,仅支持Lag/BytesIn/BytesOut这三类告警,同时告警被触发后,告警消息会被发往指定的Topic(具体哪一个请联系管理员获取)。需要用户主动消费该告警Topic的数据或者统一由管理员将该数据接入外部通知系统,比如接入短信通知或电话通知等。
|
||||
|
||||
**告警规则配置界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 1.6 密码修改
|
||||
|
||||
**密码修改界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
## 2. 管理员篇
|
||||
|
||||
|
||||
### 2.1 帐号获取及登录
|
||||
|
||||
- 默认的管理员帐号密码为`admin/admin`(详见数据库account表);
|
||||
|
||||
---
|
||||
|
||||
### 2.2 添加集群
|
||||
|
||||
登录之后,就需要将我们搭建的Kafka集群添加到kafka-manager中。
|
||||
|
||||
**添加Kafka集群界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 2.3 监控指标
|
||||
|
||||
#### 2.3.1 集群维度指标
|
||||
|
||||
- 集群的基本信息;
|
||||
- 集群历史及实时流量信息;
|
||||
- 集群Topic信息;
|
||||
- 集群Broker信息;
|
||||
- 集群ConsumerGroup信息;
|
||||
- 集群Region信息;
|
||||
- 集群当前Controller及变更历史;
|
||||
|
||||
**集群维度监控指标界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
#### 2.3.2 Broker维度指标
|
||||
|
||||
- Broker基本信息;
|
||||
- Broker历史与实时流量信息;
|
||||
- Broker内Topic信息;
|
||||
- Broker内分区信息;
|
||||
- Broker关键指标(日志刷盘时间等);
|
||||
- Topic分析(Topic流量占比等);
|
||||
|
||||
**`Broker`维度监控指标界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
#### 2.3.3 Topic维度指标
|
||||
|
||||
- 在普通用户的基础上,增加展示Topic的Broker信息;
|
||||
|
||||
图略
|
||||
|
||||
---
|
||||
|
||||
#### 2.3.4 其他维度指标
|
||||
|
||||
- 消费组消费哪些具体的Topic;
|
||||
|
||||
图略
|
||||
|
||||
---
|
||||
|
||||
### 2.4 集群运维管控
|
||||
|
||||
- Topic申请及扩容工单审批;
|
||||
- Topic创建、删除、扩容及属性修改;
|
||||
- Broker维度优先副本选举;
|
||||
- 分区粒度迁移;
|
||||
- 逻辑Region管理;
|
||||
|
||||
**资源审批界面:**
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 2.5 用户管理
|
||||
|
||||
- 对用户进行增删改查;
|
||||
|
||||
**用户管理界面:**
|
||||

|
||||
321
docs/user_guide/faq.md
Normal file
@@ -0,0 +1,321 @@
|
||||
|
||||

|
||||
|
||||
# FAQ
|
||||
|
||||
- [FAQ](#faq)
|
||||
- [1、支持哪些 Kafka 版本?](#1支持哪些-kafka-版本)
|
||||
  - [2、2.x 版本和 3.0 版本有什么差异?](#22x-版本和-30-版本有什么差异)
|
||||
- [3、页面流量信息等无数据?](#3页面流量信息等无数据)
|
||||
- [4、`Jmx`连接失败如何解决?](#4jmx连接失败如何解决)
|
||||
- [5、有没有 API 文档?](#5有没有-api-文档)
|
||||
- [6、删除 Topic 成功后,为何过段时间又出现了?](#6删除-topic-成功后为何过段时间又出现了)
|
||||
- [7、如何在不登录的情况下,调用接口?](#7如何在不登录的情况下调用接口)
|
||||
- [8、Specified key was too long; max key length is 767 bytes](#8specified-key-was-too-long-max-key-length-is-767-bytes)
|
||||
- [9、出现 ESIndexNotFoundEXception 报错](#9出现-esindexnotfoundexception-报错)
|
||||
- [10、km-console 打包构建失败](#10km-console-打包构建失败)
|
||||
- [11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用?](#11在-km-console-目录下执行-npm-run-start-时看不到应用构建和热加载过程如何启动单个应用)
|
||||
- [12、权限识别失败问题](#12权限识别失败问题)
|
||||
- [13、接入开启kerberos认证的kafka集群](#13接入开启kerberos认证的kafka集群)
|
||||
- [14、对接Ldap的配置](#14对接ldap的配置)
|
||||
- [15、测试时使用Testcontainers的说明](#15测试时使用testcontainers的说明)
|
||||
- [16、JMX连接失败怎么办](#16jmx连接失败怎么办)
|
||||
- [17、zk监控无数据问题](#17zk监控无数据问题)
|
||||
- [18、启动失败,报NoClassDefFoundError如何解决](#18启动失败报noclassdeffounderror如何解决)
|
||||
  - [19、依赖ElasticSearch 8.0以上版本部署后指标信息无法正常显示如何解决](#19依赖elasticsearch-80以上版本部署后指标信息无法正常显示如何解决)
|
||||
|
||||
## 1、支持哪些 Kafka 版本?
|
||||
|
||||
- 支持 0.10+ 的 Kafka 版本;
|
||||
- 支持 ZK 及 Raft 运行模式的 Kafka 版本;
|
||||
|
||||
|
||||
|
||||
## 2、2.x 版本和 3.0 版本有什么差异?
|
||||
|
||||
**全新设计理念**
|
||||
|
||||
- 在 0 侵入、0 门槛的前提下提供直观 GUI 用于管理和观测 Apache Kafka®,帮助用户降低 Kafka CLI 操作门槛,轻松实现对原生 Kafka 集群的可管、可见、可掌控,提升 Kafka 使用体验和降低管理成本。
|
||||
- 支持海量集群一键接入,无需任何改造,即可实现集群深度纳管,真正的 0 侵入、插件化系统设计,覆盖 0.10.x-3.x.x 众多 Kafka 版本无缝纳管。
|
||||
|
||||
**开源协议调整**
|
||||
|
||||
- 3.x:AGPL 3.0
|
||||
- 2.x:Apache License 2.0
|
||||
|
||||
更多具体内容见:[新旧版本对比](https://doc.knowstreaming.com/product/9-attachment#92%E6%96%B0%E6%97%A7%E7%89%88%E6%9C%AC%E5%AF%B9%E6%AF%94)
|
||||
|
||||
|
||||
|
||||
## 3、页面流量信息等无数据?
|
||||
|
||||
- 1、`Broker JMX`未正确开启
|
||||
|
||||
可以参看:[Jmx 连接配置&问题解决](https://doc.knowstreaming.com/product/9-attachment#91jmx-%E8%BF%9E%E6%8E%A5%E5%A4%B1%E8%B4%A5%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3)
|
||||
|
||||
- 2、`ES` 存在问题
|
||||
|
||||
建议使用`ES 7.6`版本,同时创建近 7 天的索引,具体见:[快速开始](./1-quick-start.md) 中的 ES 索引模版及索引创建。
|
||||
|
||||
|
||||
|
||||
## 4、`Jmx`连接失败如何解决?
|
||||
|
||||
- 参看 [Jmx 连接配置&问题解决](https://doc.knowstreaming.com/product/9-attachment#91jmx-%E8%BF%9E%E6%8E%A5%E5%A4%B1%E8%B4%A5%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3) 说明。
|
||||
|
||||
|
||||
|
||||
## 5、有没有 API 文档?
|
||||
|
||||
`KnowStreaming` 采用 Swagger 进行 API 说明,在启动 KnowStreaming 服务之后,就可以从下面地址看到。
|
||||
|
||||
Swagger-API 地址: [http://IP:PORT/swagger-ui.html#/](http://IP:PORT/swagger-ui.html#/)
|
||||
|
||||
|
||||
|
||||
## 6、删除 Topic 成功后,为何过段时间又出现了?
|
||||
|
||||
**原因说明:**
|
||||
|
||||
`KnowStreaming` 会去请求 Topic 的 endoffset 信息,要获取这个信息就需要发送 metadata 请求,发送 metadata 请求的时候,如果集群允许自动创建 Topic,那么当 Topic 不存在时,就会自动将该 Topic 创建出来。
|
||||
|
||||
**问题解决:**
|
||||
|
||||
因为在 `KnowStreaming` 上,禁止 Kafka 客户端内部元信息获取这个动作非常的难做到,因此短时间内这个问题不好从 `KnowStreaming` 上解决。
|
||||
|
||||
当然,对于不存在的 Topic,`KnowStreaming` 是不会进行元信息请求的,因此也不用担心会莫名其妙的创建一个 Topic 出来。
|
||||
|
||||
但是,另外一点,对于开启允许 Topic 自动创建的集群,建议是关闭该功能,开启是非常危险的,如果关闭之后,`KnowStreaming` 也不会有这个问题。
|
||||
|
||||
最后这里举个开启这个配置后,非常危险的代码例子吧:
|
||||
|
||||
```java
|
||||
for (int i= 0; i < 100000; ++i) {
|
||||
// 如果是客户端类似这样写的,那么一启动,那么将创建10万个Topic出来,集群元信息瞬间爆炸,controller可能就不可服务了。
|
||||
producer.send(new ProducerRecord<String, String>("know_streaming" + i,"hello logi_km"));
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## 7、如何在不登录的情况下,调用接口?
|
||||
|
||||
步骤一:接口调用时,在 header 中,增加如下信息:
|
||||
|
||||
```shell
|
||||
# 表示开启登录绕过
|
||||
Trick-Login-Switch : on
|
||||
|
||||
# 登录绕过的用户, 这里可以是admin, 或者是其他的, 但是必须在系统管理->用户管理中设置了该用户。
|
||||
Trick-Login-User : admin
|
||||
```
|
||||
|
||||
|
||||
|
||||
步骤二:点击右上角"系统管理",选择配置管理,在页面中添加以下键值对。
|
||||
|
||||
```shell
|
||||
# 模块选择
|
||||
SECURITY.LOGIN
|
||||
|
||||
# 设置的配置键,必须是这个
|
||||
SECURITY.TRICK_USERS
|
||||
|
||||
# 设置的value,是json数组的格式,包含步骤一header中设置的用户名,例如
|
||||
[ "admin", "logi"]
|
||||
```
|
||||
|
||||
|
||||
|
||||
步骤三:解释说明
|
||||
|
||||
设置完成上面两步之后,就可以直接调用需要登录的接口了。
|
||||
|
||||
但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。
|
||||
|
||||
## 8、Specified key was too long; max key length is 767 bytes
|
||||
|
||||
**原因:** 不同版本的 InnoDB 引擎,参数‘innodb_large_prefix’默认值不同,即在 5.6 默认值为 OFF,5.7 默认值为 ON。
|
||||
|
||||
对于引擎为 InnoDB,innodb_large_prefix=OFF,且行格式为 Antelope 即支持 REDUNDANT 或 COMPACT 时,索引键前缀长度最大为 767 字节。innodb_large_prefix=ON,且行格式为 Barracuda 即支持 DYNAMIC 或 COMPRESSED 时,索引键前缀长度最大为 3072 字节。
|
||||
|
||||
**解决方案:**
|
||||
|
||||
- 减少 varchar 字符大小低于 767/4=191。
|
||||
- 将字符集改为 latin1(一个字符=一个字节)。
|
||||
- 开启‘innodb_large_prefix’,修改默认文件格式‘innodb_file_format’为 Barracuda,并设置 row_format=dynamic。
|
||||
|
||||
## 9、出现 ESIndexNotFoundEXception 报错
|
||||
|
||||
**原因 :**没有创建 ES 索引模版
|
||||
|
||||
**解决方案:**执行 init_es_template.sh 脚本,创建 ES 索引模版即可。
|
||||
|
||||
## 10、km-console 打包构建失败
|
||||
|
||||
首先,**请确保您正在使用最新版本**,版本列表见 [Tags](https://github.com/didi/KnowStreaming/tags)。如果不是最新版本,请升级后再尝试有无问题。
|
||||
|
||||
常见的原因是由于工程依赖没有正常安装,导致在打包过程中缺少依赖,造成打包失败。您可以检查是否有以下文件夹,且文件夹内是否有内容
|
||||
|
||||
```
|
||||
KnowStreaming/km-console/node_modules
|
||||
KnowStreaming/km-console/packages/layout-clusters-fe/node_modules
|
||||
KnowStreaming/km-console/packages/config-manager-fe/node_modules
|
||||
```
|
||||
|
||||
如果发现没有对应的 `node_modules` 目录或者目录内容为空,说明依赖没有安装成功。请按以下步骤操作:
|
||||
|
||||
1. 手动删除上述三个文件夹(如果有)
|
||||
|
||||
2. 如果之前是通过 `mvn install` 打包 `km-console`,请到项目根目录(KnowStreaming)下重新输入该指令进行打包。观察打包过程有无报错。如有报错,请见步骤 4。
|
||||
|
||||
3. 如果是通过本地独立构建前端工程的方式(指直接执行 `npm run build`),请进入 `KnowStreaming/km-console` 目录,执行下述步骤(注意:执行时请确保您在使用 `node v12` 版本)
|
||||
|
||||
a. 执行 `npm run i`。如有报错,请见步骤 4。
|
||||
|
||||
b. 执行 `npm run build`。如有报错,请见步骤 4。
|
||||
|
||||
4. 麻烦联系我们协助解决。推荐提供以下信息,方便我们快速定位问题,示例如下。
|
||||
|
||||
```
|
||||
操作系统: Mac
|
||||
命令行终端:bash
|
||||
Node 版本: v12.22.12
|
||||
复现步骤: 1. -> 2.
|
||||
错误截图:
|
||||
```
|
||||
|
||||
## 11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用?
|
||||
|
||||
需要到具体的应用中执行 `npm run start`,例如 `cd packages/layout-clusters-fe` 后,执行 `npm run start`。
|
||||
|
||||
应用启动后需要到基座应用中查看(需要启动基座应用,即 layout-clusters-fe)。
|
||||
|
||||
|
||||
## 12、权限识别失败问题
|
||||
1、使用admin账号登录KnowStreaming时,点击系统管理-用户管理-角色管理-新增角色,查看页面是否正常。
|
||||
|
||||
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_gwGfjN9N92UxzHU8dfzr" width = "400" >
|
||||
|
||||
2、查看'/logi-security/api/v1/permission/tree'接口返回值,出现如下图所示乱码现象。
|
||||

|
||||
|
||||
3、查看logi_security_permission表,看看是否出现了中文乱码现象。
|
||||
|
||||
根据以上几点,我们可以确定是由于数据库乱码造成的权限识别失败问题。
|
||||
|
||||
+ 原因:由于数据库编码和我们提供的脚本不一致,数据库里的数据发生了乱码,因此出现权限识别失败问题。
|
||||
+ 解决方案:清空数据库数据,将数据库字符集调整为utf8,最后重新执行[dml-logi.sql](https://github.com/didi/KnowStreaming/blob/master/km-dist/init/sql/dml-logi.sql)脚本导入数据即可。
|
||||
|
||||
|
||||
## 13、接入开启kerberos认证的kafka集群
|
||||
|
||||
1. 部署KnowStreaming的机器上安装krb客户端;
|
||||
2. 替换/etc/krb5.conf配置文件;
|
||||
3. 把kafka对应的keytab复制到该机器目录下;
|
||||
4. 接入集群时认证配置,配置信息根据实际情况填写;
|
||||
```json
|
||||
{
|
||||
"security.protocol": "SASL_PLAINTEXT",
|
||||
"sasl.mechanism": "GSSAPI",
|
||||
"sasl.jaas.config": "com.sun.security.auth.module.Krb5LoginModule required useKeyTab=true keyTab=\"/etc/keytab/kafka.keytab\" storeKey=true useTicketCache=false principal=\"kafka/kafka@TEST.COM\";",
|
||||
"sasl.kerberos.service.name": "kafka"
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## 14、对接Ldap的配置
|
||||
|
||||
```yaml
|
||||
# 需要在application.yml中增加如下配置。相关配置的信息,按实际情况进行调整
|
||||
account:
|
||||
ldap:
|
||||
url: ldap://127.0.0.1:8080/
|
||||
basedn: DC=senz,DC=local
|
||||
factory: com.sun.jndi.ldap.LdapCtxFactory
|
||||
filter: sAMAccountName
|
||||
security:
|
||||
authentication: simple
|
||||
principal: CN=search,DC=senz,DC=local
|
||||
credentials: xxxxxxx
|
||||
auth-user-registration: false # 是否注册到mysql,默认false
|
||||
auth-user-registration-role: 1677 # 1677是超级管理员角色的id,如果赋予想默认赋予普通角色,可以到ks新建一个。
|
||||
|
||||
# 需要在application.yml中修改如下配置
|
||||
spring:
|
||||
logi-security:
|
||||
login-extend-bean-name: ksLdapLoginService # 表示使用ldap的service
|
||||
```
|
||||
|
||||
## 15、测试时使用Testcontainers的说明
|
||||
|
||||
1. 需要docker运行环境 [Testcontainers运行环境说明](https://www.testcontainers.org/supported_docker_environment/)
|
||||
2. 如果本机没有docker,可以使用[远程访问docker](https://docs.docker.com/config/daemon/remote-access/) [Testcontainers配置说明](https://www.testcontainers.org/features/configuration/#customizing-docker-host-detection)
|
||||
|
||||
|
||||
## 16、JMX连接失败怎么办
|
||||
|
||||
详细见:[解决连接JMX失败](../dev_guide/%E8%A7%A3%E5%86%B3%E8%BF%9E%E6%8E%A5JMX%E5%A4%B1%E8%B4%A5.md)
|
||||
|
||||
|
||||
## 17、zk监控无数据问题
|
||||
|
||||
**现象:**
|
||||
zookeeper集群正常,但Ks上zk页面所有监控指标无数据,`KnowStreaming` log_error.log日志提示
|
||||
|
||||
```vim
|
||||
[MetricCollect-Shard-0-8-thread-1] ERROR class=c.x.k.s.k.c.s.h.c.z.HealthCheckZookeeperService||method=checkWatchCount||param=ZookeeperParam(zkAddressList=[Tuple{v1=192.168.xxx.xx, v2=2181}, Tuple{v1=192.168.xxx.xx, v2=2181}, Tuple{v1=192.168.xxx.xx, v2=2181}], zkConfig=null)||config=HealthAmountRatioConfig(amount=100000, ratio=0.8)||result=Result{message='mntr is not executed because it is not in the whitelist.
|
||||
', code=8031, data=null}||errMsg=get metrics failed, may be collect failed or zk mntr command not in whitelist.
|
||||
2023-04-23 14:39:07.234 [MetricCollect-Shard-0-8-thread-1] ERROR class=c.x.k.s.k.c.s.h.checker.AbstractHeal
|
||||
```
|
||||
|
||||
|
||||
原因就很明确了。需要开放zk的四字命令,在`zoo.cfg`配置文件中添加
|
||||
```
|
||||
4lw.commands.whitelist=mntr,stat,ruok,envi,srvr,envi,cons,conf,wchs,wchp
|
||||
```
|
||||
|
||||
|
||||
建议至少开放上述几个四字命令,当然,您也可以全部开放
|
||||
```
|
||||
4lw.commands.whitelist=*
|
||||
```
|
||||
|
||||
## 18、启动失败,报NoClassDefFoundError如何解决
|
||||
|
||||
**错误现象:**
|
||||
```log
|
||||
# 启动失败,报nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
|
||||
|
||||
|
||||
2023-08-11 22:54:29.842 [main] ERROR class=org.springframework.boot.SpringApplication||Application run failed
|
||||
org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'quartzScheduler' defined in class path resource [com/didiglobal/logi/job/LogIJobAutoConfiguration.class]: Bean instantiation via factory method failed; nested exception is org.springframework.beans.BeanInstantiationException: Failed to instantiate [com.didiglobal.logi.job.core.Scheduler]: Factory method 'quartzScheduler' threw exception; nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
|
||||
at org.springframework.beans.factory.support.ConstructorResolver.instantiate(ConstructorResolver.java:657)
|
||||
```
|
||||
|
||||
|
||||
**问题原因:**
|
||||
1. `KnowStreaming` 依赖的 `Logi-Job` 初始化 `WorkerSingleton$Singleton` 失败。
|
||||
2. `WorkerSingleton$Singleton` 初始化的过程中,会去获取一些操作系统的信息,如果获取时出现了异常,则会导致 `WorkerSingleton$Singleton` 初始化失败。
|
||||
|
||||
|
||||
**临时建议:**
|
||||
|
||||
`Logi-Job` 问题的修复时间不好控制,之前我们测试验证了一下,在 `Windows`、`Mac`、`CentOS` 这几个操作系统下基本上都是可以正常运行的。
|
||||
|
||||
所以,如果有条件的话,可以暂时先使用这几个系统部署 `KnowStreaming`。
|
||||
|
||||
如果在 `Windows`、`Mac`、`CentOS` 这几个操作系统下也出现了启动失败的问题,可以重试2-3次看是否还是启动失败,或者换一台机器试试。
|
||||
|
||||
## 19、依赖ElasticSearch 8.0以上版本部署后指标信息无法正常显示如何解决
|
||||
**错误现象**
|
||||
```log
|
||||
Warnings: [299 Elasticsearch-8.9.1-a813d015ef1826148d9d389bd1c0d781c6e349f0 "Legacy index templates are deprecated in favor of composable templates."]
|
||||
```
|
||||
**问题原因**
|
||||
1. ES8.0和ES7.0版本存在Template模式的差异,建议使用 /_index_template 端点来管理模板;
|
||||
2. ES java client在此版本的行为很奇怪表现为读取数据为空;
|
||||
|
||||
**解决方法**
|
||||
修改`es_template_create.sh`脚本中所有的`/_template`为`/_index_template`后执行即可。
|
||||
|
||||
92
docs/user_guide/新旧对比手册.md
Normal file
@@ -0,0 +1,92 @@
|
||||
## 9.2、新旧版本对比
|
||||
|
||||
### 9.2.1、全新的设计理念
|
||||
|
||||
- 在 0 侵入、0 门槛的前提下提供直观 GUI 用于管理和观测 Apache Kafka®,帮助用户降低 Kafka CLI 操作门槛,轻松实现对原生 Kafka 集群的可管、可见、可掌控,提升 Kafka 使用体验和降低管理成本。
|
||||
- 支持海量集群一键接入,无需任何改造,即可实现集群深度纳管,真正的 0 侵入、插件化系统设计,覆盖 0.10.x-3.x.x 众多 Kafka 版本无缝纳管。
|
||||
|
||||
### 9.2.2、产品名称&协议
|
||||
|
||||
- Know Streaming V3.0
|
||||
|
||||
- 名称:Know Streaming
|
||||
- 协议:AGPL 3.0
|
||||
|
||||
- Logi-KM V2.x
|
||||
|
||||
- 名称:Logi-KM
|
||||
- 协议:Apache License 2.0
|
||||
|
||||
### 9.2.3、功能架构
|
||||
|
||||
- Know Streaming V3.0
|
||||
|
||||

|
||||
|
||||
- Logi-KM V2.x
|
||||
|
||||

|
||||
|
||||
### 9.2.4、功能变更
|
||||
|
||||
- 多集群管理
|
||||
|
||||
- 增加健康监测体系、关键组件&指标 GUI 展示
|
||||
- 增加 2.8.x 以上 Kafka 集群接入,覆盖 0.10.x-3.x
|
||||
- 删除逻辑集群、共享集群、Region 概念
|
||||
|
||||
- Cluster 管理
|
||||
|
||||
- 增加集群概览信息、集群配置变更记录
|
||||
- 增加 Cluster 健康分,健康检查规则支持自定义配置
|
||||
- 增加 Cluster 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Cluster 层 I/O、Disk 的 Load Reblance 功能,支持定时均衡任务(企业版)
|
||||
- 删除限流、鉴权功能
|
||||
- 删除 APPID 概念
|
||||
|
||||
- Broker 管理
|
||||
|
||||
- 增加 Broker 健康分
|
||||
- 增加 Broker 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Broker 参数配置功能,需重启生效
|
||||
- 增加 Controller 变更记录
|
||||
- 增加 Broker Datalogs 记录
|
||||
- 删除 Leader Rebalance 功能
|
||||
- 删除 Broker 优先副本选举
|
||||
|
||||
- Topic 管理
|
||||
|
||||
- 增加 Topic 健康分
|
||||
- 增加 Topic 关键指标统计和 GUI 展示,支持自定义配置
|
||||
- 增加 Topic 参数配置功能,可实时生效
|
||||
- 增加 Topic 批量迁移、Topic 批量扩缩副本功能
|
||||
- 增加查看系统 Topic 功能
|
||||
- 优化 Partition 分布的 GUI 展示
|
||||
- 优化 Topic Message 数据采样
|
||||
- 删除 Topic 过期概念
|
||||
- 删除 Topic 申请配额功能
|
||||
|
||||
- Consumer 管理
|
||||
|
||||
- 优化了 ConsumerGroup 展示形式,增加 Consumer Lag 的 GUI 展示
|
||||
|
||||
- ACL 管理
|
||||
|
||||
- 增加原生 ACL GUI 配置功能,可配置生产、消费、自定义多种组合权限
|
||||
- 增加 KafkaUser 功能,可自定义新增 KafkaUser
|
||||
|
||||
- 消息测试(企业版)
|
||||
|
||||
- 增加生产者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
|
||||
- 增加消费者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
|
||||
|
||||
- Job
|
||||
|
||||
- 优化 Job 模块,支持任务进度管理
|
||||
|
||||
- 系统管理
|
||||
|
||||
- 优化用户、角色管理体系,支持自定义角色配置页面及操作权限
|
||||
- 优化审计日志信息
|
||||
- 删除多租户体系
|
||||
- 删除工单流程
|
||||
848
docs/user_guide/用户使用手册.md
Normal file
@@ -0,0 +1,848 @@
|
||||
|
||||
## 5.0、产品简介
|
||||
|
||||
`Know Streaming` 是一套云原生的 Kafka 管控平台,脱胎于众多互联网内部多年的 Kafka 运营实践经验,专注于 Kafka 运维管控、监控告警、资源治理、多活容灾等核心场景,在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设,提供一系列特色的功能,极大地方便了用户和运维人员的日常使用,让普通运维人员都能成为 Kafka 专家。
|
||||
|
||||
## 5.1、功能架构
|
||||
|
||||

|
||||
|
||||
## 5.2、体验路径
|
||||
|
||||
下面是用户第一次使用我们产品的典型体验路径:
|
||||
|
||||

|
||||
|
||||
## 5.3、常用功能
|
||||
|
||||
### 5.3.1、用户管理
|
||||
|
||||
用户管理是提供给管理员进行人员管理和用户角色管理的功能模块,可以进行新增用户和分配角色。下面是一个典型的场景:
|
||||
eg:团队加入了新成员,需要给这位成员分配一个使用系统的账号,需要以下几个步骤
|
||||
|
||||
- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”>“新增用户”,输入“账号”、“实名”、“密码”,根据此账号所需要的权限,选择此账号所对应的角色。如果有满足权限的角色,则用户新增成功。如果没有满足权限的角色,则需要新增角色(步骤 2)
|
||||
- 步骤 2:点击“系统管理”>“用户管理”>“角色管理”>“新增角色”。输入角色名称和描述,给此角色分配权限,点击“确定”,角色新增成功
|
||||
|
||||
- 步骤 3:根据此新增的角色,参考步骤 1,重新新增用户
|
||||
|
||||
- 步骤 4:此用户账号新增成功,可以进行登录产品使用
|
||||
|
||||

|
||||
|
||||
### 5.3.2、接入集群
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“接入集群”
|
||||
|
||||
- 步骤 2:填写相关集群信息
|
||||
|
||||
- 集群名称:支持中英文、下划线、短划线(-),最长 128 字符。平台内不能重复
|
||||
- Bootstrap Servers:输入 Bootstrap Servers 地址。输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)。
|
||||
- Zookeeper:输入 zookeeper 地址,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)
|
||||
- Metrics 选填:JMX Port,输入 JMX 端口号;MaxConn,输入服务端最大允许的连接数
|
||||
- Security:若有 JMX 账号密码,则输入账号密码
|
||||
- Version:选择所支持的 kafka 版本,如果没有匹配则可以选择相近版本
|
||||
- 集群配置选填:输入用户创建 kafka 客户端进行信息获取的相关配置
|
||||
- 集群描述:最多 200 字符
|
||||
|
||||

|
||||
|
||||
### 5.3.3、新增 Topic
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“新增 Topic”按钮>“创建 Topic“抽屉
|
||||
|
||||
- 步骤 2:输入“Topic 名称(不能重复)”、“Topic 描述”、“分区数”、“副本数”、“数据保存时间”、“清理策略(删除或压缩)”
|
||||
|
||||
- 步骤 3:展开“更多配置”可以打开高级配置选项,根据自己需要输入相应配置参数
|
||||
|
||||
- 步骤 4:点击“确定”,创建 Topic 完成
|
||||
|
||||

|
||||
|
||||
### 5.3.4、Topic 扩分区
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“Topic 列表“>操作项”扩分区“>“扩分区”抽屉
|
||||
|
||||
- 步骤 2:扩分区抽屉展示内容为“流量的趋势图”、“当前分区数及支持的最低消息写入速率”、“扩分区后支持的最低消息写入速率”
|
||||
|
||||
- 步骤 3:输入所需的分区总数,自动计算出扩分区后支持的最低消息写入速率
|
||||
|
||||
- 步骤 4:点击确定,扩分区完成
|
||||
|
||||

|
||||
|
||||
### 5.3.5、Topic 批量扩缩副本
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量扩缩副本“>“批量扩缩容”抽屉
|
||||
|
||||
- 步骤 2:选择所需要进行扩缩容的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中
|
||||
|
||||
- 步骤 3:Topic 列表展示 Topic“近三天平均流量”、“近三天峰值流量及时间”、“Partition 数”、”当前副本数“、“新副本数”
|
||||
|
||||
- 步骤 4:扩容时,选择目标节点,新增的副本会在选择的目标节点上;缩容时不需要选择目标节点,自动删除最后一个(或几个)副本
|
||||
|
||||
- 步骤 5:输入迁移任务配置参数,包含限流值和任务执行时间
|
||||
|
||||
- 步骤 6:输入任务描述
|
||||
|
||||
- 步骤 7:点击“确定”,创建 Topic 扩缩副本任务
|
||||
|
||||
- 步骤 8:去“Job”模块的 Job 列表查看创建的任务,如果已经执行则可以查看执行进度;如果未开始执行则可以编辑任务
|
||||
|
||||

|
||||
|
||||
### 5.3.6、Topic 批量迁移
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量迁移“>“批量迁移”抽屉
|
||||
|
||||
- 步骤 2:选择所需要进行迁移的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中
|
||||
|
||||
- 步骤 3:选择所需要迁移的 partition 和迁移数据的时间范围
|
||||
|
||||
- 步骤 4:选择目标节点(节点数必须不小于最大副本数)
|
||||
|
||||
- 步骤 5:点击“预览任务计划”,打开“任务计划”二次抽屉,可对目标 Broker ID 进行编辑
|
||||
|
||||
- 步骤 6:输入迁移任务配置参数,包含限流值和任务执行时间
|
||||
|
||||
- 步骤 7:输入任务描述
|
||||
|
||||
- 步骤 8:点击“确定”,创建 Topic 迁移任务
|
||||
|
||||
- 步骤 9:去“Job”模块的 Job 列表查看创建的任务,如果已经执行则可以查看执行进度;如果未开始执行则可以编辑任务
|
||||
|
||||

|
||||
|
||||
### 5.3.7、设置 Cluster 健康检查规则
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“集群健康状态旁边 icon”>“健康度设置抽屉”
|
||||
|
||||
- 步骤 2:健康度设置抽屉展示出了检查项和其对应的权重,可以修改检查项的检查规则
|
||||
|
||||
- 步骤 3:检查规则可配置,分别为
|
||||
|
||||
- Cluster:集群 controller 数不等于 1(数字不可配置)不通过
|
||||
- Broker:RequestQueueSize 大于等于 10(默认为 10,可配置数字)不通过
|
||||
- Broker:NetworkProcessorAvgIdlePercent 的 Idle 小于等于 0.8%(默认为 0.8%,可配置数字)不通过
|
||||
- Topic:无 leader 的 Topic 数量,大于等于 1(默认为 1,数字可配置)不通过
|
||||
- Topic:Topic 在 10(默认为 10,数字可配置)个周期内 8(默认为 8,数字可配置)个周期内处于未同步的状态则不通过
|
||||
- ConsumerGroup:Group 在 10(默认为 10,数字可配置)个周期内进行 8(默认为 8,数字可配置)次 re-balance 不通过
|
||||
|
||||
- 步骤 4:设置完成后,点击“确认”,健康检查规则设置成功
|
||||
|
||||

|
||||
|
||||
### 5.3.8、图表指标筛选
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“指标筛选 icon”>“指标筛选抽屉”
|
||||
|
||||
- 步骤 2:指标筛选抽屉展示信息为以下几类“Health”、“Cluster”、“Broker”、“Consumer”、“Security”、“Job”
|
||||
|
||||
- 步骤 3:默认勾选比较重要的指标进行展示。根据需要选中/取消选中相应指标,点击”确认“,指标筛选成功,展示的图表随之变化
|
||||
|
||||

|
||||
|
||||
### 5.3.9、编辑 Broker 配置
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Brokers”>“Broker ID”>“Configuration”TAB>“编辑”按钮
|
||||
|
||||
- 步骤 2:输入配置项的新配置内容
|
||||
|
||||
- 步骤 3:(选填)点击“应用于全部 Broker”,将此配置项的修改应用于全部的 Broker
|
||||
|
||||
- 步骤 4:点击“确认”,Broker 配置修改成功
|
||||
|
||||

|
||||
|
||||
### 5.3.10、重置 consumer Offset
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”>“Consumer Group”名称>“Consumer Group 详情”抽屉>“重置 Offset”按钮>“重置 Offset”抽屉
|
||||
|
||||
- 步骤 2:选择重置 Offset 的类型,可“重置到指定时间”或“重置分区”
|
||||
|
||||
- 步骤 3:重置到指定时间,可选择“最新 Offset”或“自定义时间”
|
||||
|
||||
- 步骤 4:重置分区,可选择 partition 和其重置的 offset
|
||||
|
||||
- 步骤 5:点击“确认”,重置 Offset 开始执行
|
||||
|
||||

|
||||
|
||||
### 5.3.11、新增 ACL
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“Users”>“新增 ACL”
|
||||
|
||||
- 步骤 2:输入 ACL 配置参数
|
||||
|
||||
- ACL 用途:生产权限、消费权限、自定义权限
|
||||
- 生产权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic
|
||||
- 消费权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic;可选择应用于所有 Consumer Group 或者特定 Consumer Group
|
||||
|
||||
- 步骤 3:点击“确定”,新增 ACL 成功
|
||||
|
||||

|
||||
|
||||
## 5.4、全部功能
|
||||
|
||||
### 5.4.1、登录/退出登录
|
||||
|
||||
- 登录:输入账号密码,点击登录
|
||||
|
||||
- 退出登录:鼠标悬停右上角“头像”或者“用户名”,出现小弹窗“登出”,点击“登出”,退出登录
|
||||
|
||||
### 5.4.2、系统管理
|
||||
|
||||
用户登录完成之后,点击页面右上角【系统管理】按钮,切换到系统管理的视角,可以进行配置管理、用户管理、审计日志查看。
|
||||

|
||||
|
||||
#### 5.4.2.1、配置管理
|
||||
|
||||
配置管理是提供给管理员一个快速配置配置文件的能力,所配置的配置文件将会在对应模块生效。
|
||||
|
||||
#### 5.4.2.2、查看配置列表
|
||||
|
||||
- 步骤 1:点击”系统管理“>“配置管理”
|
||||
|
||||
- 步骤 2:列表展示配置所属模块、配置键、配置值、启用状态、更新时间、更新人。列表有操作项编辑、删除,可对配置模块、配置键、配置值、描述、启用状态进行配置,也可删除此条配置
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.3、新增配置
|
||||
|
||||
- 步骤 1:点击“系统管理”>“配置管理”>“新增配置”
|
||||
|
||||
- 步骤 2:模块:下拉选择所有可配置的模块;配置键:不限制输入内容,500 字以内;配置值:代码编辑器样式,不限内容不限长度;启用状态开关:可以启用/禁用此项配置
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.4、编辑配置
|
||||
|
||||
可对配置模块、配置键、配置值、描述、启用状态进行配置。
|
||||
|
||||
#### 5.4.2.5、用户管理
|
||||
|
||||
用户管理是提供给管理员进行人员管理和用户角色管理的功能模块,可以进行新增用户和分配角色。
|
||||
|
||||
#### 5.4.2.6、人员管理列表
|
||||
|
||||
- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”
|
||||
|
||||
- 步骤 2:人员管理列表展示用户角色、用户实名、用户分配的角色、更新时间、编辑操作。
|
||||
|
||||
- 步骤 3:列表支持”用户账号“、“用户实名”、“角色名”筛选。
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.7、新增用户
|
||||
|
||||
- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”>“新增用户”
|
||||
|
||||
- 步骤 2:填写“用户账号”、“用户实名”、“用户密码”这些必填参数,可以对此账号分配已经存在的角色。
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.8、编辑用户
|
||||
|
||||
- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”>列表操作项“编辑”
|
||||
|
||||
- 步骤 2:用户账号不可编辑;可以编辑“用户实名”,修改“用户密码”,重新分配“用户角色“
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.9、角色管理列表
|
||||
|
||||
- 步骤 1:点击“系统管理”>“用户管理”>“角色管理”
|
||||
|
||||
- 步骤 2:角色列表展示信息为“角色 ID”、“名称”、“描述”、“分配用户数”、“最后修改人”、“最后更新时间”、操作项“查看详情”、操作项”分配用户“
|
||||
|
||||
- 步骤 3:列表有筛选框,可对“角色名称”进行筛选
|
||||
|
||||
- 步骤 4:列表操作项,“查看详情”可查看到角色绑定的权限项,”分配用户“可对此项角色下绑定的用户进行增减
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.10、新增角色
|
||||
|
||||
- 步骤 1:点击“系统管理”>“用户管理”>“角色管理”>“新增角色”
|
||||
|
||||
- 步骤 2:输入“角色名称”(角色名称只能由中英文大小写、数字、下划线\_组成,长度限制在 3 ~ 128 字符)、“角色描述“(不能为空)、“分配权限“(至少需要分配一项权限),点击确认,新增角色成功添加到角色列表
|
||||
|
||||

|
||||
|
||||
#### 5.4.2.11、审计日志
|
||||
|
||||
- 步骤 1:点击“系统管理”>“审计日志“
|
||||
- 步骤 2:审计日志包含所有对于系统的操作记录,操作记录列表展示信息为下
|
||||
|
||||
- “模块”:操作对象所属的功能模块
|
||||
- “操作对象”:具体哪一个集群、任务 ID、topic、broker、角色等
|
||||
- “行为”:操作记录的行为,包含“新增”、“替换”、“读取”、“禁用”、“修改”、“删除”、“编辑”等
|
||||
- “操作内容”:具体操作的内容是什么
|
||||
- “操作时间”:操作发生的时间
|
||||
- “操作人”:此项操作所属的用户
|
||||
|
||||
- 步骤 3:操作记录列表可以对“模块“、”操作对象“、“操作内容”、”操作时间“进行筛选
|
||||
|
||||

|
||||
|
||||
### 5.4.3、多集群管理
|
||||
|
||||
#### 5.4.3.1、多集群列表
|
||||
|
||||
- 步骤 1:点击顶部导航栏“多集群管理”
|
||||
|
||||
- 步骤 2:多集群管理页面包含的信息为:”集群信息总览“、“集群列表”、“列表筛选项”、“接入集群”
|
||||
|
||||
- 步骤 3:集群列表筛选项为
|
||||
|
||||
- 集群信息总览:cluster 总数、live 数、down 数
|
||||
- 版本筛选:包含所有存在的集群版本
|
||||
- 健康分筛选:筛选项为 0、10、20、30、40、50、60、70、80、90、100
|
||||
- live、down 筛选:多选
|
||||
- 下拉框筛选排序,选项维度为“接入时间”、“健康分“、”Messages“、”MessageSize“、”BytesIn“、”BytesOut“、”Brokers“;可对这些维度进行“升序”、“降序”排序
|
||||
|
||||
- 步骤 4:每个卡片代表一个集群,其所展示的集群概览信息包括“健康分及健康检查项通过数”、“broker 数量”、“ZK 数量”、“版本号”、“BytesIn 均衡状态”、“BytesOut 均衡状态”、“Disk 均衡状态”、”Messages“、“MessageSize”、“BytesIn”、“BytesOut”、“接入时间”
|
||||
|
||||

|
||||
|
||||
#### 5.4.3.2、接入集群
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“接入集群”
|
||||
|
||||
- 步骤 2:填写相关集群信息
|
||||
- 集群名称:平台内不能重复
|
||||
- Bootstrap Servers:输入 Bootstrap Servers 地址,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)。
|
||||
- Zookeeper:输入 zookeeper 地址,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)
|
||||
- Metrics 选填:JMX Port,输入 JMX 端口号;MaxConn,输入服务端最大允许的连接数
|
||||
- Security:若有 JMX 账号密码,则输入账号密码
|
||||
- Version:kafka 版本,如果没有匹配则可以选择相近版本
|
||||
- 集群配置选填:用户创建 kafka 客户端进行信息获取的相关配置
|
||||
|
||||

|
||||
|
||||
#### 5.4.3.3、删除集群
|
||||
|
||||
- 步骤 1:点击“多集群管理”>鼠标悬浮集群卡片>点击卡片右上角“删除 icon”>打开“删除弹窗”
|
||||
|
||||
- 步骤 2:在删除弹窗中的“集群名称”输入框,输入所要删除集群的集群名称,点击“删除”,成功删除集群,解除平台的纳管关系(集群资源不会删除)
|
||||
|
||||

|
||||
|
||||
### 5.4.4、Cluster 管理
|
||||
|
||||
#### 5.4.4.1、Cluster Overview
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>进入单集群管理界面
|
||||
|
||||
- 步骤 2:左侧导航栏
|
||||
|
||||
- 一级导航:Cluster;二级导航:Overview、Load Rebalance
|
||||
- 一级导航:Broker;二级导航:Overview、Brokers、Controller
|
||||
- 一级导航:Topic;二级导航:Overview、Topics
|
||||
- 一级导航:Consumer
|
||||
- 一级导航:Testing;二级导航:Produce、Consume
|
||||
- 一级导航:Security;二级导航:ACLs、Users
|
||||
- 一级导航:Job
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.2、查看 Cluster 概览信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”
|
||||
|
||||
- 步骤 2:cluster 概览信息包括以下内容
|
||||
|
||||
- 集群健康分,健康检查通过项
|
||||
- Cluster 信息:包含名称、版本、均衡状态
|
||||
- Broker 信息:Broker 总数、controller 信息、similar config 信息
|
||||
- Topic 信息:Topic 总数、No Leader、<Min ISR、URP
|
||||
- Consumer Group 信息:Consumer Group 总数、是否存在 Dead 情况
|
||||
- 指标图表
|
||||
- 历史变更记录:名称、时间、内容、类型
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.3、设置 Cluster 健康检查规则
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“集群健康状态旁边 icon”>“健康度设置抽屉”
|
||||
|
||||
- 步骤 2:健康度设置抽屉展示出了检查项和其对应的权重,可以修改检查项的检查规则
|
||||
|
||||
- 步骤 3:检查规则可配置,分别为
|
||||
|
||||
- Cluster:集群 controller 数不等于 1(数字不可配置)不通过
|
||||
- Broker:RequestQueueSize 大于等于 10(默认为 10,可配置数字)不通过
|
||||
- Broker:NetworkProcessorAvgIdlePercent 的 Idle 小于等于 0.8%(默认为 0.8%,可配置数字)不通过
|
||||
- Topic:无 leader 的 Topic 数量,大于等于 1(默认为 1,数字可配置)不通过
|
||||
- Topic:Topic 在 10(默认为 10,数字可配置)个周期内 8(默认为 8,数字可配置)个周期内处于未同步的状态
|
||||
- ConsumerGroup:Group 在 10(默认为 10,数字可配置)个周期内进行 8(默认为 8,数字可配置)次 re-balance 不通过
|
||||
|
||||
- 步骤 4:设置完成后,点击“确认”,健康检查规则设置成功
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.4、查看 Cluster 健康检查详情
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“集群健康状态旁边【查看详情】”>“健康检查详情抽屉”
|
||||
|
||||
- 步骤 2:健康检查详情抽屉展示信息为:“检查模块”、“检查项”、“权重”、“得分”、“检查时间”、“检查结果是否通过”,若未通过会展示未通过的对象
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.5、编辑 Cluster 信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“Cluster 名称旁边编辑 icon”>“编辑集群抽屉”
|
||||
|
||||
- 步骤 2:可编辑的信息包括“集群名称”、“Bootstrap Servers”、“Zookeeper”、“JMX Port”、“Maxconn(最大连接数)”、“Security(认证措施)”、“Version(版本号)”、“集群配置”、“集群描述”
|
||||
|
||||
- 步骤 3:点击“确定”,成功编辑集群信息
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.6、图表指标筛选
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“指标筛选 icon”>“指标筛选抽屉”
|
||||
|
||||
- 步骤 2:指标筛选抽屉展示信息为以下几类“Health”、“Cluster”、“Broker”、“Consumer”、“Security”、“Job”
|
||||
|
||||
- 步骤 3:默认勾选比较重要的指标进行展示。根据需要选中/取消选中相应指标,点击”确认“,指标筛选成功,展示的图表随之变化
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.7、图表时间筛选
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“时间选择下拉框”>“时间选择弹窗”
|
||||
|
||||
- 步骤 2:选择时间“最近 15 分钟”、“最近 1 小时”、“最近 6 小时”、“最近 12 小时”、“最近 1 天”,也可以自定义时间段范围
|
||||
|
||||

|
||||
|
||||
#### 5.4.4.8、查看集群历史变更记录
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“历史变更记录”区域
|
||||
|
||||
- 步骤 2:历史变更记录区域展示了历史的配置变更,每条记录可展开收起。包含“配置对象”、“变更时间”、“变更内容”、“配置类型”
|
||||
|
||||

|
||||
|
||||
### 5.4.5、Load Rebalance(企业版)
|
||||
|
||||
#### 5.4.5.1、查看 Load Rebalance 概览信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”
|
||||
|
||||
- 步骤 2:Load Rebalance 概览信息包含“均衡状态卡片”、“Disk 信息卡片”、“BytesIn 信息卡片”、“BytesOut 信息卡片”、“Broker 均衡状态列表”
|
||||
|
||||

|
||||
|
||||
#### 5.4.5.2、设置集群规格
|
||||
|
||||
提供对集群的每个节点的 Disk、BytesIn、BytesOut 的规格进行设置的功能
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“State 卡片 icon“>”设置集群规格抽屉“
|
||||
|
||||
- 步骤 2:穿梭框左侧展示集群中的待选节点,穿梭框右侧展示已经选中的节点,选择自己所需设置规格的节点
|
||||
|
||||
- 步骤 3:设置“单机核数”、“单机磁盘”、“单机网络”,点击确定,完成设置
|
||||
|
||||

|
||||
|
||||
#### 5.4.5.3、均衡状态列表筛选
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“筛选列表”按钮>筛选弹窗
|
||||
|
||||
- 步骤 2:可选择“Disk”、“BytesIn”、“BytesOut”三种维度,其各自对应“已均衡”、“未均衡”两种状态,可以组合进行筛选
|
||||
|
||||
- 步骤 3:点击“确认”,执行筛选操作
|
||||
|
||||

|
||||
|
||||
#### 5.4.5.4、立即均衡
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“立即均衡”按钮>“立即均衡抽屉”
|
||||
|
||||
- 步骤 2:配置均衡策略
|
||||
|
||||
- 指标计算周期:默认近 10mins,可选择
|
||||
- 均衡维度:默认 Disk、BytesIn、BytesOut,可选择
|
||||
- 均衡区间:在表格内自定义配置均衡区间范围(单位:%,大于 0,小于 100)
|
||||
- Topic 黑名单:选择 topic 黑名单。通过穿梭框(支持模糊选择)选出目标 topic(本次均衡,略过已选的 topic)
|
||||
|
||||
- 步骤 3:配置运行参数
|
||||
|
||||
- 吞吐量优先:并行度 0(无限制), 策略是优先执行大小最大副本
|
||||
- 稳定性优先: 并行度 1 ,策略是优先执行大小最小副本
|
||||
- 自定义:可以自由设置并行度和优先执行的副本策略
|
||||
- 限流值:流量最大值,0-99999 自定义
|
||||
|
||||
- 步骤 4:点击“预览计划”按钮,打开执行计划弹窗。可以看到计划概览信息、计划明细信息
|
||||
|
||||
- 步骤 5:点击“预览计划弹窗”的“执行文件”,可以下载 json 格式的执行文件
|
||||
|
||||
- 步骤 6:点击“预览计划弹窗”的“立即均衡”按钮,开始执行均衡任务
|
||||
|
||||

|
||||
|
||||
#### 5.4.5.5、周期均衡
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“周期均衡”按钮>“周期均衡抽屉”
|
||||
|
||||
- 步骤 2:配置均衡策略
|
||||
|
||||
- 指标计算周期:默认近 10mins,可选择
|
||||
- 均衡维度:默认 Disk、BytesIn、BytesOut,可选择
|
||||
- 均衡区间:在表格内自定义配置均衡区间范围(单位:%,大于 0,小于 100)
|
||||
- Topic 黑名单:选择 topic 黑名单。通过穿梭框(支持模糊选择)选出目标 topic(本次均衡,略过已选的 topic)
|
||||
|
||||
- 步骤 3:配置运行参数
|
||||
|
||||
- 任务并行度:每个节点同时迁移的副本数量
|
||||
- 任务周期:时间选择器,自定义选择运行周期
|
||||
- 稳定性优先: 并行度 1 ,策略是优先执行大小最小副本
|
||||
- 自定义:可以自由设置并行度和优先执行的副本策略
|
||||
- 限流值:流量最大值,0-99999 自定义
|
||||
|
||||
- 步骤 4:点击“预览计划”按钮,打开执行计划弹窗。可以看到计划概览信息、计划明细信息
|
||||
|
||||
- 步骤 5:点击“预览计划弹窗”的“执行文件”,可以下载 json 格式的执行文件
|
||||
|
||||
- 步骤 6:点击“预览计划弹窗”的“立即均衡”按钮,开始执行均衡任务
|
||||
|
||||

|
||||
|
||||
### 5.4.6、Broker
|
||||
|
||||
#### 5.4.6.1、查看 Broker 概览信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Broker”>“Overview”
|
||||
|
||||
- 步骤 2:Broker 概览信息包括以下内容
|
||||
|
||||
- 集群健康分,健康检查通过项
|
||||
- Broker 信息:包含名称、版本、均衡状态
|
||||
- Broker 信息:Broker 总数、controller 信息、similar config 信息
|
||||
- Topic 信息:Topic 总数、No Leader、<Min ISR、URP
|
||||
- Consumer Group 信息:Consumer Group 总数、是否存在 Dead 情况
|
||||
- 指标图表
|
||||
- 历史变更记录:名称、时间、内容、类型
|
||||
|
||||

|
||||
|
||||
#### 5.4.6.2、编辑 Broker 配置
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Brokers”>“Broker ID”>“Configuration”TAB>“编辑”按钮
|
||||
|
||||
- 步骤 2:输入配置项的新配置内容
|
||||
|
||||
- 步骤 3:(选填)点击“应用于全部 Broker”,将此配置项的修改应用于全部的 Broker
|
||||
|
||||
- 步骤 4:点击“确认”,Broker 配置修改成功
|
||||
|
||||

|
||||
|
||||
#### 5.4.6.3、查看 Broker DataLogs
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Brokers”>“Broker ID”>“Data Logs”TAB>“编辑”按钮
|
||||
|
||||
- 步骤 2:Broker DataLogs 列表展示的信息为“Folder”、“topic”、“Partition”、“Offset Lag”、“Size”
|
||||
|
||||
- 步骤 3:输入框输入”Topic Name“可以筛选结果
|
||||
|
||||

|
||||
|
||||
#### 5.4.6.4、查看 Controller 列表
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Broker”>“Controller”
|
||||
|
||||
- 步骤 2:Controller 列表展示的信息为“Change Time”、“Broker ID”、“Broker Host”
|
||||
|
||||
- 步骤 3:输入框输入“Broker Host“可以筛选结果
|
||||
|
||||
- 步骤 4:点击 Broker ID 可以打开 Broker 详情,进行修改配置或者查看 DataLogs
|
||||
|
||||

|
||||
|
||||
### 5.4.7、Topic
|
||||
|
||||
#### 5.4.7.1、查看 Topic 概览信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Overview”
|
||||
|
||||
- 步骤 2:Topic 概览信息包括以下内容
|
||||
|
||||
- 集群健康分,健康检查通过项
|
||||
- Topics:Topic 总数
|
||||
- Partitions:Partition 总数
|
||||
- PartitionNoLeader:没有 leader 的 partition 个数
|
||||
- < Min ISR:同步副本数小于 Min ISR
|
||||
- =Min ISR:同步副本数等于 Min ISR
|
||||
- Topic 指标图表
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.2、查看 Topic 健康检查详情
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Overview”>“集群健康状态旁边【查看详情】”>“健康检查详情抽屉”
|
||||
|
||||
- 步骤 2:健康检查详情抽屉展示信息为:“检查项”、“权重”、“得分”、“检查时间”、“检查结果是否通过”,若未通过会展示未通过的对象
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.3、查看 Topic 列表
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”
|
||||
|
||||
- 步骤 2:Topic 列表展示内容为“TopicName”、“Partitions”、“Replications”、“健康分”、“BytesIn”、“BytesOut”、“MessageSize”、“保存时间”、“描述”、操作项”扩分区“、操作项”删除“
|
||||
|
||||
- 步骤 3:筛选框输入“TopicName”可以对列表进行筛选;点击“展示系统 Topic”开关,可以筛选系统 topic
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.4、新增 Topic
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“新增 Topic”按钮>“创建 Topic“抽屉
|
||||
|
||||
- 步骤 2:输入“Topic 名称(不能重复)”、“Topic 描述”、“分区数”、“副本数”、“数据保存时间”、“清理策略(删除或压缩)”
|
||||
|
||||
- 步骤 3:展开“更多配置”可以打开高级配置选项,根据自己需要输入相应配置参数
|
||||
|
||||
- 步骤 4:点击“确定”,创建 Topic 完成
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.5、Topic 扩分区
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“Topic 列表“>操作项”扩分区“>“扩分区”抽屉
|
||||
|
||||
- 步骤 2:扩分区抽屉展示内容为“流量的趋势图”、“当前分区数及支持的最低消息写入速率”、“扩分区后支持的最低消息写入速率”
|
||||
|
||||
- 步骤 3:输入所需的分区总数,自动计算出扩分区后支持的最低消息写入速率
|
||||
|
||||
- 步骤 4:点击确定,扩分区完成
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.6、删除 Topic
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“Topic 列表“>操作项”删除“>“删除 Topic”弹窗
|
||||
|
||||
- 步骤 2:输入“TopicName”进行二次确认
|
||||
|
||||
- 步骤 3:点击“删除”,删除 Topic 完成
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.7、Topic 批量扩缩副本
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量扩缩副本“>“批量扩缩容”抽屉
|
||||
|
||||
- 步骤 2:选择所需要进行扩缩容的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中
|
||||
|
||||
- 步骤 3:Topic 列表展示 Topic“近三天平均流量”、“近三天峰值流量及时间”、“Partition 数”、”当前副本数“、“新副本数”
|
||||
|
||||
- 步骤 4:扩容时,选择目标节点,新增的副本会在选择的目标节点上;缩容时不需要选择目标节点,自动删除最后一个(或几个)副本
|
||||
|
||||
- 步骤 5:输入迁移任务配置参数,包含限流值和任务执行时间
|
||||
|
||||
- 步骤 6:输入任务描述
|
||||
|
||||
- 步骤 7:点击“确定”,执行 Topic 扩缩容任务
|
||||
|
||||

|
||||
|
||||
#### 5.4.7.8、Topic 批量迁移
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量迁移“>“批量迁移”抽屉
|
||||
|
||||
- 步骤 2:选择所需要进行迁移的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中
|
||||
|
||||
- 步骤 3:选择所需要迁移的 partition 和迁移数据的时间范围
|
||||
|
||||
- 步骤 4:选择目标节点(节点数必须不小于最大副本数)
|
||||
|
||||
- 步骤 5:点击“预览任务计划”,打开“任务计划”二次抽屉,可对每个 partition 的目标 Broker ID 进行编辑,目标 broker 应该等于副本数
|
||||
|
||||
- 步骤 6:输入迁移任务配置参数,包含限流值和任务执行时间
|
||||
|
||||
- 步骤 7:输入任务描述
|
||||
|
||||
- 步骤 8:点击“确定”,执行 Topic 迁移任务
|
||||
|
||||

|
||||
|
||||
### 5.4.8、Consumer
|
||||
|
||||
#### 5.4.8.1、Consumer Overview
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”
|
||||
|
||||
- 步骤 2:Consumer 概览信息包括以下内容
|
||||
|
||||
- 集群健康分,健康检查通过项
|
||||
- Groups:Consumer Group 总数
|
||||
- GroupsActives:活跃的 Group 总数
|
||||
- GroupsEmptys:Empty 的 Group 总数
|
||||
- GroupRebalance:进行 Rebalance 的 Group 总数
|
||||
- GroupDeads:Dead 的 Group 总数
|
||||
- Consumer Group 列表
|
||||
|
||||
- 步骤 3:输入“Consumer Group”、“Topic Name”,可对列表进行筛选
|
||||
|
||||
- 步骤 4:点击列表“Consumer Group”名称,可以查看 Consumer Group 详情
|
||||
|
||||

|
||||
|
||||
#### 5.4.8.2、查看 Consumer 列表
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”>“Consumer Group”名称>“Consumer Group 详情”抽屉
|
||||
|
||||
- 步骤 2:Consumer Group 详情有列表视图和图表视图
|
||||
|
||||
- 步骤 3:列表视图展示信息为 Consumer 列表,包含”Topic Partition“、”Member ID“、”Current Offset“、“Log End Offset”、”Lag“、”Host“、”Client ID“
|
||||
|
||||

|
||||
|
||||
#### 5.4.8.3、重置 Offset
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”>“Consumer Group”名称>“Consumer Group 详情”抽屉>“重置 Offset”按钮>“重置 Offset”抽屉
|
||||
|
||||
- 步骤 2:选择重置 Offset 的类型,可“重置到指定时间”或“重置分区”
|
||||
|
||||
- 步骤 3:重置到指定时间,可选择“最新 Offset”或“自定义时间”
|
||||
|
||||
- 步骤 4:重置分区,可选择 partition 和其重置的 offset
|
||||
|
||||
- 步骤 5:点击“确认”,重置 Offset 开始执行
|
||||
|
||||

|
||||
|
||||
### 5.4.9、Testing(企业版)
|
||||
|
||||
#### 5.4.9.1、生产测试
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Testing”>“Produce”
|
||||
|
||||
- 步骤 2:生产配置
|
||||
|
||||
- Data:选择数据写入的 topic,输入写入数据的 key(暂只支持 string 格式),输入写入数据的 value(暂只支持 string 格式)。其中 key 和 value 可以随机生成
|
||||
- Flow:输入单次发送的消息数量,默认为 1,可以手动修改。选择手动生产模式,代表每次点击按钮【Run】执行生产;选择周期生产模式,需要填写运行总时间和运行时间间隔。
|
||||
- Header:输入 Header 的 key,value
|
||||
- Options:选择 Force Partition,代表消息仅发送到这些选择的 Partition。选择数据压缩格式。选择 Acks 参数,none 意思是消息发送了就认为发送成功;leader 意思是 leader 接收到消息(不管 follower 有没有同步成功)认为消息发送成功;all 意思是所有的 follower 消息同步成功认为是消息发送成功
|
||||
|
||||
- 步骤 3:点击按钮【Run】,生产测试开始,可以从右侧看到生产测试的信息
|
||||
|
||||

|
||||
|
||||
#### 5.4.9.2、消费测试
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Testing”>“Consume”
|
||||
|
||||
- 步骤 2:消费配置
|
||||
|
||||
- Topic:选择数据从哪个 topic 进行消费
|
||||
- Start From:选择数据从什么地方开始消费,可以根据时间选择或者根据 Offset 进行选择
|
||||
- Until:选择消费截止到什么地方,可以根据时间或者 offset 或者消息数等进行选择
|
||||
- Filter:选择过滤器的规则。包含/不包含某【key,value】;等于/大于/小于多少条消息
|
||||
|
||||
- 步骤 3:点击按钮【Run】,消费测试开始,可以在右边看到消费的明细信息
|
||||
|
||||

|
||||
|
||||
### 5.4.10、Security
|
||||
|
||||
注意:只有在开启集群认证的情况下才能够使用 Security 功能
|
||||
|
||||
#### 5.4.10.1、查看 ACL 概览信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“ACLs”
|
||||
|
||||
- 步骤 2:ACL 概览信息包括以下内容
|
||||
|
||||
- Enable:是否可用
|
||||
- ACLs:ACL 总数
|
||||
- Users:User 总数
|
||||
- Topics:Topic 总数
|
||||
- Consumer Groups:Consumer Group 总数
|
||||
- ACL 列表
|
||||
|
||||

|
||||
|
||||
#### 5.4.10.2、新增 ACL
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“Users”>“新增 ACL”
|
||||
|
||||
- 步骤 2:输入 ACL 配置参数
|
||||
|
||||
- ACL 用途:生产权限、消费权限、自定义权限
|
||||
- 生产权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic
|
||||
- 消费权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic;可选择应用于所有 Consumer Group 或者特定 Consumer Group
|
||||
|
||||
- 步骤 3:点击“确定”,新增 ACL 成功
|
||||
|
||||

|
||||
|
||||
#### 5.4.10.3、查看 User 信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“ACLs”
|
||||
|
||||
- 步骤 2:User 列表展示内容包括“Kafka User 名称”、“认证方式”、“password”、操作项“修改密码”、操作项“删除”
|
||||
|
||||
- 步骤 3:筛选框输入“Kafka User”可筛选出列表中相关 Kafka User
|
||||
|
||||

|
||||
|
||||
#### 5.4.10.4、新增 Kafka User
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“Users”>“新增 Kafka User”
|
||||
|
||||
- 步骤 2:输入 Kafka User 名称、认证方式、密码
|
||||
|
||||
- 步骤 3:点击“确定”,新增 Kafka User 成功
|
||||
|
||||

|
||||
|
||||
### 5.4.11、Job
|
||||
|
||||
#### 5.4.11.1、查看 Job 概览信息
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Job“
|
||||
|
||||
- 步骤 2:Job 概览信息包括以下内容
|
||||
|
||||
- Jobs:Job 总数
|
||||
- Doing:正在运行的 Job 总数
|
||||
- Prepare:准备运行的 Job 总数
|
||||
- Success:运行成功的 Job 总数
|
||||
- Fail:运行失败的 Job 总数
|
||||
- Job 列表
|
||||
|
||||

|
||||
|
||||
#### 5.4.11.2、Job 查看进度
|
||||
|
||||
Doing 状态下的任务可以查看进度
|
||||
|
||||
- 步骤 1:点击“多集群管理”>“集群卡片”>“Job”>“Job”列表>操作项“查看进度”>“查看进度”抽屉
|
||||
|
||||
- 步骤 2:
|
||||
|
||||
- 均衡任务:任务基本信息、均衡计划、任务执行明细信息
|
||||
- 扩缩副本:任务基本信息、任务执行明细信息、节点流量情况
|
||||
- Topic 迁移:任务基本信息、任务执行明细信息、节点流量情况
|
||||
|
||||

|
||||
|
||||
#### 5.4.11.3、Job 编辑任务
|
||||
|
||||
Prepare 状态下的任务可以进行编辑
|
||||
|
||||
- 点击“多集群管理”>“集群卡片”>“Job”>“Job”列表>操作项“编辑”
|
||||
|
||||
- 对任务执行的参数进行重新配置
|
||||
|
||||
- 集群均衡:可以对指标计算周期、均衡维度、topic 黑名单、运行配置等参数重新设置
|
||||
- Topic 迁移:可以对 topic 需要迁移的 partition、迁移数据的时间范围、目标 broker 节点、限流值、执行时间、描述等参数重新配置
|
||||
- topic 扩缩副本:可以对最终副本数、限流值、任务执行时间、描述等参数重新配置
|
||||
|
||||
- 点击“确定”,编辑任务成功
|
||||
|
||||

|
||||
@@ -1,108 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<artifactId>kafka-manager-common</artifactId>
|
||||
<version>2.0.0-SNAPSHOT</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<parent>
|
||||
<artifactId>kafka-manager</artifactId>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<version>2.0.0-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<properties>
|
||||
<maven.test.skip>true</maven.test.skip>
|
||||
<downloadSources>true</downloadSources>
|
||||
<java_source_version>1.8</java_source_version>
|
||||
<java_target_version>1.8</java_target_version>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<file_encoding>UTF-8</file_encoding>
|
||||
|
||||
<spring-version>5.1.3.RELEASE</spring-version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-web</artifactId>
|
||||
<version>${spring-version}</version>
|
||||
</dependency>
|
||||
|
||||
<!-- http -->
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpcore</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpclient</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-httpclient</groupId>
|
||||
<artifactId>commons-httpclient</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpmime</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- zookeeper -->
|
||||
<dependency>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-recipes</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- swagger -->
|
||||
<dependency>
|
||||
<groupId>io.springfox</groupId>
|
||||
<artifactId>springfox-swagger2</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.springfox</groupId>
|
||||
<artifactId>springfox-swagger-ui</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.swagger</groupId>
|
||||
<artifactId>swagger-annotations</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- json -->
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.core</groupId>
|
||||
<artifactId>jackson-databind</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.alibaba</groupId>
|
||||
<artifactId>fastjson</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>commons-beanutils</groupId>
|
||||
<artifactId>commons-beanutils</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.10</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-lang</groupId>
|
||||
<artifactId>commons-lang</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-pool2</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>javax.servlet</groupId>
|
||||
<artifactId>javax.servlet-api</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
@@ -1,24 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.annotations;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.ApiLevelContent;
|
||||
|
||||
import java.lang.annotation.Documented;
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
import static java.lang.annotation.RetentionPolicy.RUNTIME;
|
||||
|
||||
/**
|
||||
* 接口分级限流
|
||||
* @author zengqiao
|
||||
* @date 2020-07-20
|
||||
*/
|
||||
@Target(ElementType.METHOD)
|
||||
@Retention(RUNTIME)
|
||||
@Documented
|
||||
public @interface ApiLevel {
|
||||
int level() default ApiLevelContent.LEVEL_DEFAULT_4;
|
||||
|
||||
int rateLimit() default Integer.MAX_VALUE;
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Account roles recognized by the management platform.
 * Each constant carries a numeric role code and a short label.
 * @author zengqiao_cn@163.com
 * @date 19/4/15
 */
public enum AccountRoleEnum {
    UNKNOWN(-1, "unknown"),

    NORMAL(0, "normal"),

    RD(1, "rd"),

    OP(2, "op");

    // Numeric code stored/transferred for this role.
    private final Integer role;

    // Short display label for the role.
    private final String message;

    AccountRoleEnum(Integer roleCode, String label) {
        this.role = roleCode;
        this.message = label;
    }

    public Integer getRole() {
        return role;
    }

    public String getMessage() {
        return message;
    }

    @Override
    public String toString() {
        return "AccountRoleEnum{role=" + role + ", message='" + message + "'}";
    }

    /**
     * Resolves a numeric role code to its enum constant.
     * @param role code to look up; may be null
     * @return the matching constant, or {@link #UNKNOWN} when nothing matches
     */
    public static AccountRoleEnum getUserRoleEnum(Integer role) {
        for (AccountRoleEnum candidate : values()) {
            if (candidate.role.equals(role)) {
                return candidate;
            }
        }
        return UNKNOWN;
    }
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * API importance levels used by the interface classification / rate-limiting code.
 * Level 0 is the most critical, level 3 the least.
 * @author zengqiao
 * @date 20/7/27
 */
public enum ApiLevelEnum {
    LEVEL_0(0),
    LEVEL_1(1),
    LEVEL_2(2),
    LEVEL_3(3)
    ;

    // Numeric level carried by the constant; final — an enum constant's level never changes.
    private final int level;

    ApiLevelEnum(int level) {
        this.level = level;
    }

    /**
     * Accessor added: the field was previously private with no getter,
     * making the stored level unreachable by callers.
     * @return the numeric level of this constant
     */
    public int getLevel() {
        return level;
    }
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Predefined cluster bandwidth combos (inbound bytes/second quotas).
 * @author zengqiao
 * @date 20/4/21
 */
public enum ClusterComboEnum {
    BYTES_IN_200(200 * 1024 * 1024, "200MB/s"),
    BYTES_IN_400(400 * 1024 * 1024, "400MB/s"),
    BYTES_IN_600(600 * 1024 * 1024, "600MB/s"),
    ;

    // Bandwidth in bytes per second.
    private final Integer code;

    // Human-readable label of the combo.
    private final String message;

    ClusterComboEnum(Integer bytesPerSecond, String label) {
        this.code = bytesPerSecond;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }

    @Override
    public String toString() {
        return "ClusterComboEnum{code=" + code + ", message='" + message + "'}";
    }
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Cluster deployment modes.
 * @author zengqiao
 * @date 20/4/1
 */
public enum ClusterModeEnum {
    /** Shared mode: the cluster is shared between tenants. */
    SHARED_MODE(0, "共享集群"),

    /** Exclusive mode: the cluster is dedicated to one tenant. */
    EXCLUSIVE_MODE(1, "独享集群"),

    /** Independent mode: a fully standalone cluster. */
    INDEPENDENT_MODE(2, "独立集群");

    // Numeric mode code.
    private final Integer code;

    // Chinese display label.
    private final String message;

    ClusterModeEnum(Integer modeCode, String label) {
        this.code = modeCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }

    @Override
    public String toString() {
        return "ClusterModeEnum{code=" + code + ", message='" + message + "'}";
    }
}
|
||||
@@ -1,25 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Database liveness states used by health checks.
 * @author zengqiao
 * @date 20/6/4
 */
public enum DBStatusEnum {
    DEAD(-1),
    ALIVE(0)
    ;

    // Numeric status code (mutable because a public setter is part of the existing API).
    private int status;

    DBStatusEnum(int statusCode) {
        this.status = statusCode;
    }

    public int getStatus() {
        return status;
    }

    public void setStatus(int status) {
        this.status = status;
    }
}
|
||||
@@ -1,45 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Known IDC (data-center region) identifiers.
 * @author zengqiao
 * @date 20/5/26
 */
public enum IDCEnum {
    CN("cn", "国内"),
    US("us", "美东"),
    RU("ru", "俄罗斯"),
    ;

    // Short region identifier (mutable: setters are part of the existing API).
    private String idc;

    // Chinese display name of the region.
    private String name;

    IDCEnum(String regionId, String displayName) {
        this.idc = regionId;
        this.name = displayName;
    }

    public String getIdc() {
        return idc;
    }

    public void setIdc(String idc) {
        this.idc = idc;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String toString() {
        return "IDCEnum{idc='" + idc + "', name='" + name + "'}";
    }
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Roles a Kafka broker can play within a cluster.
 * @author zengqiao
 * @date 20/5/20
 */
public enum KafkaBrokerRoleEnum {
    NORMAL("NormalBroker"),

    COORDINATOR("Coordinator"),

    CONTROLLER("Controller"),
    ;

    // Role label (mutable: a public setter is part of the existing API).
    private String role;

    KafkaBrokerRoleEnum(String roleName) {
        this.role = roleName;
    }

    public String getRole() {
        return role;
    }

    public void setRole(String role) {
        this.role = role;
    }

    @Override
    public String toString() {
        return "KafkaBrokerRoleEnum{role='" + role + "'}";
    }
}
|
||||
@@ -1,46 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Kafka client traffic directions (produce vs. fetch).
 * @author zengqiao
 * @date 20/5/29
 */
public enum KafkaClientEnum {
    PRODUCE_CLIENT(0, "Produce"),

    FETCH_CLIENT(1, "Fetch"),

    ;

    // Numeric code (mutable: public setters are part of the existing API).
    private Integer code;

    // Display name of the client kind.
    private String name;

    KafkaClientEnum(Integer clientCode, String clientName) {
        this.code = clientCode;
        this.name = clientName;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String toString() {
        return "KafkaClientEnum{code=" + code + ", name='" + name + "'}";
    }
}
|
||||
@@ -1,54 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Kinds of Kafka-related files the platform manages, with their expected
 * file-name suffix.
 * @author zengqiao
 * @date 20/4/26
 */
public enum KafkaFileEnum {
    PACKAGE(0, "Kafka压缩包", ".tgz"),

    SERVER_CONFIG(1, "KafkaServer配置", ".properties"),
    ;

    // Numeric code persisted for this file kind.
    private final Integer code;

    // Chinese description of the file kind.
    private final String message;

    // Expected file-name suffix, including the leading dot.
    private final String suffix;

    KafkaFileEnum(Integer code, String message, String suffix) {
        this.code = code;
        this.message = message;
        this.suffix = suffix;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }

    public String getSuffix() {
        return suffix;
    }

    @Override
    public String toString() {
        // Fix: suffix is a String and is now quoted like message, consistent
        // with the toString format used by the sibling enums.
        return "KafkaFileEnum{" +
                "code=" + code +
                ", message='" + message + '\'' +
                ", suffix='" + suffix + '\'' +
                '}';
    }

    /**
     * Resolves a numeric code to its enum constant.
     * @param code code to look up; may be null
     * @return the matching constant, or null when nothing matches
     */
    public static KafkaFileEnum getByCode(Integer code) {
        for (KafkaFileEnum elem : KafkaFileEnum.values()) {
            if (elem.getCode().equals(code)) {
                return elem;
            }
        }
        return null;
    }
}
|
||||
@@ -1,76 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
import java.util.HashMap;
import java.util.Map;

/**
 * Business modules that operation records can belong to.
 * @author zhongyuankai_i
 * @date 20/09/03
 */
public enum ModuleEnum {
    TOPIC(0, "Topic"),

    APP(1, "应用"),

    QUOTA(2, "配额"),

    AUTHORITY(3, "权限"),

    CLUSTER(4, "集群"),

    PARTITION(5, "分区"),

    UNKNOWN(-1, "未知")
    ;

    // Numeric module code.
    private final int code;

    // Chinese display name of the module.
    private final String message;

    ModuleEnum(int code, String message) {
        this.code = code;
        this.message = message;
    }

    public int getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }

    /**
     * Serializes this constant as a {code, message} map.
     * Fix: uses java.util.HashMap directly instead of pulling in Guava's
     * Maps.newHashMap() for a trivial map creation.
     */
    public Map<String, Object> toMap() {
        Map<String, Object> map = new HashMap<>(4);
        map.put("code", code);
        map.put("message", message);
        return map;
    }

    /**
     * Resolves a numeric code to its enum constant.
     * @param code code to look up; may be null
     * @return the matching constant, or {@link #UNKNOWN} when nothing matches
     */
    public static ModuleEnum valueOf(Integer code) {
        if (code == null) {
            return ModuleEnum.UNKNOWN;
        }
        for (ModuleEnum state : ModuleEnum.values()) {
            if (state.getCode() == code) {
                return state;
            }
        }

        return ModuleEnum.UNKNOWN;
    }

    /**
     * @return true when the given code maps to a known module
     */
    public static boolean validate(Integer code) {
        if (code == null) {
            return false;
        }
        for (ModuleEnum state : ModuleEnum.values()) {
            if (state.getCode() == code) {
                return true;
            }
        }

        return false;
    }

}
|
||||
@@ -1,36 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Where a consumer group's offsets are stored.
 * @author limeng
 * @date 2017/11/21
 */
public enum OffsetLocationEnum {
    /** Offsets stored in ZooKeeper (legacy consumers). */
    ZOOKEEPER("zookeeper"),

    /** Offsets stored in the broker (__consumer_offsets). */
    BROKER("broker");

    // Storage location name; public final is part of the existing API.
    public final String location;

    OffsetLocationEnum(String locationName) {
        this.location = locationName;
    }

    /**
     * Resolves a location name to its enum constant.
     * @param location name to look up; may be null
     * @return the matching constant, or null when unknown or null input
     */
    public static OffsetLocationEnum getOffsetStoreLocation(String location) {
        if (location == null) {
            return null;
        }

        for (OffsetLocationEnum candidate : values()) {
            if (candidate.location.equals(location)) {
                return candidate;
            }
        }
        return null;
    }
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Which end(s) of a partition an offset should be fetched from.
 * @author zengqiao
 * @date 19/5/29
 */
public enum OffsetPosEnum {
    NONE(0),

    BEGINNING(1),

    END(2),

    BOTH(3);

    // Position code; public final is part of the existing API.
    public final Integer code;

    OffsetPosEnum(Integer positionCode) {
        this.code = positionCode;
    }

    public Integer getCode() {
        return code;
    }

    /**
     * Resolves a position code to its enum constant.
     * @param code code to look up; may be null
     * @return the matching constant, or {@link #NONE} when nothing matches
     */
    public static OffsetPosEnum getOffsetPosEnum(Integer code) {
        for (OffsetPosEnum candidate : values()) {
            if (candidate.getCode().equals(code)) {
                return candidate;
            }
        }
        return NONE;
    }

    @Override
    public String toString() {
        return "OffsetPosEnum{code=" + code + "}";
    }
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Generic operation kinds recorded for audit/history entries.
 * @author zhongyuankai
 * @date 20/09/03
 */
public enum OperateEnum {
    ADD(0, "新增"),

    DELETE(1, "删除"),

    EDIT(2, "修改"),

    UNKNOWN(-1, "unknown"),
    ;

    // Numeric operation code.
    private final int code;

    // Display label of the operation.
    private final String message;

    OperateEnum(int opCode, String label) {
        this.code = opCode;
        this.message = label;
    }

    public int getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }

    /**
     * Resolves a numeric code to its enum constant.
     * @param code code to look up; may be null
     * @return the matching constant, or {@link #UNKNOWN} when nothing matches
     */
    public static OperateEnum valueOf(Integer code) {
        if (code == null) {
            return UNKNOWN;
        }
        for (OperateEnum candidate : values()) {
            if (candidate.code == code) {
                return candidate;
            }
        }
        return UNKNOWN;
    }

    /**
     * @return true when the given code maps to a known operation
     */
    public static boolean validate(Integer code) {
        if (code == null) {
            return false;
        }
        for (OperateEnum candidate : values()) {
            if (candidate.code == code) {
                return true;
            }
        }
        return false;
    }
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Kinds of write operations tracked for change records.
 * @author zengqiao
 * @date 19/11/21
 */
public enum OperationStatusEnum {
    CREATE(0, "创建"),
    UPDATE(1, "更新"),
    DELETE(2, "删除"),
    ;

    // Numeric operation code.
    private final Integer code;

    // Chinese display label.
    private final String message;

    OperationStatusEnum(Integer opCode, String label) {
        this.code = opCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Buckets for peak-traffic utilization reporting.
 * @author zengqiao
 * @date 20/5/11
 */
public enum PeakFlowStatusEnum {
    BETWEEN_ALL(0, "全部"),
    BETWEEN_00_60(1, "使用率0%-60%"),
    BETWEEN_60_80(2, "使用率60%-80%"),
    BETWEEN_80_100(3, "使用率80%-100%"),
    BETWEEN_100_PLUS(4, "使用率大于100%"),
    BETWEEN_EXCEPTION(5, "数据获取失败"),

    ;

    // Bucket code; public mutable fields are part of the existing API.
    public Integer code;

    // Chinese display label of the bucket.
    public String message;

    PeakFlowStatusEnum(Integer bucketCode, String label) {
        this.code = bucketCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "PeakFlowStatusEnum{code=" + code + ", message='" + message + "'}";
    }
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Scopes at which a preferred-replica election (rebalance) can run.
 * @author zengqiao
 * @date 20/4/23
 */
public enum RebalanceDimensionEnum {
    CLUSTER(0, "Cluster维度"),
    REGION(1, "Region维度"),
    BROKER(2, "Broker维度"),
    TOPIC(3, "Topic维度"),
    ;

    // Numeric dimension code.
    private final Integer code;

    // Chinese display label.
    private final String message;

    RebalanceDimensionEnum(Integer dimensionCode, String label) {
        this.code = dimensionCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }
}
|
||||
@@ -1,45 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Whether metrics are reported to the external monitoring system.
 * @author zengqiao
 * @date 20/9/25
 */
public enum SinkMonitorSystemEnum {
    SINK_MONITOR_SYSTEM(0, "上报监控系统"),
    NOT_SINK_MONITOR_SYSTEM(1, "不上报监控系统"),
    ;

    // Numeric code (mutable: public setters are part of the existing API).
    private Integer code;

    // Chinese display label.
    private String message;

    SinkMonitorSystemEnum(Integer sinkCode, String label) {
        this.code = sinkCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "SinkMonitorSystemEnum{code=" + code + ", message='" + message + "'}";
    }
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Lifecycle states of a platform task. Codes >= {@link #FINISHED} (100) are
 * terminal states.
 * @author zengqiao
 * @date 2017/6/29.
 */
public enum TaskStatusEnum {
    UNKNOWN(-1, "未知"),

    NEW(0, "新建"),

    RUNNABLE(20, "就绪"),
    WAITING(21, "等待"),

    RUNNING(30, "运行中"),
    KILLING(31, "杀死中"),

    BLOCKED(40, "暂停"),

    UNFINISHED(99, "未完成"),
    FINISHED(100, "完成"),

    SUCCEED(101, "成功"),
    FAILED(102, "失败"),
    CANCELED(103, "取消"),
    IGNORED(104, "忽略"),
    TIMEOUT(105, "超时"),
    KILL_FAILED(106, "杀死失败"),

    ;

    // Numeric status code (mutable: public setters are part of the existing API).
    private Integer code;

    // Chinese display label.
    private String message;

    TaskStatusEnum(Integer code, String message) {
        this.code = code;
        this.message = message;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "TaskStatusEnum{" +
                "code=" + code +
                ", message='" + message + '\'' +
                '}';
    }

    /**
     * @param code status code to test; may be null
     * @return true when the code denotes a terminal state (>= FINISHED)
     */
    public static Boolean isFinished(Integer code) {
        // Fix: the original dereferenced `code` unconditionally and threw
        // NullPointerException for null input; treat null as "not finished".
        if (code == null) {
            return Boolean.FALSE;
        }
        return code >= FINISHED.getCode();
    }
}
|
||||
@@ -1,55 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/6/11
|
||||
*/
|
||||
public enum TaskStatusReassignEnum {
|
||||
UNKNOWN(TaskStatusEnum.UNKNOWN),
|
||||
|
||||
NEW(TaskStatusEnum.NEW),
|
||||
|
||||
RUNNABLE(TaskStatusEnum.RUNNABLE),
|
||||
|
||||
RUNNING(TaskStatusEnum.RUNNING),
|
||||
|
||||
// FINISHED(TaskStatusEnum.FINISHED),
|
||||
SUCCEED(TaskStatusEnum.SUCCEED),
|
||||
FAILED(TaskStatusEnum.FAILED),
|
||||
CANCELED(TaskStatusEnum.CANCELED),
|
||||
;
|
||||
|
||||
private Integer code;
|
||||
|
||||
private String message;
|
||||
|
||||
TaskStatusReassignEnum(TaskStatusEnum taskStatusEnum) {
|
||||
this.code = taskStatusEnum.getCode();
|
||||
this.message = taskStatusEnum.getMessage();
|
||||
}
|
||||
|
||||
public Integer getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public String getMessage() {
|
||||
return message;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TaskStatusReassignEnum{" +
|
||||
"code=" + code +
|
||||
", message='" + message + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public static Boolean isFinished(Integer code) {
|
||||
if (SUCCEED.getCode().equals(code)
|
||||
|| FAILED.getCode().equals(code)
|
||||
|| CANCELED.getCode().equals(code)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Access levels a user can hold on a topic.
 * @author zhongyuankai
 * @date 20/4/29
 */
public enum TopicAuthorityEnum {
    DENY(0, "无"),

    READ(1, "只读"),

    WRITE(2, "只写"),

    READ_WRITE(3, "可读可写"),

    OWNER(4, "可管理"),
    ;

    // Numeric authority code.
    private final Integer code;

    // Chinese display label.
    private final String message;

    TopicAuthorityEnum(Integer authorityCode, String label) {
        this.code = authorityCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }
}
|
||||
@@ -1,45 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Tri-state answer to "has the topic's offset moved?".
 * @author zengqiao
 * @date 20/8/24
 */
public enum TopicOffsetChangedEnum {
    UNKNOWN(-1, "unknown"),
    NO(0, "no"),
    YES(1, "yes"),
    ;

    // Numeric code (mutable: public setters are part of the existing API).
    private Integer code;

    // Display label.
    private String message;

    TopicOffsetChangedEnum(Integer answerCode, String label) {
        this.code = answerCode;
        this.message = label;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "TopicOffsetChangedEnum{code=" + code + ", message='" + message + "'}";
    }
}
|
||||
@@ -1,39 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
||||
|
||||
/**
 * Topic partition-reassignment actions (translated from "Topic迁移动作"):
 * start, modify or cancel a reassignment task.
 *
 * @author zengqiao
 * @date 20/4/16
 */
public enum TopicReassignActionEnum {
    START("start"),
    MODIFY("modify"),
    CANCEL("cancel"),
    ;

    /** Wire/API string identifying the action; final because enum state must be immutable. */
    private final String action;

    TopicReassignActionEnum(String action) {
        this.action = action;
    }

    public String getAction() {
        return action;
    }

    @Override
    public String toString() {
        return "TopicReassignActionEnum{" +
                "action='" + action + '\'' +
                '}';
    }

    /**
     * Looks up the enum constant by its action string (case-sensitive).
     *
     * @param action action string, e.g. "start"; may be null
     * @return matching constant, or null when no constant matches (including null input)
     */
    public static TopicReassignActionEnum getByAction(String action) {
        for (TopicReassignActionEnum elem: TopicReassignActionEnum.values()) {
            if (elem.action.equals(action)) {
                return elem;
            }
        }
        return null;
    }
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.bizenum.gateway;
|
||||
|
||||
/**
 * Well-known gateway configuration keys: each constant pairs a config type with a
 * config name, both used to look up service-discovery settings.
 *
 * @author zengqiao
 * @date 20/7/28
 */
public enum GatewayConfigKeyEnum {
    SD_CLUSTER_ID("SERVICE_DISCOVERY_CLUSTER_ID", "SERVICE_DISCOVERY_CLUSTER_ID"),
    SD_QUEUE_SIZE("SERVICE_DISCOVERY_QUEUE_SIZE", "SERVICE_DISCOVERY_QUEUE_SIZE"),
    SD_APP_ID_RATE("SERVICE_DISCOVERY_APPID_RATE", "SERVICE_DISCOVERY_APPID_RATE"),
    SD_IP_RATE("SERVICE_DISCOVERY_IP_RATE", "SERVICE_DISCOVERY_IP_RATE"),
    SD_SP_RATE("SERVICE_DISCOVERY_SP_RATE", "SERVICE_DISCOVERY_SP_RATE"),

    ;

    // Category of the config entry (here always equal to configName for each constant).
    private String configType;

    // Name of the config entry.
    private String configName;

    GatewayConfigKeyEnum(String configType, String configName) {
        this.configType = configType;
        this.configName = configName;
    }

    public String getConfigType() {
        return configType;
    }

    // NOTE(review): setters on enum fields mutate process-wide singletons; confirm they
    // are actually used before relying on these values being stable.
    public void setConfigType(String configType) {
        this.configType = configType;
    }

    public String getConfigName() {
        return configName;
    }

    public void setConfigName(String configName) {
        this.configName = configName;
    }

    @Override
    public String toString() {
        return "GatewayConfigKeyEnum{" +
                "configType='" + configType + '\'' +
                ", configName='" + configName + '\'' +
                '}';
    }
}
|
||||
@@ -1,15 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * API importance levels used to classify endpoints; 1 is the most important,
 * 4 the default/least.
 *
 * @author zengqiao
 * @date 20/7/28
 */
public class ApiLevelContent {
    public static final int LEVEL_VIP_1 = 1;

    public static final int LEVEL_IMPORTANT_2 = 2;

    public static final int LEVEL_NORMAL_3 = 3;

    public static final int LEVEL_DEFAULT_4 = 4;

    /** Constants holder — not instantiable. */
    private ApiLevelContent() {
    }
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * URL path prefixes for the REST API (translated from "Api前缀"), grouped by
 * audience (sso/normal/rd/op/third-part) and API version.
 *
 * @author zengqiao
 * @date 20/4/16
 */
public class ApiPrefix {
    public static final String API_V1_SSO_PREFIX = "/api/v1/sso/";

    public static final String API_V1_NORMAL_PREFIX = "/api/v1/normal/";

    public static final String API_V1_RD_PREFIX = "/api/v1/rd/";

    public static final String API_V1_OP_PREFIX = "/api/v1/op/";

    public static final String API_V1_THIRD_PART_PREFIX = "/api/v1/third-part/";

    public static final String API_V2_THIRD_PART_PREFIX = "/api/v2/third-part/";

    public static final String API_V1_OBSOLETE_PREFIX = "/api/v1/";

    public static final String API_V2_OBSOLETE_PREFIX = "/api/v2/";

    public static final String GATEWAY_API_V1_PREFIX = "/gateway/api/v1/";

    /** Constants holder — not instantiable. */
    private ApiPrefix() {
    }
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Well-known configuration keys and limits (translated from "配置的常量KEY").
 *
 * @author zengqiao
 * @date 20/7/1
 */
public class ConfigConstant {
    /**
     * Expert-service config keys.
     */
    public static final String REGION_HOT_TOPIC_CONFIG_KEY = "REGION_HOT_TOPIC_CONFIG";
    public static final String TOPIC_INSUFFICIENT_PARTITION_CONFIG_KEY = "TOPIC_INSUFFICIENT_PARTITION_CONFIG";
    public static final String EXPIRED_TOPIC_CONFIG_KEY = "EXPIRED_TOPIC_CONFIG";

    /**
     * Metrics-collection config keys.
     */
    public static final String PRODUCE_CONSUMER_METRICS_CONFIG_KEY = "PRODUCE_CONSUMER_METRICS_CONFIG_KEY";

    public static final String PRODUCE_TOPIC_METRICS_CONFIG_KEY = "PRODUCE_TOPIC_METRICS_CONFIG_KEY";

    public static final long MAX_LIMIT_NUM = 200L;

    /**
     * Default broker peak-traffic capacity: 100 MiB (translated from
     * "broker 默认最大峰值流量 100M").
     */
    public static final Long DEFAULT_BROKER_CAPACITY_LIMIT = 100 * 1024 * 1024L;

    public static final String BROKER_CAPACITY_LIMIT_CONFIG_KEY = "BROKER_CAPACITY_LIMIT_CONFIG";

    public static final String KAFKA_CLUSTER_DO_CONFIG_KEY = "KAFKA_CLUSTER_DO_CONFIG";

    /** Constants holder — not instantiable. */
    private ConfigConstant() {
    }
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Miscellaneous project-wide constants: success code, batch sizes, timeouts,
 * order-handling settings and sentinel user/version names.
 *
 * @author zengqiao
 * @date 20/2/28
 */
public class Constant {
    public static final Integer SUCCESS = 0;

    public static final Integer MAX_AVG_BYTES_DURATION = 10;

    public static final Integer BATCH_INSERT_SIZE = 50;

    public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000;

    public static final Integer MAX_TOPIC_OPERATION_SIZE_PER_REQUEST = 10;

    /**
     * Broker id that means "do not filter by broker" (translated from "不进行过滤的BrokerId").
     */
    public static final Integer NOT_FILTER_BROKER_ID = -1;

    /**
     * Default window for recent connection info: last 20 minutes
     * (translated from "默认最近20分钟的连接信息").
     */
    public static final Long TOPIC_CONNECTION_LATEST_TIME_MS = 20 * 60 * 1000L;

    /**
     * Work-order related constants (translated from "工单相关").
     */
    public static final String HANDLE_APP_APPLY_MAX_NUM = "handle_app_apply_order_num";

    public static final Integer HANDLE_APP_APPLY_MAX_NUM_DEFAULT = 10;

    public static final String AUTO_HANDLE_USER_NAME = "auto_handle";

    public static final String AUTO_HANDLE_CHINESE_NAME = "自动审批";

    public static final String UNKNOWN_VERSION = "unknownVersion";

    public static final String UNKNOWN_USER = "UNKNOWN_USER";

    public static final String DEFAULT_USER_NAME = "kafka-admin";

    public static final Integer DEFAULT_MAX_CAL_TOPIC_EXPIRED_DAY = 90;

    public static final Integer INVALID_CODE = -1;

    /** Constants holder — not instantiable. */
    private Constant() {
    }
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Kafka-specific constants: internal topic name, host suffix, unknown-client
 * sentinels and the retention config key.
 *
 * @author zengqiao
 * @date 20/5/20
 */
public class KafkaConstant {
    /** Kafka's internal consumer-offsets topic. */
    public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets";

    public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com";

    public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1";

    public static final String CLIENT_VERSION_NAME_UNKNOWN = "unknown";

    /** Topic-level config key controlling log retention time. */
    public static final String RETENTION_MS_KEY = "retention.ms";

    /** Constants holder — not instantiable. */
    private KafkaConstant() {
    }
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Metric-collection profile ids: each int selects which set of JMX metrics to
 * gather for a given page or DB-persistence task.
 *
 * @author zengqiao
 * @date 20/4/22
 */
public class KafkaMetricsCollections {
    public static final int COMMON_DETAIL_METRICS = 0;

    /**
     * Broker traffic details (translated from "Broker流量详情").
     */
    public static final int BROKER_TO_DB_METRICS = 101; // broker metrics persisted to DB
    public static final int BROKER_OVERVIEW_PAGE_METRICS = 103; // broker status-overview page
    public static final int BROKER_ANALYSIS_METRICS = 105; // broker analysis
    public static final int BROKER_TOPIC_ANALYSIS_METRICS = 106; // broker-topic analysis
    public static final int BROKER_BASIC_PAGE_METRICS = 107; // broker basic-info page
    public static final int BROKER_STATUS_PAGE_METRICS = 108; // broker status
    public static final int BROKER_HEALTH_SCORE_METRICS = 109; // broker health score

    /**
     * Topic traffic details (translated from "Topic流量详情").
     */
    public static final int TOPIC_FLOW_OVERVIEW = 201;
    public static final int TOPIC_METRICS_TO_DB = 202;
    public static final int TOPIC_REQUEST_TIME_METRICS_TO_DB = 203;
    public static final int TOPIC_BASIC_PAGE_METRICS = 204;
    public static final int TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS = 205;
    public static final int TOPIC_THROTTLED_METRICS_TO_DB = 206;


    /**
     * App+Topic traffic details (translated from "App+Topic流量详情").
     */
    public static final int APP_TOPIC_METRICS_TO_DB = 300;

    /**
     * Broker information (translated from "Broker信息").
     */
    public static final int BROKER_VERSION = 400;

    /** Constants holder — not instantiable. */
    private KafkaMetricsCollections() {
    }
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Names of dedicated SLF4J loggers used for metrics and scheduled-task output.
 *
 * @author zengqiao
 * @date 20/8/10
 */
public class LogConstant {
    public static final String COLLECTOR_METRICS_LOGGER = "COLLECTOR_METRICS_LOGGER";

    public static final String API_METRICS_LOGGER = "API_METRICS_LOGGER";

    public static final String SCHEDULED_TASK_LOGGER = "SCHEDULED_TASK_LOGGER";

    /** Constants holder — not instantiable. */
    private LogConstant() {
    }
}
|
||||
@@ -1,14 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Login/session constants (translated from "登录常量"): session/cookie key names
 * and the max age of a login session.
 *
 * @author zengqiao
 * @date 20/5/8
 */
public class LoginConstant {
    public static final String SESSION_USERNAME_KEY = "username";

    public static final String COOKIE_CHINESE_USERNAME_KEY = "chineseName";

    /** 24 hours, in milliseconds. */
    public static final Integer COOKIE_OR_SESSION_MAX_AGE_UNIT_MS = 24 * 60 * 60 * 1000;

    /** Constants holder — not instantiable. */
    private LoginConstant() {
    }
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * System codes identifying external/internal platforms that call this service.
 *
 * @author zengqiao
 * @date 20/7/28
 */
public class SystemCodeConstant {
    public static final String LOG_X = "LogX";

    public static final String LEO = "leo";

    public static final String DATA_DREAM = "datadream";

    public static final String KAFKA_MANAGER = "kafka-manager";

    public static final String CHORUS = "chorus"; // governance platform - service governance (translated from "治理平台-服务治理")

    /** Constants holder — not instantiable. */
    private SystemCodeConstant() {
    }
}
|
||||
@@ -1,49 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
/**
 * Defaults and config keys used when creating topics, plus a helper that builds
 * the {@link Properties} for a new topic's retention setting.
 *
 * @author zengqiao
 * @date 20/7/28
 */
public class TopicCreationConstant {
    /**
     * Config key for topics created via LogX (translated from "LogX创建Topic配置KEY").
     */
    public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG";

    /**
     * Config key for topics created via the governance platform
     * (translated from "治理平台创建Topic配置KEY").
     */
    public static final String CHORUS_CREATE_TOPIC_CONFIG_KEY_NAME = "CHORUS_CREATE_TOPIC_CONFIG";

    /**
     * Config key for internally created topics (translated from "内部创建Topic配置KEY").
     */
    public static final String INNER_CREATE_TOPIC_CONFIG_KEY = "INNER_CREATE_TOPIC_CONFIG_KEY";

    public static final Integer DEFAULT_REPLICA = 3;

    public static final Integer DEFAULT_PARTITION_NUM = 1;

    public static final Integer DEFAULT_RETENTION_TIME_UNIT_HOUR = 24;

    public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";

    /**
     * Builds topic-level properties containing only the retention.ms setting.
     *
     * @param retentionTime retention time in milliseconds
     * @return new Properties with retention.ms set to the string form of retentionTime
     */
    public static Properties createNewProperties(Long retentionTime) {
        Properties properties = new Properties();
        properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime));
        return properties;
    }

    public static final Long AUTO_EXEC_MAX_BYTES_IN_UNIT_B = 30 * 1024 * 1024L;

    /**
     * Topic name prefixes (translated from "Topic 前缀").
     */
    public static final String TOPIC_NAME_PREFIX_US = "us01_";

    public static final String TOPIC_NAME_PREFIX_RU = "ru01_";

    public static final Integer TOPIC_NAME_MAX_LENGTH = 255;

    /** Constants holder — not instantiable. */
    private TopicCreationConstant() {
    }
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.constant;
|
||||
|
||||
/**
 * Topic message-sampling settings (translated from "采样相关配置").
 *
 * @author zengqiao
 * @date 20/5/8
 */
public class TopicSampleConstant {
    /**
     * MAX_MSG_NUM: max number of sampled messages.
     * MAX_TIMEOUT_UNIT_MS: overall sampling timeout.
     * POLL_TIME_OUT_UNIT_MS: timeout of a single poll during sampling.
     * MAX_DATA_LENGTH_UNIT_BYTE: max length of sampled data when truncating.
     */
    public static final Integer MAX_MSG_NUM = 100;
    public static final Integer MAX_TIMEOUT_UNIT_MS = 10000;
    public static final Integer POLL_TIME_OUT_UNIT_MS = 2000;
    public static final Integer MAX_DATA_LENGTH_UNIT_BYTE = 2048;

    /** Constants holder — not instantiable. */
    private TopicSampleConstant() {
    }
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import kafka.admin.AdminClient;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/5/14
|
||||
*/
|
||||
public class ConsumerMetadata {
|
||||
private Set<String> consumerGroupSet = new HashSet<>();
|
||||
|
||||
private Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
|
||||
|
||||
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
|
||||
|
||||
private Map<String, List<String>> consumerGroupAppMap = new ConcurrentHashMap<>();
|
||||
|
||||
|
||||
public ConsumerMetadata(Set<String> consumerGroupSet,
|
||||
Map<String, Set<String>> topicNameConsumerGroupMap,
|
||||
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap,
|
||||
Map<String, List<String>> consumerGroupAppMap) {
|
||||
this.consumerGroupSet = consumerGroupSet;
|
||||
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
|
||||
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
|
||||
this.consumerGroupAppMap = consumerGroupAppMap;
|
||||
}
|
||||
|
||||
public Set<String> getConsumerGroupSet() {
|
||||
return consumerGroupSet;
|
||||
}
|
||||
|
||||
public Map<String, Set<String>> getTopicNameConsumerGroupMap() {
|
||||
return topicNameConsumerGroupMap;
|
||||
}
|
||||
|
||||
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
|
||||
return consumerGroupSummaryMap;
|
||||
}
|
||||
|
||||
public Map<String, List<String>> getConsumerGroupAppMap() {
|
||||
return consumerGroupAppMap;
|
||||
}
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
/**
 * Legacy response envelope carrying a status string, a message and a payload.
 * Use the static factories {@code success(...)} / {@code failure(...)} to build
 * instances.
 *
 * @author zengqiao
 * @date 20/7/27
 */
public class DeprecatedResponseResult<T> {
    public static final String SUCCESS_STATUS = "success";

    public static final String FAILED_STATUS = "failure";

    public static final String SUCCESS_MESSAGE = "process succeeded!";

    public static final String FAILED_MESSAGE = "process failed!";

    private String status;

    private String message;

    private T data;

    /** Shared construction path for all factories; data may be null. */
    private static <T> DeprecatedResponseResult<T> build(String status, String message, T data) {
        DeprecatedResponseResult<T> result = new DeprecatedResponseResult<>();
        result.setStatus(status);
        result.setMessage(message);
        result.setData(data);
        return result;
    }

    public static <T> DeprecatedResponseResult<T> success(T data) {
        return build(SUCCESS_STATUS, SUCCESS_MESSAGE, data);
    }

    public static <T> DeprecatedResponseResult<T> success() {
        return build(SUCCESS_STATUS, SUCCESS_MESSAGE, null);
    }

    public static <T> DeprecatedResponseResult<T> failure() {
        return build(FAILED_STATUS, FAILED_MESSAGE, null);
    }

    public static <T> DeprecatedResponseResult<T> failure(String message) {
        return build(FAILED_STATUS, message, null);
    }

    public String getStatus() {
        return status;
    }

    public void setStatus(String status) {
        this.status = status;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public T getData() {
        return data;
    }

    public void setData(T data) {
        this.data = data;
    }

    @Override
    public String toString() {
        return "DeprecatedResponseResult{" +
                "status='" + status + '\'' +
                ", message='" + message + '\'' +
                ", data=" + data +
                '}';
    }
}
|
||||
@@ -1,82 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
|
||||
|
||||
/**
 * Parses a Kafka broker version string (e.g. "2.5.0-d-3") into a comparable
 * long. Encoding: ((major*100 + minor)*100 + patch)*10000 + didiExtension.
 * Unparseable versions map to {@link Long#MAX_VALUE}.
 *
 * @author zengqiao
 * @date 20/6/15
 */
public class KafkaVersion {
    // Marker segment preceding the Didi-internal extension number in version strings.
    private static final String DIDI_VERSION_EXTEND = "d";

    // Encodes 0.10.3 under the scheme above; original note read "0.10.2+" —
    // presumably meaning "this version and later"; confirm against callers.
    public static final Long VERSION_0_10_3 = 10030000L; // 0.10.2+
    public static final Long VERSION_MAX = Long.MAX_VALUE;

    // Raw version string; stays null until init() succeeds (see initialized()).
    private volatile String version = null;

    // Encoded version number; Long.MAX_VALUE means "unknown".
    private volatile long versionNum = Long.MAX_VALUE;

    /** Returns true once init() has stored a parseable version string. */
    public boolean initialized() {
        if (ValidateUtils.isNull(version)) {
            return false;
        }
        return true;
    }

    public String getVersion() {
        return version;
    }

    public long getVersionNum() {
        return versionNum;
    }

    @Override
    public String toString() {
        return "KafkaVersion{" +
                "version='" + version + '\'' +
                ", versionNum=" + versionNum +
                '}';
    }

    /**
     * Parses and stores the given version string.
     *
     * @param version raw version, e.g. "2.5.0-d-3"; must not be null (NPE otherwise)
     * @return the encoded version number, or Long.MAX_VALUE on parse failure
     */
    public long init(String version) {
        version = version.toLowerCase();
        String[] splitElems = version.split("-");
        int splitElemLength = splitElems.length;
        // NOTE(review): String.split always yields at least one element, so this
        // guard appears unreachable; kept as-is.
        if (splitElemLength <= 0) {
            versionNum = Long.MAX_VALUE;
            return versionNum;
        }

        try {
            // Kafka's own version segment, e.g. "2.5.0".
            String[] kafkaVersion = splitElems[0].split("\\.");
            int kafkaVersionLength = kafkaVersion.length;

            versionNum = kafkaVersionLength > 0? Integer.valueOf(kafkaVersion[0]): 0;
            versionNum = versionNum * 100 + (kafkaVersionLength > 1? Integer.valueOf(kafkaVersion[1]): 0);
            versionNum = versionNum * 100 + (kafkaVersionLength > 2? Integer.valueOf(kafkaVersion[2]): 0);
        } catch (Exception e) {
            // Kafka version unavailable: report "unknown" (translated from
            // "Kafka版本信息获取不到时, 直接返回空").
            this.versionNum = Long.MAX_VALUE;
            return versionNum;
        }

        // Version parsed successfully (translated from "成功获取版本信息").
        versionNum = versionNum * 10000;
        this.version = version;

        // Append the Didi extension number, if present (translated from "补充扩展信息").
        try {
            for (int idx = 0; idx < splitElemLength; ++idx) {
                if (splitElems[idx].equals(DIDI_VERSION_EXTEND) && idx < splitElemLength - 1) {
                    versionNum = versionNum + (Integer.valueOf(splitElems[idx + 1]));
                    return versionNum;
                }
            }
        } catch (Exception e) {
            // Extension number unavailable: ignore (translated from "扩展版本信息获取不到时, 忽略").
        }
        return versionNum;
    }
}
|
||||
@@ -1,105 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
 * Generic API result envelope: payload, message, optional tips and a numeric
 * code (0 = success, via ResultStatus.SUCCESS).
 *
 * @author huangyiminghappy@163.com
 * @date 2019-07-08
 */
public class Result<T> implements Serializable {
    private static final long serialVersionUID = -2772975319944108658L;

    // Response payload; may be null.
    private T data;
    // Human-readable status message.
    private String message;
    // Optional extra hint for the caller/UI.
    private String tips;
    // Numeric status code; 0 means success.
    private int code;

    /** Success result carrying the given payload. */
    public Result(T data) {
        this.data = data;
        this.code = ResultStatus.SUCCESS.getCode();
        this.message = ResultStatus.SUCCESS.getMessage();
    }

    /** Success result with no payload. */
    public Result() {
        this(null);
    }

    public Result(Integer code, String message) {
        this.message = message;
        this.code = code;
    }

    public Result(Integer code, T data, String message) {
        this.data = data;
        this.message = message;
        this.code = code;
    }

    public T getData()
    {
        // NOTE(review): cast is redundant — data is already of type T.
        return (T)this.data;
    }

    public void setData(T data)
    {
        this.data = data;
    }

    public String getMessage()
    {
        return this.message;
    }

    public void setMessage(String message)
    {
        this.message = message;
    }

    public String getTips() {
        return tips;
    }

    public void setTips(String tips) {
        this.tips = tips;
    }

    public int getCode()
    {
        return this.code;
    }

    public void setCode(int code)
    {
        this.code = code;
    }

    @Override
    public String toString()
    {
        // Serialized as JSON via fastjson.
        return JSON.toJSONString(this);
    }

    // NOTE(review): the static factories below use the raw Result type; callers
    // get unchecked warnings. Kept for source compatibility.

    /** Builds a success result with no payload. */
    public static Result buildSuc() {
        Result result = new Result();
        result.setCode(ResultStatus.SUCCESS.getCode());
        result.setMessage(ResultStatus.SUCCESS.getMessage());
        return result;
    }

    /** Builds a result whose code/message come from the given status. */
    public static Result buildFrom(ResultStatus resultStatus) {
        Result result = new Result();
        result.setCode(resultStatus.getCode());
        result.setMessage(resultStatus.getMessage());
        return result;
    }

    /** Builds a result from the given status, additionally carrying data. */
    public static Result buildFrom(ResultStatus resultStatus, Object data) {
        Result result = new Result();
        result.setCode(resultStatus.getCode());
        result.setMessage(resultStatus.getMessage());
        result.setData(data);
        return result;
    }
}
|
||||
@@ -1,154 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
|
||||
/**
 * Return statuses (code + message) for API responses (translated from "返回状态").
 *
 * <p>NOTE(review): many distinct statuses share the same numeric code (e.g. 1000,
 * 10000), so codes alone do not identify the failure — callers must rely on the
 * enum constant or message.
 *
 * @author zengqiao
 * @date 20/4/16
 */
public enum ResultStatus {
    SUCCESS(Constant.SUCCESS, "success"),
    LOGIN_FAILED(1, "login failed, please check username and password"),


    /**
     * Internal dependency errors, [1000, 1200)
     * (translated from "内部依赖错误").
     * ------------------------------------------------------------------------------------------
     */
    MYSQL_ERROR(1000, "operate database failed"),

    CONNECT_ZOOKEEPER_FAILED(1000, "connect zookeeper failed"),
    READ_ZOOKEEPER_FAILED(1000, "read zookeeper failed"),
    READ_JMX_FAILED(1000, "read jmx failed"),


    // Internal dependency errors — Kafka-specific, [1000, 1100)
    // (translated from "内部依赖错误 —— Kafka特定错误").
    BROKER_NUM_NOT_ENOUGH(1000, "broker not enough"),
    CONTROLLER_NOT_ALIVE(1000, "controller not alive"),
    CLUSTER_METADATA_ERROR(1000, "cluster metadata error"),
    TOPIC_CONFIG_ERROR(1000, "topic config error"),


    /**
     * External dependency errors, [1200, 1400)
     * (translated from "外部依赖错误").
     * ------------------------------------------------------------------------------------------
     */
    CALL_CLUSTER_TASK_AGENT_FAILED(1000, " call cluster task agent failed"),
    CALL_MONITOR_SYSTEM_ERROR(1000, " call monitor-system failed"),



    /**
     * External user/operation errors, [1400, 1600)
     * (translated from "外部用户操作错误").
     * ------------------------------------------------------------------------------------------
     */
    PARAM_ILLEGAL(1400, "param illegal"),
    OPERATION_FAILED(1401, "operation failed"),
    OPERATION_FORBIDDEN(1402, "operation forbidden"),
    API_CALL_EXCEED_LIMIT(1403, "api call exceed limit"),

    // Resource does not exist (translated from "资源不存在").
    CLUSTER_NOT_EXIST(10000, "cluster not exist"),
    BROKER_NOT_EXIST(10000, "broker not exist"),
    TOPIC_NOT_EXIST(10000, "topic not exist"),
    PARTITION_NOT_EXIST(10000, "partition not exist"),

    ACCOUNT_NOT_EXIST(10000, "account not exist"),
    APP_NOT_EXIST(1000, "app not exist"),
    ORDER_NOT_EXIST(1000, "order not exist"),
    CONFIG_NOT_EXIST(1000, "config not exist"),
    IDC_NOT_EXIST(1000, "idc not exist"),
    TASK_NOT_EXIST(1110, "task not exist"),

    AUTHORITY_NOT_EXIST(1000, "authority not exist"),

    MONITOR_NOT_EXIST(1110, "monitor not exist"),

    QUOTA_NOT_EXIST(1000, "quota not exist, please check clusterId, topicName and appId"),

    // Resource missing / already exists / already in use
    // (translated from "资源不存在, 已存在, 已被使用").
    RESOURCE_NOT_EXIST(1200, "资源不存在"),
    RESOURCE_ALREADY_EXISTED(1200, "资源已经存在"),
    RESOURCE_NAME_DUPLICATED(1200, "资源名称重复"),
    RESOURCE_ALREADY_USED(1000, "资源早已被使用"),


    /**
     * Resource parameter errors (translated from "资源参数错误").
     */
    CG_LOCATION_ILLEGAL(10000, "consumer group location illegal"),
    ORDER_ALREADY_HANDLED(1000, "order already handled"),

    APP_ID_OR_PASSWORD_ILLEGAL(1000, "app or password illegal"),
    SYSTEM_CODE_ILLEGAL(1000, "system code illegal"),









    ///////////////////////////////////////////////////////////////

    USER_WITHOUT_AUTHORITY(1000, "user without authority"),


    JSON_PARSER_ERROR(1000, "json parser error"),

    TOPIC_OPERATION_PARAM_NULL_POINTER(2, "参数错误"),
    TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(3, "分区数错误"),
    TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
    TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
    TOPIC_OPERATION_TOPIC_EXISTED(6, "Topic已存在"),
    TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
    TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
    TOPIC_OPERATION_TOPIC_IN_DELETING(9, "Topic正在删除"),
    TOPIC_OPERATION_UNKNOWN_ERROR(10, "未知错误"),
    TOPIC_EXIST_CONNECT_CANNOT_DELETE(10, "topic exist connect cannot delete"),
    EXIST_TOPIC_CANNOT_DELETE(10, "exist topic cannot delete"),


    /**
     * Work orders (translated from "工单").
     */
    CHANGE_ZOOKEEPER_FORBIDEN(100, "change zookeeper forbiden"),
    // APP_EXIST_TOPIC_AUTHORITY_CANNOT_DELETE(1000, "app exist topic authority cannot delete"),

    UPLOAD_FILE_FAIL(1000, "upload file fail"),
    FILE_TYPE_NOT_SUPPORT(1000, "File type not support"),
    DOWNLOAD_FILE_FAIL(1000, "download file fail"),


    TOPIC_ALREADY_EXIST(17400, "topic already existed"),
    CONSUMER_GROUP_NOT_EXIST(17411, "consumerGroup not exist"),
    ;

    // Numeric status code (see class note about duplicate codes).
    private int code;
    // Human-readable message; some are Chinese by design.
    private String message;

    ResultStatus(int code, String message) {
        this.code = code;
        this.message = message;
    }

    public int getCode() {
        return code;
    }

    // NOTE(review): setters make enum singletons mutable; kept as part of the interface.
    public void setCode(int code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity;
|
||||
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
|
||||
/**
 * Per-topic outcome of a topic operation (cluster id, topic name, status code
 * and message). Built from a {@link Result} or {@link ResultStatus}.
 *
 * @author zengqiao
 * @date 20/4/2
 */
public class TopicOperationResult {
    @ApiModelProperty(value = "集群ID")
    private Long clusterId;

    @ApiModelProperty(value = "Topic名称")
    private String topicName;

    @ApiModelProperty(value = "状态码, 0:成功, 其他失败")
    private Integer code;

    @ApiModelProperty(value = "信息")
    private String message;

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public String getTopicName() {
        return topicName;
    }

    public void setTopicName(String topicName) {
        this.topicName = topicName;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "TopicOperationResult{" +
                "clusterId=" + clusterId +
                ", topicName='" + topicName + '\'' +
                ", code=" + code +
                ", message='" + message + '\'' +
                '}';
    }

    /** Copies code/message from a Result into a per-topic operation result. */
    public static TopicOperationResult buildFrom(Long clusterId, String topicName, Result rs) {
        return buildFrom(clusterId, topicName, rs.getCode(), rs.getMessage());
    }

    /** Copies code/message from a ResultStatus into a per-topic operation result. */
    public static TopicOperationResult buildFrom(Long clusterId, String topicName, ResultStatus rs) {
        return buildFrom(clusterId, topicName, rs.getCode(), rs.getMessage());
    }

    // Shared construction path for both public factories.
    private static TopicOperationResult buildFrom(Long clusterId,
                                                  String topicName,
                                                  Integer code,
                                                  String message) {
        TopicOperationResult result = new TopicOperationResult();
        result.setClusterId(clusterId);
        result.setTopicName(topicName);
        result.setCode(code);
        result.setMessage(message);
        return result;
    }
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.ao;
|
||||
|
||||
/**
 * App-topic relationship info (translated from "AppTopic信息"): which topic an
 * app can access, on which logical/physical cluster, with what access level.
 *
 * @author zengqiao
 * @date 20/5/11
 */
public class AppTopicDTO {
    // Logical cluster id and display name.
    private Long logicalClusterId;

    private String logicalClusterName;

    // Physical cluster backing the logical one.
    private Long physicalClusterId;

    private String topicName;

    // Access level code; semantics defined elsewhere (presumably TopicAuthorityEnum — confirm).
    private Integer access;

    // User who performed the operation.
    private String operator;

    // Creation timestamp (epoch millis, presumably — confirm against writer).
    private Long gmtCreate;

    public Long getLogicalClusterId() {
        return logicalClusterId;
    }

    public void setLogicalClusterId(Long logicalClusterId) {
        this.logicalClusterId = logicalClusterId;
    }

    public String getLogicalClusterName() {
        return logicalClusterName;
    }

    public void setLogicalClusterName(String logicalClusterName) {
        this.logicalClusterName = logicalClusterName;
    }

    public Long getPhysicalClusterId() {
        return physicalClusterId;
    }

    public void setPhysicalClusterId(Long physicalClusterId) {
        this.physicalClusterId = physicalClusterId;
    }

    public String getTopicName() {
        return topicName;
    }

    public void setTopicName(String topicName) {
        this.topicName = topicName;
    }

    public Integer getAccess() {
        return access;
    }

    public void setAccess(Integer access) {
        this.access = access;
    }

    public String getOperator() {
        return operator;
    }

    public void setOperator(String operator) {
        this.operator = operator;
    }

    public Long getGmtCreate() {
        return gmtCreate;
    }

    public void setGmtCreate(Long gmtCreate) {
        this.gmtCreate = gmtCreate;
    }

    @Override
    public String toString() {
        return "AppTopicDTO{" +
                "logicalClusterId=" + logicalClusterId +
                ", logicalClusterName='" + logicalClusterName + '\'' +
                ", physicalClusterId=" + physicalClusterId +
                ", topicName='" + topicName + '\'' +
                ", access=" + access +
                ", operator='" + operator + '\'' +
                ", gmtCreate=" + gmtCreate +
                '}';
    }
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.ao;
|
||||
|
||||
/**
 * Basic broker information: endpoint (host/port/JMX port) plus topic,
 * partition and leader counts and the broker start time.
 * @author zengqiao_cn@163.com
 * @date 19/4/8
 */
public class BrokerBasicDTO {
    /** Broker hostname or IP. */
    private String host;

    /** Broker listener port. */
    private Integer port;

    /** JMX port used for metrics collection. */
    private Integer jmxPort;

    /** Topic count on this broker — TODO confirm counting rule (any replica vs leader only). */
    private Integer topicNum;

    /** Partition (replica) count on this broker. */
    private Integer partitionCount;

    /** Broker start time; presumably epoch millis — confirm with metadata source. */
    private Long startTime;

    /** Number of partitions for which this broker is leader. */
    private Integer leaderCount;

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Integer getPort() {
        return port;
    }

    public void setPort(Integer port) {
        this.port = port;
    }

    public Integer getJmxPort() {
        return jmxPort;
    }

    public void setJmxPort(Integer jmxPort) {
        this.jmxPort = jmxPort;
    }

    public Integer getTopicNum() {
        return topicNum;
    }

    public void setTopicNum(Integer topicNum) {
        this.topicNum = topicNum;
    }

    public Integer getPartitionCount() {
        return partitionCount;
    }

    public void setPartitionCount(Integer partitionCount) {
        this.partitionCount = partitionCount;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Integer getLeaderCount() {
        return leaderCount;
    }

    public void setLeaderCount(Integer leaderCount) {
        this.leaderCount = leaderCount;
    }

    @Override
    public String toString() {
        // Fixed: previously misreported the class name as "BrokerBasicInfoDTO".
        return "BrokerBasicDTO{" +
                "host='" + host + '\'' +
                ", port=" + port +
                ", jmxPort=" + jmxPort +
                ", topicNum=" + topicNum +
                ", partitionCount=" + partitionCount +
                ", startTime=" + startTime +
                ", leaderCount=" + leaderCount +
                '}';
    }
}
|
||||
@@ -1,189 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.ao;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
|
||||
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
|
||||
|
||||
/**
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/21
|
||||
*/
|
||||
public class BrokerOverviewDTO {
|
||||
private Integer brokerId;
|
||||
|
||||
private String host;
|
||||
|
||||
private Integer port;
|
||||
|
||||
private Integer jmxPort;
|
||||
|
||||
private Long startTime;
|
||||
|
||||
private Object byteIn;
|
||||
|
||||
private Object byteOut;
|
||||
|
||||
private Integer partitionCount;
|
||||
|
||||
private Integer underReplicatedPartitions;
|
||||
|
||||
private Boolean underReplicated;
|
||||
|
||||
private Integer status;
|
||||
|
||||
private Integer peakFlowStatus;
|
||||
|
||||
private String kafkaVersion;
|
||||
|
||||
private Integer leaderCount;
|
||||
|
||||
public Integer getBrokerId() {
|
||||
return brokerId;
|
||||
}
|
||||
|
||||
public void setBrokerId(Integer brokerId) {
|
||||
this.brokerId = brokerId;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public Integer getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(Integer port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public Integer getJmxPort() {
|
||||
return jmxPort;
|
||||
}
|
||||
|
||||
public void setJmxPort(Integer jmxPort) {
|
||||
this.jmxPort = jmxPort;
|
||||
}
|
||||
|
||||
public Long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
public void setStartTime(Long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
public Object getByteIn() {
|
||||
return byteIn;
|
||||
}
|
||||
|
||||
public void setByteIn(Object byteIn) {
|
||||
this.byteIn = byteIn;
|
||||
}
|
||||
|
||||
public Object getByteOut() {
|
||||
return byteOut;
|
||||
}
|
||||
|
||||
public void setByteOut(Object byteOut) {
|
||||
this.byteOut = byteOut;
|
||||
}
|
||||
|
||||
public Integer getPartitionCount() {
|
||||
return partitionCount;
|
||||
}
|
||||
|
||||
public void setPartitionCount(Integer partitionCount) {
|
||||
this.partitionCount = partitionCount;
|
||||
}
|
||||
|
||||
public Integer getUnderReplicatedPartitions() {
|
||||
return underReplicatedPartitions;
|
||||
}
|
||||
|
||||
public void setUnderReplicatedPartitions(Integer underReplicatedPartitions) {
|
||||
this.underReplicatedPartitions = underReplicatedPartitions;
|
||||
}
|
||||
|
||||
public Boolean getUnderReplicated() {
|
||||
return underReplicated;
|
||||
}
|
||||
|
||||
public void setUnderReplicated(Boolean underReplicated) {
|
||||
this.underReplicated = underReplicated;
|
||||
}
|
||||
|
||||
public Integer getStatus() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public void setStatus(Integer status) {
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public Integer getPeakFlowStatus() {
|
||||
return peakFlowStatus;
|
||||
}
|
||||
|
||||
public void setPeakFlowStatus(Integer peakFlowStatus) {
|
||||
this.peakFlowStatus = peakFlowStatus;
|
||||
}
|
||||
|
||||
public String getKafkaVersion() {
|
||||
return kafkaVersion;
|
||||
}
|
||||
|
||||
public void setKafkaVersion(String kafkaVersion) {
|
||||
this.kafkaVersion = kafkaVersion;
|
||||
}
|
||||
|
||||
public Integer getLeaderCount() {
|
||||
return leaderCount;
|
||||
}
|
||||
|
||||
public void setLeaderCount(Integer leaderCount) {
|
||||
this.leaderCount = leaderCount;
|
||||
}
|
||||
|
||||
public static BrokerOverviewDTO newInstance(BrokerMetadata brokerMetadata,
|
||||
BrokerMetrics brokerMetrics,
|
||||
String kafkaVersion) {
|
||||
BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO();
|
||||
brokerOverviewDTO.setBrokerId(brokerMetadata.getBrokerId());
|
||||
brokerOverviewDTO.setHost(brokerMetadata.getHost());
|
||||
brokerOverviewDTO.setPort(brokerMetadata.getPort());
|
||||
brokerOverviewDTO.setJmxPort(brokerMetadata.getJmxPort());
|
||||
brokerOverviewDTO.setStartTime(brokerMetadata.getTimestamp());
|
||||
brokerOverviewDTO.setStatus(0);
|
||||
if (brokerMetrics == null) {
|
||||
return brokerOverviewDTO;
|
||||
}
|
||||
brokerOverviewDTO.setByteIn(
|
||||
brokerMetrics.getSpecifiedMetrics("BytesInPerSecOneMinuteRate")
|
||||
);
|
||||
brokerOverviewDTO.setByteOut(
|
||||
brokerMetrics.getSpecifiedMetrics("BytesOutPerSecOneMinuteRate")
|
||||
);
|
||||
brokerOverviewDTO.setPartitionCount(
|
||||
brokerMetrics.getSpecifiedMetrics("PartitionCountValue", Integer.class)
|
||||
);
|
||||
brokerOverviewDTO.setUnderReplicatedPartitions(
|
||||
brokerMetrics.getSpecifiedMetrics("UnderReplicatedPartitionsValue", Integer.class)
|
||||
);
|
||||
|
||||
if (!ValidateUtils.isNull(brokerOverviewDTO.getUnderReplicatedPartitions())) {
|
||||
brokerOverviewDTO.setUnderReplicated(brokerOverviewDTO.getUnderReplicatedPartitions() > 0);
|
||||
}
|
||||
brokerOverviewDTO.setLeaderCount(
|
||||
brokerMetrics.getSpecifiedMetrics("LeaderCountValue", Integer.class)
|
||||
);
|
||||
brokerOverviewDTO.setKafkaVersion(kafkaVersion);
|
||||
return brokerOverviewDTO;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
@@ -1,191 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.ao;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
 * Detailed cluster view: connection/config info plus broker, topic,
 * consumer-group, controller and region statistics.
 * @author zengqiao
 * @date 20/4/23
 */
public class ClusterDetailDTO {
    /** Cluster ID. */
    private Long clusterId;

    /** Cluster display name. */
    private String clusterName;

    /** ZooKeeper connect string. */
    private String zookeeper;

    /** Kafka bootstrap servers connect string. */
    private String bootstrapServers;

    /** Kafka version of the cluster. */
    private String kafkaVersion;

    /** Data-center identifier. */
    private String idc;

    /** Cluster mode flag — semantics defined by caller. */
    private Integer mode;

    /** Security (e.g. SASL/SSL) properties as a string — TODO confirm format. */
    private String securityProperties;

    /** Status flag — semantics defined by caller. */
    private Integer status;

    /** Record creation time. */
    private Date gmtCreate;

    /** Record last-modified time. */
    private Date gmtModify;

    /** Number of brokers in the cluster. */
    private Integer brokerNum;

    /** Number of topics in the cluster. */
    private Integer topicNum;

    /** Number of consumer groups in the cluster. */
    private Integer consumerGroupNum;

    /** Broker ID of the current controller. */
    private Integer controllerId;

    /** Number of regions configured for the cluster. */
    private Integer regionNum;

    public Long getClusterId() { return clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getClusterName() { return clusterName; }

    public void setClusterName(String clusterName) { this.clusterName = clusterName; }

    public String getZookeeper() { return zookeeper; }

    public void setZookeeper(String zookeeper) { this.zookeeper = zookeeper; }

    public String getBootstrapServers() { return bootstrapServers; }

    public void setBootstrapServers(String bootstrapServers) { this.bootstrapServers = bootstrapServers; }

    public String getKafkaVersion() { return kafkaVersion; }

    public void setKafkaVersion(String kafkaVersion) { this.kafkaVersion = kafkaVersion; }

    public String getIdc() { return idc; }

    public void setIdc(String idc) { this.idc = idc; }

    public Integer getMode() { return mode; }

    public void setMode(Integer mode) { this.mode = mode; }

    public String getSecurityProperties() { return securityProperties; }

    public void setSecurityProperties(String securityProperties) { this.securityProperties = securityProperties; }

    public Integer getStatus() { return status; }

    public void setStatus(Integer status) { this.status = status; }

    public Date getGmtCreate() { return gmtCreate; }

    public void setGmtCreate(Date gmtCreate) { this.gmtCreate = gmtCreate; }

    public Date getGmtModify() { return gmtModify; }

    public void setGmtModify(Date gmtModify) { this.gmtModify = gmtModify; }

    public Integer getBrokerNum() { return brokerNum; }

    public void setBrokerNum(Integer brokerNum) { this.brokerNum = brokerNum; }

    public Integer getTopicNum() { return topicNum; }

    public void setTopicNum(Integer topicNum) { this.topicNum = topicNum; }

    public Integer getConsumerGroupNum() { return consumerGroupNum; }

    public void setConsumerGroupNum(Integer consumerGroupNum) { this.consumerGroupNum = consumerGroupNum; }

    public Integer getControllerId() { return controllerId; }

    public void setControllerId(Integer controllerId) { this.controllerId = controllerId; }

    public Integer getRegionNum() { return regionNum; }

    public void setRegionNum(Integer regionNum) { this.regionNum = regionNum; }

    @Override
    public String toString() {
        // StringBuilder.append renders nulls as "null", matching plain concatenation.
        // NOTE(review): mode is an Integer but is rendered quoted like a String —
        // preserved exactly as the original output format.
        final StringBuilder sb = new StringBuilder("ClusterDetailDTO{");
        sb.append("clusterId=").append(clusterId);
        sb.append(", clusterName='").append(clusterName).append('\'');
        sb.append(", zookeeper='").append(zookeeper).append('\'');
        sb.append(", bootstrapServers='").append(bootstrapServers).append('\'');
        sb.append(", kafkaVersion='").append(kafkaVersion).append('\'');
        sb.append(", idc='").append(idc).append('\'');
        sb.append(", mode='").append(mode).append('\'');
        sb.append(", securityProperties='").append(securityProperties).append('\'');
        sb.append(", status=").append(status);
        sb.append(", gmtCreate=").append(gmtCreate);
        sb.append(", gmtModify=").append(gmtModify);
        sb.append(", brokerNum=").append(brokerNum);
        sb.append(", topicNum=").append(topicNum);
        sb.append(", consumerGroupNum=").append(consumerGroupNum);
        sb.append(", controllerId=").append(controllerId);
        sb.append(", regionNum=").append(regionNum);
        return sb.append('}').toString();
    }
}
|
||||
@@ -1,24 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.ao;
|
||||
|
||||
/**
 * Per-partition attribute holder; currently carries only the log size.
 * @author zhongyuankai
 * @date 2020/5/26
 */
public class PartitionAttributeDTO {
    /** Partition log size — unit not shown here; presumably bytes, confirm with producer. */
    private Long logSize;

    public Long getLogSize() { return logSize; }

    public void setLogSize(Long logSize) { this.logSize = logSize; }

    @Override
    public String toString() {
        // StringBuilder.append renders nulls as "null", matching plain concatenation.
        final StringBuilder sb = new StringBuilder("PartitionAttributeDTO{");
        sb.append("logSize=").append(logSize);
        return sb.append('}').toString();
    }
}
|
||||
@@ -1,62 +0,0 @@
|
||||
package com.xiaojukeji.kafka.manager.common.entity.ao;
|
||||
|
||||
/**
 * Offset of a single topic partition, optionally with the timestamp the
 * offset was resolved for.
 * @author zengqiao
 * @date 19/6/2
 */
public class PartitionOffsetDTO {
    /** Partition ID within the topic. */
    private Integer partitionId;

    /** Offset value for the partition. */
    private Long offset;

    /** Timestamp associated with the offset; null when not applicable. */
    private Long timestamp;

    public PartitionOffsetDTO() {
    }

    public PartitionOffsetDTO(Integer partitionId, Long offset) {
        this.partitionId = partitionId;
        this.offset = offset;
    }

    public PartitionOffsetDTO(Integer partitionId, Long offset, Long timestamp) {
        this.partitionId = partitionId;
        this.offset = offset;
        this.timestamp = timestamp;
    }

    public Integer getPartitionId() {
        return partitionId;
    }

    public void setPartitionId(Integer partitionId) {
        this.partitionId = partitionId;
    }

    public Long getOffset() {
        return offset;
    }

    public void setOffset(Long offset) {
        this.offset = offset;
    }

    public Long getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(Long timestamp) {
        this.timestamp = timestamp;
    }

    @Override
    public String toString() {
        // Fixed: previously misreported the class name as "TopicOffsetDTO" and
        // emitted a stray leading comma before the first field.
        return "PartitionOffsetDTO{" +
                "partitionId=" + partitionId +
                ", offset=" + offset +
                ", timestamp=" + timestamp +
                '}';
    }
}
|
||||
|
||||