Compare commits
460 Commits
v3.0.0-beta
...
v3.0.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
508402d8ec | ||
|
|
eb3e573b22 | ||
|
|
a96853db90 | ||
|
|
c1502152c0 | ||
|
|
afda292796 | ||
|
|
163cab78ae | ||
|
|
8f4ff36c09 | ||
|
|
47b6b3577a | ||
|
|
f3eca3b214 | ||
|
|
62f7d3f72f | ||
|
|
26e60d8a64 | ||
|
|
5e7fbcf078 | ||
|
|
3fb35d1fcc | ||
|
|
538d54cae0 | ||
|
|
78b02f80ba | ||
|
|
f9ec890e1d | ||
|
|
af1bb2ccbd | ||
|
|
714e9a56a3 | ||
|
|
88d0a60182 | ||
|
|
05c52cd672 | ||
|
|
586b37caa0 | ||
|
|
d8aa3d64df | ||
|
|
13d8fd55c8 | ||
|
|
4133981048 | ||
|
|
2f0b18b005 | ||
|
|
44134ce0d6 | ||
|
|
5f21e5a728 | ||
|
|
d5079a1b75 | ||
|
|
656dfc2285 | ||
|
|
99be2d704f | ||
|
|
d071e31106 | ||
|
|
55b34d08dd | ||
|
|
7a29e58453 | ||
|
|
8892b5250e | ||
|
|
75e53a9617 | ||
|
|
7294aba59f | ||
|
|
a8c779675a | ||
|
|
facae65f61 | ||
|
|
0c6475b063 | ||
|
|
92d6214f4f | ||
|
|
6ad29b9565 | ||
|
|
f3b64ca463 | ||
|
|
9340e07662 | ||
|
|
50482c40d5 | ||
|
|
12ebc32cec | ||
|
|
215602bb84 | ||
|
|
5355c5c1f3 | ||
|
|
e13d77c81d | ||
|
|
103db39460 | ||
|
|
750da7c9d7 | ||
|
|
0fea002142 | ||
|
|
7163c74cba | ||
|
|
2fb3aa1c14 | ||
|
|
dc8604ad81 | ||
|
|
9c67afd170 | ||
|
|
bd48bc6a3d | ||
|
|
b75e630bac | ||
|
|
ebd4e4735d | ||
|
|
b3ad6a71ca | ||
|
|
91e2189864 | ||
|
|
ddd5d1b892 | ||
|
|
8aa877071c | ||
|
|
efa253fac8 | ||
|
|
3744c0e97d | ||
|
|
d510640e43 | ||
|
|
d7986ad8dd | ||
|
|
fbc4d4a540 | ||
|
|
bc32c71048 | ||
|
|
c4910964db | ||
|
|
1bc725bd62 | ||
|
|
34b7c6746b | ||
|
|
20d5b27bb6 | ||
|
|
a4abb4069d | ||
|
|
c73cfce780 | ||
|
|
dfb9b6136b | ||
|
|
341bd58d51 | ||
|
|
4386181304 | ||
|
|
fb21d8135c | ||
|
|
b4580277a9 | ||
|
|
df655a250c | ||
|
|
811fc9b400 | ||
|
|
83df02783c | ||
|
|
6a5efce874 | ||
|
|
fa0ae5e474 | ||
|
|
cafd665a2d | ||
|
|
e8f77a456b | ||
|
|
4510c62ebd | ||
|
|
79864955e1 | ||
|
|
ff26a8d46c | ||
|
|
cc226d552e | ||
|
|
962f89475b | ||
|
|
ec204a1605 | ||
|
|
58d7623938 | ||
|
|
8f4ecfcdc0 | ||
|
|
ef719cedbc | ||
|
|
b7856c892b | ||
|
|
7435a78883 | ||
|
|
f49206b316 | ||
|
|
7d500a0721 | ||
|
|
98a519f20b | ||
|
|
39b655bb43 | ||
|
|
78d56a49fe | ||
|
|
d2e9d1fa01 | ||
|
|
41ff914dc3 | ||
|
|
3ba447fac2 | ||
|
|
e9cc380a2e | ||
|
|
017cac9bbe | ||
|
|
9ad72694af | ||
|
|
e8f9821870 | ||
|
|
bb167b9f8d | ||
|
|
28fbb5e130 | ||
|
|
16101e81e8 | ||
|
|
aced504d2a | ||
|
|
abb064d9d1 | ||
|
|
dc1899a1cd | ||
|
|
442f34278c | ||
|
|
a6dcbcd35b | ||
|
|
2b600e96eb | ||
|
|
177bb80f31 | ||
|
|
63fbe728c4 | ||
|
|
b33020840b | ||
|
|
c5caf7c0d6 | ||
|
|
0f0473db4c | ||
|
|
beadde3e06 | ||
|
|
a423a20480 | ||
|
|
79f0a23813 | ||
|
|
780fdea2cc | ||
|
|
1c0fda1adf | ||
|
|
9cf13e9b30 | ||
|
|
87cd058fd8 | ||
|
|
81b1ec48c2 | ||
|
|
66dd82f4fd | ||
|
|
ce35b23911 | ||
|
|
e79342acf5 | ||
|
|
3fc9f39d24 | ||
|
|
0221fb3a4a | ||
|
|
f009f8b7ba | ||
|
|
b76959431a | ||
|
|
975370b593 | ||
|
|
7275030971 | ||
|
|
99b0be5a95 | ||
|
|
edd3f95fc4 | ||
|
|
479f983b09 | ||
|
|
7650332252 | ||
|
|
8f1a021851 | ||
|
|
ce4df4d5fd | ||
|
|
bd43ae1b5d | ||
|
|
8fa34116b9 | ||
|
|
7e92553017 | ||
|
|
b7e243a693 | ||
|
|
35d4888afb | ||
|
|
b3e8a4f0f6 | ||
|
|
321125caee | ||
|
|
e01427aa4f | ||
|
|
14652e7f7a | ||
|
|
7c05899dbd | ||
|
|
56726b703f | ||
|
|
6237b0182f | ||
|
|
be5b662f65 | ||
|
|
224698355c | ||
|
|
8f47138ecd | ||
|
|
d159746391 | ||
|
|
63df93ea5e | ||
|
|
38948c0daa | ||
|
|
6c610427b6 | ||
|
|
b4cc31c459 | ||
|
|
7d781712c9 | ||
|
|
dd61ce9b2a | ||
|
|
69a7212986 | ||
|
|
ff05a951fd | ||
|
|
89d5357b40 | ||
|
|
7ca3d65c42 | ||
|
|
7b5c2d800f | ||
|
|
045f65204b | ||
|
|
f414b47a78 | ||
|
|
44f4e2f0f9 | ||
|
|
2361008bdf | ||
|
|
7377ef3ec5 | ||
|
|
a28d064b7a | ||
|
|
e2e57e8575 | ||
|
|
9d90bd2835 | ||
|
|
7445e68df4 | ||
|
|
ab42625ad2 | ||
|
|
18789a0a53 | ||
|
|
68a37bb56a | ||
|
|
3b33652c47 | ||
|
|
1e0c4c3904 | ||
|
|
04e223de16 | ||
|
|
c4a691aa8a | ||
|
|
ff9dde163a | ||
|
|
eb7efbd1a5 | ||
|
|
8c8c362c54 | ||
|
|
66e119ad5d | ||
|
|
6dedc04a05 | ||
|
|
0cf8bad0df | ||
|
|
95c9582d8b | ||
|
|
7815126ff5 | ||
|
|
a5fa9de54b | ||
|
|
95f1a2c630 | ||
|
|
1e256ae1fd | ||
|
|
9fc9c54fa1 | ||
|
|
1b362b1e02 | ||
|
|
04e3172cca | ||
|
|
1caab7f3f7 | ||
|
|
9d33c725ad | ||
|
|
6ed1d38106 | ||
|
|
0f07ddedaf | ||
|
|
289945b471 | ||
|
|
f331a6d144 | ||
|
|
0c8c12a651 | ||
|
|
028c3bb2fa | ||
|
|
d7a5a0d405 | ||
|
|
5ef5f6e531 | ||
|
|
1d205734b3 | ||
|
|
5edd43884f | ||
|
|
c1992373bc | ||
|
|
ed562f9c8a | ||
|
|
b4d44ef8c7 | ||
|
|
ad0c16a1b4 | ||
|
|
7eabe66853 | ||
|
|
3983d73695 | ||
|
|
161d4c4562 | ||
|
|
9a1e89564e | ||
|
|
0c18c5b4f6 | ||
|
|
3e12ba34f7 | ||
|
|
e71e29391b | ||
|
|
9b7b9a7af0 | ||
|
|
a23819c308 | ||
|
|
6cb1825d96 | ||
|
|
77b8c758dc | ||
|
|
e5a582cfad | ||
|
|
ec83db267e | ||
|
|
bfd026cae7 | ||
|
|
35f1dd8082 | ||
|
|
7ed0e7dd23 | ||
|
|
1a3cbf7a9d | ||
|
|
d9e4abc3de | ||
|
|
a4186085d3 | ||
|
|
26b1846bb4 | ||
|
|
1aa89527a6 | ||
|
|
eac76d7ad0 | ||
|
|
cea0cd56f6 | ||
|
|
c4b897f282 | ||
|
|
47389dbabb | ||
|
|
a2f8b1a851 | ||
|
|
feac0a058f | ||
|
|
27eeac9fd4 | ||
|
|
a14db4b194 | ||
|
|
54ee271a47 | ||
|
|
a3a9be4f7f | ||
|
|
d4f0a832f3 | ||
|
|
7dc533372c | ||
|
|
1737d87713 | ||
|
|
dbb98dea11 | ||
|
|
802b382b36 | ||
|
|
fc82999d45 | ||
|
|
08aa000c07 | ||
|
|
39015b5100 | ||
|
|
0d635ad419 | ||
|
|
9133205915 | ||
|
|
725ac10c3d | ||
|
|
2b76358c8f | ||
|
|
833c360698 | ||
|
|
7da1e67b01 | ||
|
|
7eb86a47dd | ||
|
|
d67e383c28 | ||
|
|
8749d3e1f5 | ||
|
|
30fba21c48 | ||
|
|
d83d35aee9 | ||
|
|
1d3caeea7d | ||
|
|
c8806dbb4d | ||
|
|
e5802c7f50 | ||
|
|
590f684d66 | ||
|
|
8e5a67f565 | ||
|
|
8d2fbce11e | ||
|
|
26916f6632 | ||
|
|
fbfa0d2d2a | ||
|
|
e626b99090 | ||
|
|
203859b71b | ||
|
|
9a25c22f3a | ||
|
|
0a03f41a7c | ||
|
|
56191939c8 | ||
|
|
beb754aaaa | ||
|
|
f234f740ca | ||
|
|
e14679694c | ||
|
|
e06712397e | ||
|
|
b6c6df7ffc | ||
|
|
375c6f56c9 | ||
|
|
0bf85c97b5 | ||
|
|
630e582321 | ||
|
|
a89fe23bdd | ||
|
|
a7a5fa9a31 | ||
|
|
c73a7eee2f | ||
|
|
121f8468d5 | ||
|
|
7b0b6936e0 | ||
|
|
597ea04a96 | ||
|
|
f7f90aeaaa | ||
|
|
227479f695 | ||
|
|
6477fb3fe0 | ||
|
|
4223f4f3c4 | ||
|
|
7288874d72 | ||
|
|
68f76f2daf | ||
|
|
fe6ddebc49 | ||
|
|
12b5acd073 | ||
|
|
a6f1fe07b3 | ||
|
|
85e3f2a946 | ||
|
|
d4f416de14 | ||
|
|
0d9a6702c1 | ||
|
|
d11285cdbf | ||
|
|
5f1f33d2b9 | ||
|
|
474daf752d | ||
|
|
27d1b92690 | ||
|
|
993afa4c19 | ||
|
|
028d891c32 | ||
|
|
0df55ec22d | ||
|
|
579f64774d | ||
|
|
792f8d939d | ||
|
|
e4fb02fcda | ||
|
|
0c14c641d0 | ||
|
|
dba671fd1e | ||
|
|
80d1693722 | ||
|
|
26014a11b2 | ||
|
|
848fddd55a | ||
|
|
97f5f05f1a | ||
|
|
25b82810f2 | ||
|
|
9b1e506fa7 | ||
|
|
7a42996e97 | ||
|
|
dbfcebcf67 | ||
|
|
37c3f69a28 | ||
|
|
5d412890b4 | ||
|
|
1e318a4c40 | ||
|
|
d4549176ec | ||
|
|
61efdf492f | ||
|
|
67ea4d44c8 | ||
|
|
fdae05a4aa | ||
|
|
5efb837ee8 | ||
|
|
584b626d93 | ||
|
|
de25a4ed8e | ||
|
|
2e852e5ca6 | ||
|
|
b11000715a | ||
|
|
b3f8b46f0f | ||
|
|
8d22a0664a | ||
|
|
20756a3453 | ||
|
|
c9b4d45a64 | ||
|
|
83f7f5468b | ||
|
|
59c042ad67 | ||
|
|
d550fc5068 | ||
|
|
6effba69a0 | ||
|
|
9b46956259 | ||
|
|
b5a4a732da | ||
|
|
487862367e | ||
|
|
5b63b9ce67 | ||
|
|
afbcd3e1df | ||
|
|
12b82c1395 | ||
|
|
863b765e0d | ||
|
|
731429c51c | ||
|
|
66f3bc61fe | ||
|
|
4efe35dd51 | ||
|
|
c92461ef93 | ||
|
|
405e6e0c1d | ||
|
|
0d227aef49 | ||
|
|
0e49002f42 | ||
|
|
2e016800e0 | ||
|
|
09f317b991 | ||
|
|
5a48cb1547 | ||
|
|
f632febf33 | ||
|
|
3c53467943 | ||
|
|
d358c0f4f7 | ||
|
|
de977a5b32 | ||
|
|
703d685d59 | ||
|
|
31a5f17408 | ||
|
|
c40ae3c455 | ||
|
|
b71a34279e | ||
|
|
8f8c0c4eda | ||
|
|
3a384f0e34 | ||
|
|
cf7bc11cbd | ||
|
|
be60ae8399 | ||
|
|
8e50d145d5 | ||
|
|
7a3d15525c | ||
|
|
64f32d8b24 | ||
|
|
949d6ba605 | ||
|
|
ceb8db09f4 | ||
|
|
ed05a0ebb8 | ||
|
|
a7cbb76655 | ||
|
|
93cbfa0b1f | ||
|
|
6120613a98 | ||
|
|
dbd00db159 | ||
|
|
befde952f5 | ||
|
|
1aa759e5be | ||
|
|
2de27719c1 | ||
|
|
21db57b537 | ||
|
|
dfe8d09477 | ||
|
|
90dfa22c64 | ||
|
|
0f35427645 | ||
|
|
7909f60ff8 | ||
|
|
9a1a8a4c30 | ||
|
|
fa7ad64140 | ||
|
|
8a0c23339d | ||
|
|
e7ab3aff16 | ||
|
|
d0948797b9 | ||
|
|
04a5e17451 | ||
|
|
47065c8042 | ||
|
|
488c778736 | ||
|
|
d10a7bcc75 | ||
|
|
afe44a2537 | ||
|
|
9eadafe850 | ||
|
|
dab3eefcc0 | ||
|
|
2b9a6b28d8 | ||
|
|
465f98ca2b | ||
|
|
a0312be4fd | ||
|
|
4a5161372b | ||
|
|
4c9921f752 | ||
|
|
6dd72d40ee | ||
|
|
db49c234bb | ||
|
|
4a9df0c4d9 | ||
|
|
461573c2ba | ||
|
|
291992753f | ||
|
|
fcefe7ac38 | ||
|
|
7da712fcff | ||
|
|
2fd8687624 | ||
|
|
639b1f8336 | ||
|
|
ab3b83e42a | ||
|
|
4818629c40 | ||
|
|
61784c860a | ||
|
|
d5667254f2 | ||
|
|
af2b93983f | ||
|
|
8281301cbd | ||
|
|
0043ab8371 | ||
|
|
500eaace82 | ||
|
|
28e8540c78 | ||
|
|
69adf682e2 | ||
|
|
69cd1ff6e1 | ||
|
|
415d67cc32 | ||
|
|
46a2fec79b | ||
|
|
560b322fca | ||
|
|
effe17ac85 | ||
|
|
7699acfc1b | ||
|
|
6e058240b3 | ||
|
|
f005c6bc44 | ||
|
|
7be462599f | ||
|
|
271ab432d9 | ||
|
|
4114777a4e | ||
|
|
9189a54442 | ||
|
|
b95ee762e3 | ||
|
|
9e3c4dc06b | ||
|
|
1891a3ac86 | ||
|
|
9ecdcac06d | ||
|
|
790cb6a2e1 | ||
|
|
4a98e5f025 | ||
|
|
507abc1d84 | ||
|
|
9b732fbbad | ||
|
|
220f1c6fc3 | ||
|
|
7a950c67b6 | ||
|
|
78f625dc8c | ||
|
|
211d26a3ed | ||
|
|
dce2bc6326 | ||
|
|
90e5d7f6f0 | ||
|
|
fc835e09c6 | ||
|
|
c6e782a637 | ||
|
|
1ddfbfc833 |
51
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
---
|
||||||
|
name: 报告Bug
|
||||||
|
about: 报告KnowStreaming的相关Bug
|
||||||
|
title: ''
|
||||||
|
labels: bug
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||||
|
|
||||||
|
你是否希望来认领这个Bug。
|
||||||
|
|
||||||
|
「 Y / N 」
|
||||||
|
|
||||||
|
### 环境信息
|
||||||
|
|
||||||
|
* KnowStreaming version : <font size=4 color =red> xxx </font>
|
||||||
|
* Operating System version : <font size=4 color =red> xxx </font>
|
||||||
|
* Java version : <font size=4 color =red> xxx </font>
|
||||||
|
|
||||||
|
|
||||||
|
### 重现该问题的步骤
|
||||||
|
|
||||||
|
1. xxx
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
2. xxx
|
||||||
|
|
||||||
|
|
||||||
|
3. xxx
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 预期结果
|
||||||
|
|
||||||
|
<!-- 写下应该出现的预期结果?-->
|
||||||
|
|
||||||
|
### 实际结果
|
||||||
|
|
||||||
|
<!-- 实际发生了什么? -->
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
如果有异常,请附上异常Trace:
|
||||||
|
|
||||||
|
```
|
||||||
|
Just put your stack trace here!
|
||||||
|
```
|
||||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
blank_issues_enabled: true
|
||||||
|
contact_links:
|
||||||
|
- name: 讨论问题
|
||||||
|
url: https://github.com/didi/KnowStreaming/discussions/new
|
||||||
|
about: 发起问题、讨论 等等
|
||||||
|
- name: KnowStreaming官网
|
||||||
|
url: https://knowstreaming.com/
|
||||||
|
about: KnowStreaming website
|
||||||
26
.github/ISSUE_TEMPLATE/detail_optimizing.md
vendored
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
name: 优化建议
|
||||||
|
about: 相关功能优化建议
|
||||||
|
title: ''
|
||||||
|
labels: Optimization Suggestions
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||||
|
|
||||||
|
你是否希望来认领这个优化建议。
|
||||||
|
|
||||||
|
「 Y / N 」
|
||||||
|
|
||||||
|
### 环境信息
|
||||||
|
|
||||||
|
* KnowStreaming version : <font size=4 color =red> xxx </font>
|
||||||
|
* Operating System version : <font size=4 color =red> xxx </font>
|
||||||
|
* Java version : <font size=4 color =red> xxx </font>
|
||||||
|
|
||||||
|
### 需要优化的功能点
|
||||||
|
|
||||||
|
|
||||||
|
### 建议如何优化
|
||||||
|
|
||||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: 提议新功能/需求
|
||||||
|
about: 给KnowStreaming提一个功能需求
|
||||||
|
title: ''
|
||||||
|
labels: feature
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
- [ ] 我在 [issues](https://github.com/didi/KnowStreaming/issues) 中并未搜索到与此相关的功能需求。
|
||||||
|
- [ ] 我在 [release note](https://github.com/didi/KnowStreaming/releases) 已经发布的版本中并没有搜到相关功能.
|
||||||
|
|
||||||
|
你是否希望来认领这个Feature。
|
||||||
|
|
||||||
|
「 Y / N 」
|
||||||
|
|
||||||
|
|
||||||
|
## 这里描述需求
|
||||||
|
<!--请尽可能的描述清楚您的需求 -->
|
||||||
|
|
||||||
12
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
name: 提个问题
|
||||||
|
about: 问KnowStreaming相关问题
|
||||||
|
title: ''
|
||||||
|
labels: question
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
- [ ] 我已经在 [issues](https://github.com/didi/KnowStreaming/issues) 搜索过相关问题了,并没有重复的。
|
||||||
|
|
||||||
|
## 在这里提出你的问题
|
||||||
22
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
请不要在没有先创建Issue的情况下创建Pull Request。
|
||||||
|
|
||||||
|
## 变更的目的是什么
|
||||||
|
|
||||||
|
XXXXX
|
||||||
|
|
||||||
|
## 简短的更新日志
|
||||||
|
|
||||||
|
XX
|
||||||
|
|
||||||
|
## 验证这一变化
|
||||||
|
|
||||||
|
XXXX
|
||||||
|
|
||||||
|
请遵循此清单,以帮助我们快速轻松地整合您的贡献:
|
||||||
|
|
||||||
|
* [ ] 确保有针对更改提交的 Github issue(通常在您开始处理之前)。诸如拼写错误之类的琐碎更改不需要 Github issue。您的Pull Request应该只解决这个问题,而不需要进行其他更改—— 一个 PR 解决一个问题。
|
||||||
|
* [ ] 格式化 Pull Request 标题,如[ISSUE #123] support Confluent Schema Registry。 Pull Request 中的每个提交都应该有一个有意义的主题行和正文。
|
||||||
|
* [ ] 编写足够详细的Pull Request描述,以了解Pull Request的作用、方式和原因。
|
||||||
|
* [ ] 编写必要的单元测试来验证您的逻辑更正。如果提交了新功能或重大更改,请记住在test 模块中添加 integration-test
|
||||||
|
* [ ] 确保编译通过,集成测试通过
|
||||||
|
|
||||||
74
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as
|
||||||
|
contributors and maintainers pledge to making participation in our project and
|
||||||
|
our community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||||
|
education, socio-economic status, nationality, personal appearance, race,
|
||||||
|
religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment
|
||||||
|
include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||||
|
advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic
|
||||||
|
address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable
|
||||||
|
behavior and are expected to take appropriate and fair corrective action in
|
||||||
|
response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community. Examples of
|
||||||
|
representing a project or community include using an official project e-mail
|
||||||
|
address, posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event. Representation of a project may be
|
||||||
|
further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting the project team at shirenchuang@didiglobal.com . All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. The project team is
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||||
|
Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||||
|
faith may face temporary or permanent repercussions as determined by other
|
||||||
|
members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||||
|
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||||
|
|
||||||
|
[homepage]: https://www.contributor-covenant.org
|
||||||
158
CONTRIBUTING.md
@@ -1,28 +1,150 @@
|
|||||||
# Contribution Guideline
|
|
||||||
|
|
||||||
Thanks for considering to contribute this project. All issues and pull requests are highly appreciated.
|
|
||||||
|
|
||||||
## Pull Requests
|
|
||||||
|
|
||||||
Before sending pull request to this project, please read and follow guidelines below.
|
# 为KnowStreaming做贡献
|
||||||
|
|
||||||
1. Branch: We only accept pull request on `dev` branch.
|
|
||||||
2. Coding style: Follow the coding style used in LogiKM.
|
|
||||||
3. Commit message: Use English and be aware of your spell.
|
|
||||||
4. Test: Make sure to test your code.
|
|
||||||
|
|
||||||
Add device mode, API version, related log, screenshots and other related information in your pull request if possible.
|
欢迎👏🏻来到KnowStreaming!本文档是关于如何为KnowStreaming做出贡献的指南。
|
||||||
|
|
||||||
NOTE: We assume all your contribution can be licensed under the [Apache License 2.0](LICENSE).
|
如果您发现不正确或遗漏的内容, 请留下意见/建议。
|
||||||
|
|
||||||
## Issues
|
## 行为守则
|
||||||
|
请务必阅读并遵守我们的 [行为准则](./CODE_OF_CONDUCT.md).
|
||||||
|
|
||||||
We love clearly described issues. :)
|
|
||||||
|
|
||||||
Following information can help us to resolve the issue faster.
|
|
||||||
|
|
||||||
* Device mode and hardware information.
|
## 贡献
|
||||||
* API version.
|
|
||||||
* Logs.
|
**KnowStreaming** 欢迎任何角色的新参与者,包括 **User** 、**Contributor**、**Committer**、**PMC** 。
|
||||||
* Screenshots.
|
|
||||||
* Steps to reproduce the issue.
|
我们鼓励新人积极加入 **KnowStreaming** 项目,从User到Contributor、Committer ,甚至是 PMC 角色。
|
||||||
|
|
||||||
|
为了做到这一点,新人需要积极地为 **KnowStreaming** 项目做出贡献。以下介绍如何对 **KnowStreaming** 进行贡献。
|
||||||
|
|
||||||
|
|
||||||
|
### 创建/打开 Issue
|
||||||
|
|
||||||
|
如果您在文档中发现拼写错误、在代码中**发现错误**或想要**新功能**或想要**提供建议**,您可以在 GitHub 上[创建一个Issue](https://github.com/didi/KnowStreaming/issues/new/choose) 进行报告。
|
||||||
|
|
||||||
|
|
||||||
|
如果您想直接贡献, 您可以选择下面标签的问题。
|
||||||
|
|
||||||
|
- [contribution welcome](https://github.com/didi/KnowStreaming/labels/contribution%20welcome) : 非常需要解决/新增 的Issues
|
||||||
|
- [good first issue](https://github.com/didi/KnowStreaming/labels/good%20first%20issue): 对新人比较友好, 新人可以拿这个Issue来练练手热热身。
|
||||||
|
|
||||||
|
<font color=red ><b> 请注意,任何 PR 都必须与有效issue相关联。否则,PR 将被拒绝。</b></font>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 开始你的贡献
|
||||||
|
|
||||||
|
**分支介绍**
|
||||||
|
|
||||||
|
我们将 `dev`分支作为开发分支, 说明这是一个不稳定的分支。
|
||||||
|
|
||||||
|
此外,我们的分支模型符合 [https://nvie.com/posts/a-successful-git-branching-model/](https://nvie.com/posts/a-successful-git-branching-model/). 我们强烈建议新人在创建PR之前先阅读上述文章。
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
**贡献流程**
|
||||||
|
|
||||||
|
为方便描述,我们这里定义一下2个名词:
|
||||||
|
|
||||||
|
自己Fork出来的仓库是私人仓库, 我们这里称之为 :**分叉仓库**
|
||||||
|
Fork的源项目,我们称之为:**源仓库**
|
||||||
|
|
||||||
|
|
||||||
|
现在,如果您准备好创建PR, 以下是贡献者的工作流程:
|
||||||
|
|
||||||
|
1. Fork [KnowStreaming](https://github.com/didi/KnowStreaming) 项目到自己的仓库
|
||||||
|
|
||||||
|
2. 从源仓库的`dev`拉取并创建自己的本地分支,例如: `dev`
|
||||||
|
3. 在本地分支上对代码进行修改
|
||||||
|
4. Rebase 开发分支, 并解决冲突
|
||||||
|
5. commit 并 push 您的更改到您自己的**分叉仓库**
|
||||||
|
6. 创建一个 Pull Request 到**源仓库**的`dev`分支中。
|
||||||
|
7. 等待回复。如果回复的慢,请无情的催促。
|
||||||
|
|
||||||
|
|
||||||
|
更为详细的贡献流程请看:[贡献流程](./docs/contributer_guide/贡献流程.md)
|
||||||
|
|
||||||
|
创建Pull Request时:
|
||||||
|
|
||||||
|
1. 请遵循 PR的 [模板](./.github/PULL_REQUEST_TEMPLATE.md)
|
||||||
|
2. 请确保 PR 有相应的issue。
|
||||||
|
3. 如果您的 PR 包含较大的更改,例如组件重构或新组件,请编写有关其设计和使用的详细文档(在对应的issue中)。
|
||||||
|
4. 注意单个 PR 不能太大。如果需要进行大量更改,最好将更改分成几个单独的 PR。
|
||||||
|
5. 在合并PR之前,尽量的将最终的提交信息清晰简洁, 将多次修改的提交尽可能的合并为一次提交。
|
||||||
|
6. 创建 PR 后,将为PR分配一个或多个reviewers。
|
||||||
|
|
||||||
|
|
||||||
|
<font color=red><b>如果您的 PR 包含较大的更改,例如组件重构或新组件,请编写有关其设计和使用的详细文档。</b></font>
|
||||||
|
|
||||||
|
|
||||||
|
# 代码审查指南
|
||||||
|
|
||||||
|
Commiter将轮流review代码,以确保在合并前至少有一名Commiter完成review
|
||||||
|
|
||||||
|
一些原则:
|
||||||
|
|
||||||
|
- 可读性——重要的代码应该有详细的文档。API 应该有 Javadoc。代码风格应与现有风格保持一致。
|
||||||
|
- 优雅:新的函数、类或组件应该设计得很好。
|
||||||
|
- 可测试性——单元测试用例应该覆盖 80% 的新代码。
|
||||||
|
- 可维护性 - 遵守我们的编码规范。
|
||||||
|
|
||||||
|
|
||||||
|
# 开发者
|
||||||
|
|
||||||
|
## 成为Contributor
|
||||||
|
|
||||||
|
只要成功提交并合并PR , 则为Contributor
|
||||||
|
|
||||||
|
贡献者名单请看:[贡献者名单](./docs/contributer_guide/开发者名单.md)
|
||||||
|
|
||||||
|
## 尝试成为Commiter
|
||||||
|
|
||||||
|
一般来说, 贡献8个重要的补丁并至少让三个不同的人来Review他们(您需要3个Commiter的支持)。
|
||||||
|
然后请人给你提名, 您需要展示您的
|
||||||
|
|
||||||
|
1. 至少8个重要的PR和项目的相关问题
|
||||||
|
2. 与团队合作的能力
|
||||||
|
3. 了解项目的代码库和编码风格
|
||||||
|
4. 编写好代码的能力
|
||||||
|
|
||||||
|
当前的Commiter可以通过在KnowStreaming中的Issue标签 `nomination`(提名)来提名您
|
||||||
|
|
||||||
|
1. 你的名字和姓氏
|
||||||
|
2. 指向您的Git个人资料的链接
|
||||||
|
3. 解释为什么你应该成为Commiter
|
||||||
|
4. 详细说明提名人与您合作的3个PR以及相关问题,这些问题可以证明您的能力。
|
||||||
|
|
||||||
|
另外2个Commiter需要支持您的**提名**,如果5个工作日内没有人反对,您就是提交者,如果有人反对或者想要更多的信息,Commiter会讨论并通常达成共识(5个工作日内) 。
|
||||||
|
|
||||||
|
|
||||||
|
# 开源奖励计划
|
||||||
|
|
||||||
|
|
||||||
|
我们非常欢迎开发者们为KnowStreaming开源项目贡献一份力量,相应也将给予贡献者激励以表认可与感谢。
|
||||||
|
|
||||||
|
|
||||||
|
## 参与贡献
|
||||||
|
|
||||||
|
1. 积极参与 Issue 的讨论,如答疑解惑、提供想法或报告无法解决的错误(Issue)
|
||||||
|
2. 撰写和改进项目的文档(Wiki)
|
||||||
|
3. 提交补丁优化代码(Coding)
|
||||||
|
|
||||||
|
|
||||||
|
## 你将获得
|
||||||
|
|
||||||
|
1. 加入KnowStreaming开源项目贡献者名单并展示
|
||||||
|
2. KnowStreaming开源贡献者证书(纸质&电子版)
|
||||||
|
3. KnowStreaming贡献者精美大礼包(KnowStreaming/滴滴 周边)
|
||||||
|
|
||||||
|
|
||||||
|
## 相关规则
|
||||||
|
|
||||||
|
- Contributor和Commiter都会有对应的证书和对应的礼包
|
||||||
|
- 每季度有KnowStreaming项目团队评选出杰出贡献者,颁发相应证书。
|
||||||
|
- 年末进行年度评选
|
||||||
|
|
||||||
|
贡献者名单请看:[贡献者名单](./docs/contributer_guide/开发者名单.md)
|
||||||
32
README.md
@@ -45,22 +45,29 @@
|
|||||||
|
|
||||||
## `Know Streaming` 简介
|
## `Know Streaming` 简介
|
||||||
|
|
||||||
`Know Streaming`是一套云原生的Kafka管控平台,脱胎于众多互联网内部多年的Kafka运营实践经验,专注于Kafka运维管控、监控告警、资源治理、多活容灾等核心场景。在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设,提供一系列特色的功能,极大地方便了用户和运维人员的日常使用,让普通运维人员都能成为Kafka专家。整体具有以下特点:
|
`Know Streaming`是一套云原生的Kafka管控平台,脱胎于众多互联网内部多年的Kafka运营实践经验,专注于Kafka运维管控、监控告警、资源治理、多活容灾等核心场景。在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设,提供一系列特色的功能,极大地方便了用户和运维人员的日常使用,让普通运维人员都能成为Kafka专家。
|
||||||
|
|
||||||
|
我们现在正在收集 Know Streaming 用户信息,以帮助我们进一步改进 Know Streaming。
|
||||||
|
请在 [issue#663](https://github.com/didi/KnowStreaming/issues/663) 上提供您的使用信息来支持我们:[谁在使用 Know Streaming](https://github.com/didi/KnowStreaming/issues/663)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
整体具有以下特点:
|
||||||
|
|
||||||
- 👀 **零侵入、全覆盖**
|
- 👀 **零侵入、全覆盖**
|
||||||
- 无需侵入改造 `Apache Kafka` ,一键便能纳管 `0.10.x` ~ `3.x.x` 众多版本的Kafka,包括 `ZK` 或 `Raft` 运行模式的版本,同时在兼容架构上具备良好的扩展性,帮助您提升集群管理水平;
|
- 无需侵入改造 `Apache Kafka` ,一键便能纳管 `0.10.x` ~ `3.x.x` 众多版本的Kafka,包括 `ZK` 或 `Raft` 运行模式的版本,同时在兼容架构上具备良好的扩展性,帮助您提升集群管理水平;
|
||||||
|
|
||||||
- 🌪️ **零成本、界面化**
|
- 🌪️ **零成本、界面化**
|
||||||
- 提炼高频 CLI 能力,设计合理的产品路径,提供清新美观的 GUI 界面,支持 Cluster、Broker、Topic、Group、Message、ACL 等组件 GUI 管理,普通用户5分钟即可上手;
|
- 提炼高频 CLI 能力,设计合理的产品路径,提供清新美观的 GUI 界面,支持 Cluster、Broker、Zookeeper、Topic、ConsumerGroup、Message、ACL、Connect 等组件 GUI 管理,普通用户5分钟即可上手;
|
||||||
|
|
||||||
- 👏 **云原生、插件化**
|
- 👏 **云原生、插件化**
|
||||||
- 基于云原生构建,具备水平扩展能力,只需要增加节点即可获取更强的采集及对外服务能力,提供众多可热插拔的企业级特性,覆盖可观测性生态整合、资源治理、多活容灾等核心场景;
|
- 基于云原生构建,具备水平扩展能力,只需要增加节点即可获取更强的采集及对外服务能力,提供众多可热插拔的企业级特性,覆盖可观测性生态整合、资源治理、多活容灾等核心场景;
|
||||||
|
|
||||||
- 🚀 **专业能力**
|
- 🚀 **专业能力**
|
||||||
- 集群管理:支持集群一键纳管,健康分析、核心组件观测 等功能;
|
- 集群管理:支持一键纳管,健康分析、核心组件观测 等功能;
|
||||||
- 观测提升:多维度指标观测大盘、观测指标最佳实践 等功能;
|
- 观测提升:多维度指标观测大盘、观测指标最佳实践 等功能;
|
||||||
- 异常巡检:集群多维度健康巡检、集群多维度健康分 等功能;
|
- 异常巡检:集群多维度健康巡检、集群多维度健康分 等功能;
|
||||||
- 能力增强:Topic扩缩副本、Topic副本迁移 等功能;
|
- 能力增强:集群负载均衡、Topic扩缩副本、Topic副本迁移 等功能;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -99,9 +106,13 @@
|
|||||||
|
|
||||||
## 成为社区贡献者
|
## 成为社区贡献者
|
||||||
|
|
||||||
点击 [这里](CONTRIBUTING.md),了解如何成为 Know Streaming 的贡献者
|
1. [贡献源码](https://doc.knowstreaming.com/product/10-contribution) 了解如何成为 Know Streaming 的贡献者
|
||||||
|
2. [具体贡献流程](https://doc.knowstreaming.com/product/10-contribution#102-贡献流程)
|
||||||
|
3. [开源激励计划](https://doc.knowstreaming.com/product/10-contribution#105-开源激励计划)
|
||||||
|
4. [贡献者名单](https://doc.knowstreaming.com/product/10-contribution#106-贡献者名单)
|
||||||
|
|
||||||
|
|
||||||
|
获取KnowStreaming开源社区证书。
|
||||||
|
|
||||||
## 加入技术交流群
|
## 加入技术交流群
|
||||||
|
|
||||||
@@ -133,3 +144,14 @@ PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况
|
|||||||
**`2、微信群`**
|
**`2、微信群`**
|
||||||
|
|
||||||
微信加群:添加`mike_zhangliang`、`PenceXie`的微信号备注KnowStreaming加群。
|
微信加群:添加`mike_zhangliang`、`PenceXie`的微信号备注KnowStreaming加群。
|
||||||
|
<br/>
|
||||||
|
|
||||||
|
加群之前有劳点一下 star,一个小小的 star 是对KnowStreaming作者们努力建设社区的动力。
|
||||||
|
|
||||||
|
感谢感谢!!!
|
||||||
|
|
||||||
|
<img width="116" alt="wx" src="https://user-images.githubusercontent.com/71620349/192257217-c4ebc16c-3ad9-485d-a914-5911d3a4f46b.png">
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
[](https://star-history.com/#didi/KnowStreaming&Date)
|
||||||
|
|||||||
@@ -1,6 +1,191 @@
|
|||||||
|
|
||||||
|
## v3.0.1
|
||||||
|
|
||||||
|
**Bug修复**
|
||||||
|
- 修复重置 Group Offset 时,提示信息中缺少 Dead 状态也可进行重置的信息;
|
||||||
|
- 修复 Ldap 某个属性不存在时,会直接抛出空指针导致登陆失败的问题;
|
||||||
|
- 修复集群 Topic 列表页,健康分详情信息中,检查时间展示错误的问题;
|
||||||
|
- 修复更新健康检查结果时,出现死锁的问题;
|
||||||
|
- 修复 Replica 索引模版错误的问题;
|
||||||
|
- 修复 FAQ 文档中的错误链接;
|
||||||
|
- 修复 Broker 的 TopN 指标不存在时,页面数据不展示的问题;
|
||||||
|
- 修复 Group 详情页,图表时间范围选择不生效的问题;
|
||||||
|
|
||||||
|
|
||||||
## v3.0.0-beta
|
**体验优化**
|
||||||
|
- 集群 Group 列表按照 Group 维度进行展示;
|
||||||
|
- 优化避免因 ES 中该指标不存在,导致日志中出现大量空指针的问题;
|
||||||
|
- 优化全局 Message & Notification 展示效果;
|
||||||
|
- 优化 Topic 扩分区名称 & 描述展示;
|
||||||
|
|
||||||
|
|
||||||
|
**新增**
|
||||||
|
- Broker 列表页面,新增 JMX 是否成功连接的信息;
|
||||||
|
|
||||||
|
|
||||||
|
**ZK 部分(未完全发布)**
|
||||||
|
- 后端补充 Kafka ZK 指标采集,Kafka ZK 信息获取相关功能;
|
||||||
|
- 增加本地缓存,避免同一采集周期内 ZK 指标重复采集;
|
||||||
|
- 增加 ZK 节点采集失败跳过策略,避免不断对存在问题的节点不断尝试;
|
||||||
|
- 修复 zkAvgLatency 指标转 Long 时抛出异常问题;
|
||||||
|
- 修复 ks_km_zookeeper 表中,role 字段类型错误问题;
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## v3.0.0
|
||||||
|
|
||||||
|
**Bug修复**
|
||||||
|
- 修复 Group 指标防重复采集不生效问题
|
||||||
|
- 修复自动创建 ES 索引模版失败问题
|
||||||
|
- 修复 Group+Topic 列表中存在已删除Topic的问题
|
||||||
|
- 修复使用 MySQL-8 ,因兼容问题, start_time 信息为 NULL 时,会导致创建任务失败的问题
|
||||||
|
- 修复 Group 信息表更新时,出现死锁的问题
|
||||||
|
- 修复图表补点逻辑与图表时间范围不适配的问题
|
||||||
|
|
||||||
|
|
||||||
|
**体验优化**
|
||||||
|
- 按照资源类别,拆分健康巡检任务
|
||||||
|
- 优化 Group 详情页的指标为实时获取
|
||||||
|
- 图表拖拽排序支持用户级存储
|
||||||
|
- 多集群列表 ZK 信息展示兼容无 ZK 情况
|
||||||
|
- Topic 详情消息预览支持复制功能
|
||||||
|
- 部分内容大数字支持千位分割符展示
|
||||||
|
|
||||||
|
|
||||||
|
**新增**
|
||||||
|
- 集群信息中,新增 Zookeeper 客户端配置字段
|
||||||
|
- 集群信息中,新增 Kafka 集群运行模式字段
|
||||||
|
- 新增 docker-compose 的部署方式
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## v3.0.0-beta.3
|
||||||
|
|
||||||
|
**文档**
|
||||||
|
- FAQ 补充权限识别失败问题的说明
|
||||||
|
- 同步更新文档,保持与官网一致
|
||||||
|
|
||||||
|
|
||||||
|
**Bug修复**
|
||||||
|
- Offset 信息获取时,过滤掉无 Leader 的分区
|
||||||
|
- 升级 oshi-core 版本至 5.6.1 版本,修复 Windows 系统获取系统指标失败问题
|
||||||
|
- 修复 JMX 连接被关闭后,未进行重建的问题
|
||||||
|
- 修复因 DB 中 Broker 信息不存在导致 TotalLogSize 指标获取时抛空指针问题
|
||||||
|
- 修复 dml-logi.sql 中,SQL 注释错误的问题
|
||||||
|
- 修复 startup.sh 中,识别操作系统类型错误的问题
|
||||||
|
- 修复配置管理页面删除配置失败的问题
|
||||||
|
- 修复系统管理应用文件引用路径
|
||||||
|
- 修复 Topic Messages 详情提示信息点击跳转 404 的问题
|
||||||
|
- 修复扩副本时,当前副本数不显示问题
|
||||||
|
|
||||||
|
|
||||||
|
**体验优化**
|
||||||
|
- Topic-Messages 页面,增加返回数据的排序以及按照Earliest/Latest的获取方式
|
||||||
|
- 优化 GroupOffsetResetEnum 类名为 OffsetTypeEnum,使得类名含义更准确
|
||||||
|
- 移动 KafkaZKDAO 类,及 Kafka Znode 实体类的位置,使得 Kafka Zookeeper DAO 更加内聚及便于识别
|
||||||
|
- 后端补充 Overview 页面指标排序的功能
|
||||||
|
- 前端 Webpack 配置优化
|
||||||
|
- Cluster Overview 图表取消放大展示功能
|
||||||
|
- 列表页增加手动刷新功能
|
||||||
|
- 接入/编辑集群,优化 JMX-PORT,Version 信息的回显,优化JMX信息的展示
|
||||||
|
- 提高登录页面图片展示清晰度
|
||||||
|
- 部分样式和文案优化
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## v3.0.0-beta.2
|
||||||
|
|
||||||
|
**文档**
|
||||||
|
- 新增登录系统对接文档
|
||||||
|
- 优化前端工程打包构建部分文档说明
|
||||||
|
- FAQ补充KnowStreaming连接特定JMX IP的说明
|
||||||
|
|
||||||
|
|
||||||
|
**Bug修复**
|
||||||
|
- 修复logi_security_oplog表字段过短,导致删除Topic等操作无法记录的问题
|
||||||
|
- 修复ES查询时,抛java.lang.NumberFormatException: For input string: "{"value":0,"relation":"eq"}" 问题
|
||||||
|
- 修复LogStartOffset和LogEndOffset指标单位错误问题
|
||||||
|
- 修复进行副本变更时,旧副本数为NULL的问题
|
||||||
|
- 修复集群Group列表,在第二页搜索时,搜索时返回的分页信息错误问题
|
||||||
|
- 修复重置Offset时,返回的错误信息提示不一致的问题
|
||||||
|
- 修复集群查看,系统查看,LoadRebalance等页面权限点缺失问题
|
||||||
|
- 修复查询不存在的Topic时,错误信息提示不明显的问题
|
||||||
|
- 修复Windows用户打包前端工程报错的问题
|
||||||
|
- package-lock.json锁定前端依赖版本号,修复因依赖自动升级导致打包失败等问题
|
||||||
|
- 系统管理子应用,补充后端返回的Code码拦截,解决后端接口返回报错不展示的问题
|
||||||
|
- 修复用户登出后,依旧可以访问系统的问题
|
||||||
|
- 修复巡检任务配置时,数值显示错误的问题
|
||||||
|
- 修复Broker/Topic Overview 图表和图表详情问题
|
||||||
|
- 修复Job扩缩副本任务明细数据错误的问题
|
||||||
|
- 修复重置Offset时,分区ID,Offset数值无限制问题
|
||||||
|
- 修复扩缩/迁移副本时,无法选中Kafka系统Topic的问题
|
||||||
|
- 修复Topic的Config页面,编辑表单时不能正确回显当前值的问题
|
||||||
|
- 修复Broker Card返回数据后依旧展示加载态的问题
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
**体验优化**
|
||||||
|
- 优化默认用户密码为 admin/admin
|
||||||
|
- 缩短新增集群后,集群信息加载的耗时
|
||||||
|
- 集群Broker列表,增加Controller角色信息
|
||||||
|
- 副本变更任务结束后,增加进行优先副本选举的操作
|
||||||
|
- Task模块任务分为Metrics、Common、Metadata三类任务,每类任务配备独立线程池,减少对Job模块的线程池,以及不同类任务之间的相互影响
|
||||||
|
- 删除代码中存在的多余无用文件
|
||||||
|
- 自动新增ES索引模版及近7天索引,减少用户搭建时需要做的事项
|
||||||
|
- 优化前端工程打包流程
|
||||||
|
- 优化登录页文案,页面左侧栏内容,单集群详情样式,Topic列表趋势图等
|
||||||
|
- 首次进入Broker/Topic图表详情时,进行预缓存数据从而优化体验
|
||||||
|
- 优化Topic详情Partition Tab的展示
|
||||||
|
- 多集群列表页增加编辑功能
|
||||||
|
- 优化副本变更时,迁移时间支持分钟级别粒度
|
||||||
|
- logi-security版本升级至2.10.13
|
||||||
|
- logi-elasticsearch-client版本升级至1.0.24
|
||||||
|
|
||||||
|
|
||||||
|
**能力提升**
|
||||||
|
- 支持Ldap登录认证
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## v3.0.0-beta.1
|
||||||
|
|
||||||
|
**文档**
|
||||||
|
- 新增Task模块说明文档
|
||||||
|
- FAQ补充 `Specified key was too long; max key length is 767 bytes ` 错误说明
|
||||||
|
- FAQ补充 `出现ESIndexNotFoundException报错` 错误说明
|
||||||
|
|
||||||
|
|
||||||
|
**Bug修复**
|
||||||
|
- 修复 Consumer 点击 Stop 未停止检索的问题
|
||||||
|
- 修复创建/编辑角色权限报错问题
|
||||||
|
- 修复多集群管理/单集群详情均衡卡片状态错误问题
|
||||||
|
- 修复版本列表未排序问题
|
||||||
|
- 修复Raft集群Controller信息不断记录问题
|
||||||
|
- 修复部分版本消费组描述信息获取失败问题
|
||||||
|
- 修复分区Offset获取失败的日志中,缺少Topic名称信息问题
|
||||||
|
- 修复GitHub图地址错误,及图裂问题
|
||||||
|
- 修复Broker默认使用的地址和注释不一致问题
|
||||||
|
- 修复 Consumer 列表分页不生效问题
|
||||||
|
- 修复操作记录表operation_methods字段缺少默认值问题
|
||||||
|
- 修复集群均衡表中move_broker_list字段无效的问题
|
||||||
|
- 修复KafkaUser、KafkaACL信息获取时,日志一直重复提示不支持问题
|
||||||
|
- 修复指标缺失时,曲线出现掉底的问题
|
||||||
|
|
||||||
|
|
||||||
|
**体验优化**
|
||||||
|
- 优化前端构建时间和打包体积,增加依赖打包的分包策略
|
||||||
|
- 优化产品样式和文案展示
|
||||||
|
- 优化ES客户端数为可配置
|
||||||
|
- 优化日志中大量出现的MySQL Key冲突日志
|
||||||
|
|
||||||
|
|
||||||
|
**能力提升**
|
||||||
|
- 增加周期任务,用于主动创建缺少的ES模版及索引的能力,减少额外的脚本操作
|
||||||
|
- 增加JMX连接的Broker地址可选择的能力
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## v3.0.0-beta.0
|
||||||
|
|
||||||
**1、多集群管理**
|
**1、多集群管理**
|
||||||
|
|
||||||
|
|||||||
@@ -439,7 +439,7 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
|
|||||||
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{
|
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{
|
||||||
"order" : 10,
|
"order" : 10,
|
||||||
"index_patterns" : [
|
"index_patterns" : [
|
||||||
"ks_kafka_partition_metric*"
|
"ks_kafka_replication_metric*"
|
||||||
],
|
],
|
||||||
"settings" : {
|
"settings" : {
|
||||||
"index" : {
|
"index" : {
|
||||||
@@ -500,29 +500,6 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"aliases" : { }
|
"aliases" : { }
|
||||||
}[root@10-255-0-23 template]# cat ks_kafka_replication_metric
|
|
||||||
PUT _template/ks_kafka_replication_metric
|
|
||||||
{
|
|
||||||
"order" : 10,
|
|
||||||
"index_patterns" : [
|
|
||||||
"ks_kafka_replication_metric*"
|
|
||||||
],
|
|
||||||
"settings" : {
|
|
||||||
"index" : {
|
|
||||||
"number_of_shards" : "10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"mappings" : {
|
|
||||||
"properties" : {
|
|
||||||
"timestamp" : {
|
|
||||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
|
||||||
"index" : true,
|
|
||||||
"type" : "date",
|
|
||||||
"doc_values" : true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"aliases" : { }
|
|
||||||
}'
|
}'
|
||||||
|
|
||||||
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{
|
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{
|
||||||
@@ -642,6 +619,91 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
|
|||||||
"aliases" : { }
|
"aliases" : { }
|
||||||
}'
|
}'
|
||||||
|
|
||||||
|
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_zookeeper_metric -d '{
|
||||||
|
"order" : 10,
|
||||||
|
"index_patterns" : [
|
||||||
|
"ks_kafka_zookeeper_metric*"
|
||||||
|
],
|
||||||
|
"settings" : {
|
||||||
|
"index" : {
|
||||||
|
"number_of_shards" : "10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings" : {
|
||||||
|
"properties" : {
|
||||||
|
"routingValue" : {
|
||||||
|
"type" : "text",
|
||||||
|
"fields" : {
|
||||||
|
"keyword" : {
|
||||||
|
"ignore_above" : 256,
|
||||||
|
"type" : "keyword"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"clusterPhyId" : {
|
||||||
|
"type" : "long"
|
||||||
|
},
|
||||||
|
"metrics" : {
|
||||||
|
"properties" : {
|
||||||
|
"AvgRequestLatency" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"MinRequestLatency" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"MaxRequestLatency" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"OutstandingRequests" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"NodeCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"WatchCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"NumAliveConnections" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"PacketsReceived" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"PacketsSent" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"EphemeralsCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"ApproximateDataSize" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"OpenFileDescriptorCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"MaxFileDescriptorCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"key" : {
|
||||||
|
"type" : "text",
|
||||||
|
"fields" : {
|
||||||
|
"keyword" : {
|
||||||
|
"ignore_above" : 256,
|
||||||
|
"type" : "keyword"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"timestamp" : {
|
||||||
|
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||||
|
"type" : "date"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"aliases" : { }
|
||||||
|
}'
|
||||||
|
|
||||||
for i in {0..6};
|
for i in {0..6};
|
||||||
do
|
do
|
||||||
logdate=_$(date -d "${i} day ago" +%Y-%m-%d)
|
logdate=_$(date -d "${i} day ago" +%Y-%m-%d)
|
||||||
@@ -650,6 +712,7 @@ do
|
|||||||
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \
|
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \
|
||||||
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \
|
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \
|
||||||
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \
|
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \
|
||||||
|
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_zookeeper_metric${logdate} && \
|
||||||
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \
|
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \
|
||||||
exit 2
|
exit 2
|
||||||
done
|
done
|
||||||
@@ -9,7 +9,7 @@ error_exit ()
|
|||||||
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME
|
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME
|
||||||
|
|
||||||
if [ -z "$JAVA_HOME" ]; then
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
if $darwin; then
|
if [ "Darwin" = "$(uname -s)" ]; then
|
||||||
|
|
||||||
if [ -x '/usr/libexec/java_home' ] ; then
|
if [ -x '/usr/libexec/java_home' ] ; then
|
||||||
export JAVA_HOME=`/usr/libexec/java_home`
|
export JAVA_HOME=`/usr/libexec/java_home`
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 9.5 KiB |
|
Before Width: | Height: | Size: 183 KiB |
|
Before Width: | Height: | Size: 50 KiB |
|
Before Width: | Height: | Size: 59 KiB |
1
docs/contributer_guide/代码规范.md
Normal file
@@ -0,0 +1 @@
|
|||||||
|
TODO.
|
||||||
6
docs/contributer_guide/开发者名单.md
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
|
||||||
|
开源贡献者证书发放名单(定期更新)
|
||||||
|
|
||||||
|
|
||||||
|
贡献者名单请看:[贡献者名单](https://doc.knowstreaming.com/product/10-contribution#106-贡献者名单)
|
||||||
|
|
||||||
6
docs/contributer_guide/贡献流程.md
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
|
||||||
|
|
||||||
|
<br>
|
||||||
|
<br>
|
||||||
|
|
||||||
|
请点击:[贡献流程](https://doc.knowstreaming.com/product/10-contribution#102-贡献流程)
|
||||||
264
docs/dev_guide/Task模块简介.md
Normal file
@@ -0,0 +1,264 @@
|
|||||||
|
# Task模块简介
|
||||||
|
|
||||||
|
## 1、Task简介
|
||||||
|
|
||||||
|
在 KnowStreaming 中(下面简称KS),Task模块主要是用于执行一些周期任务,包括Cluster、Broker、Topic等指标的定时采集,集群元数据定时更新至DB,集群状态的健康巡检等。在KS中,与Task模块相关的代码,我们都统一存放在km-task模块中。
|
||||||
|
|
||||||
|
Task模块是基于 LogiCommon 中的Logi-Job组件实现的任务周期执行,Logi-Job 的功能类似 XXL-Job,它是 XXL-Job 在 KnowStreaming 的内嵌实现,主要用于简化 KnowStreaming 的部署。
|
||||||
|
Logi-Job 的任务总共有两种执行模式,分别是:
|
||||||
|
|
||||||
|
+ 广播模式:同一KS集群下,同一任务周期中,所有KS主机都会执行该定时任务。
|
||||||
|
+ 抢占模式:同一KS集群下,同一任务周期中,仅有某一台KS主机会执行该任务。
|
||||||
|
|
||||||
|
KS集群范围定义:连接同一个DB,且application.yml中的spring.logi-job.app-name的名称一样的KS主机为同一KS集群。
|
||||||
|
|
||||||
|
## 2、使用指南
|
||||||
|
|
||||||
|
Task模块基于Logi-Job的广播模式与抢占模式,分别实现了任务的抢占执行、重复执行以及均衡执行,他们之间的差别是:
|
||||||
|
|
||||||
|
+ 抢占执行:同一个KS集群,同一个任务执行周期中,仅有一台KS主机执行该任务;
|
||||||
|
+ 重复执行:同一个KS集群,同一个任务执行周期中,所有KS主机都执行该任务。比如3台KS主机,3个Kafka集群,此时每台KS主机都会去采集这3个Kafka集群的指标;
|
||||||
|
+ 均衡执行:同一个KS集群,同一个任务执行周期中,每台KS主机仅执行该任务的一部分,所有的KS主机共同协作完成了任务。比如3台KS主机,3个Kafka集群,稳定运行情况下,每台KS主机将仅采集1个Kafka集群的指标,3台KS主机共同完成3个Kafka集群指标的采集。
|
||||||
|
|
||||||
|
下面我们看一下具体例子。
|
||||||
|
|
||||||
|
### 2.1、抢占模式——抢占执行
|
||||||
|
|
||||||
|
功能说明:
|
||||||
|
|
||||||
|
+ 同一个KS集群,同一个任务执行周期中,仅有一台KS主机执行该任务。
|
||||||
|
|
||||||
|
代码例子:
|
||||||
|
|
||||||
|
```java
|
||||||
|
// 1、实现Job接口,重写execute方法;
|
||||||
|
// 2、在类上添加@Task注解,并且配置好信息,指定为随机抢占模式;
|
||||||
|
// 效果:KS集群中,每5秒,会有一台KS主机输出 "测试定时任务运行中";
|
||||||
|
@Task(name = "TestJob",
|
||||||
|
description = "测试定时任务",
|
||||||
|
cron = "*/5 * * * * ?",
|
||||||
|
autoRegister = true,
|
||||||
|
consensual = ConsensualEnum.RANDOM, // 这里一定要设置为RANDOM
|
||||||
|
timeout = 6 * 60)
|
||||||
|
public class TestJob implements Job {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TaskResult execute(JobContext jobContext) throws Exception {
|
||||||
|
|
||||||
|
System.out.println("测试定时任务运行中");
|
||||||
|
return new TaskResult();
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 2.2、广播模式——重复执行
|
||||||
|
|
||||||
|
功能说明:
|
||||||
|
|
||||||
|
+ 同一个KS集群,同一个任务执行周期中,所有KS主机都执行该任务。比如3台KS主机,3个Kafka集群,此时每台KS主机都会去重复采集这3个Kafka集群的指标。
|
||||||
|
|
||||||
|
代码例子:
|
||||||
|
|
||||||
|
```java
|
||||||
|
// 1、实现Job接口,重写execute方法;
|
||||||
|
// 2、在类上添加@Task注解,并且配置好信息,指定为广播抢占模式;
|
||||||
|
// 效果:KS集群中,每5秒,每台KS主机都会输出 "测试定时任务运行中";
|
||||||
|
@Task(name = "TestJob",
|
||||||
|
description = "测试定时任务",
|
||||||
|
cron = "*/5 * * * * ?",
|
||||||
|
autoRegister = true,
|
||||||
|
consensual = ConsensualEnum.BROADCAST, // 这里一定要设置为BROADCAST
|
||||||
|
timeout = 6 * 60)
|
||||||
|
public class TestJob implements Job {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TaskResult execute(JobContext jobContext) throws Exception {
|
||||||
|
|
||||||
|
System.out.println("测试定时任务运行中");
|
||||||
|
return new TaskResult();
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 2.3、广播模式——均衡执行
|
||||||
|
|
||||||
|
功能说明:
|
||||||
|
|
||||||
|
+ 同一个KS集群,同一个任务执行周期中,每台KS主机仅执行该任务的一部分,所有的KS主机共同协作完成了任务。比如3台KS主机,3个Kafka集群,稳定运行情况下,每台KS主机将仅采集1个Kafka集群的指标,3台KS主机共同完成3个Kafka集群指标的采集。
|
||||||
|
|
||||||
|
代码例子:
|
||||||
|
|
||||||
|
+ 该模式有点特殊,是KS基于Logi-Job的广播模式,做的一个扩展,以下为一个使用例子:
|
||||||
|
|
||||||
|
```java
|
||||||
|
// 1、继承AbstractClusterPhyDispatchTask,实现processSubTask方法;
|
||||||
|
// 2、在类上添加@Task注解,并且配置好信息,指定为广播模式;
|
||||||
|
// 效果:在本样例中,每隔1分钟ks会将所有的kafka集群列表在ks集群主机内均衡拆分,每台主机会将分发到自身的Kafka集群依次执行processSubTask方法,实现KS集群的任务协同处理。
|
||||||
|
@Task(name = "kmJobTask",
|
||||||
|
description = "km job 模块调度执行任务",
|
||||||
|
cron = "0 0/1 * * * ? *",
|
||||||
|
autoRegister = true,
|
||||||
|
consensual = ConsensualEnum.BROADCAST,
|
||||||
|
timeout = 6 * 60)
|
||||||
|
public class KMJobTask extends AbstractClusterPhyDispatchTask {
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private JobService jobService;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
|
||||||
|
jobService.scheduleJobByClusterId(clusterPhy.getId());
|
||||||
|
return TaskResult.SUCCESS;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## 3、原理简介
|
||||||
|
|
||||||
|
### 3.1、Task注解说明
|
||||||
|
|
||||||
|
```java
|
||||||
|
public @interface Task {
|
||||||
|
String name() default ""; //任务名称
|
||||||
|
String description() default ""; //任务描述
|
||||||
|
String owner() default "system"; //拥有者
|
||||||
|
String cron() default ""; //定时执行的时间策略
|
||||||
|
int retryTimes() default 0; //失败以后所能重试的最大次数
|
||||||
|
long timeout() default 0; //在超时时间里重试
|
||||||
|
//是否自动注册任务到数据库中
|
||||||
|
//如果设置为false,需要手动去数据库km_task表注册定时任务信息。数据库记录和@Task注解缺一不可
|
||||||
|
boolean autoRegister() default false;
|
||||||
|
//执行模式:广播、随机抢占
|
||||||
|
//广播模式:同一集群下的所有服务器都会执行该定时任务
|
||||||
|
//随机抢占模式:同一集群下随机一台服务器执行该任务
|
||||||
|
ConsensualEnum consensual() default ConsensualEnum.RANDOM;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.2、数据库表介绍
|
||||||
|
|
||||||
|
+ logi_task:记录项目中的定时任务信息,一个定时任务对应一条记录。
|
||||||
|
+ logi_job:具体任务执行信息。
|
||||||
|
+ logi_job_log:定时任务的执行日志。
|
||||||
|
+ logi_worker:记录机器信息,实现集群控制。
|
||||||
|
|
||||||
|
### 3.3、均衡执行简介
|
||||||
|
|
||||||
|
#### 3.3.1、类关系图
|
||||||
|
|
||||||
|
这里以KMJobTask为例,简单介绍KM中的定时任务实现逻辑。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
+ Job:使用logi组件实现定时任务,必须实现该接口。
|
||||||
|
+ Comparable & EntifyIdInterface:比较接口,实现任务的排序逻辑。
|
||||||
|
+ AbstractDispatchTask:实现广播模式下,任务的均衡分发。
|
||||||
|
+ AbstractClusterPhyDispatchTask:对分发到当前服务器的集群列表进行枚举。
|
||||||
|
+ KMJobTask:实现对单个集群的定时任务处理。
|
||||||
|
|
||||||
|
#### 3.3.2、关键类代码
|
||||||
|
|
||||||
|
+ **AbstractDispatchTask类**
|
||||||
|
|
||||||
|
```java
|
||||||
|
// 实现Job接口的抽象类,进行任务的负载均衡执行
|
||||||
|
public abstract class AbstractDispatchTask<E extends Comparable & EntifyIdInterface> implements Job {
|
||||||
|
|
||||||
|
// 罗列所有的任务
|
||||||
|
protected abstract List<E> listAllTasks();
|
||||||
|
|
||||||
|
// 执行被分配给该KS主机的任务
|
||||||
|
protected abstract TaskResult processTask(List<E> subTaskList, long triggerTimeUnitMs);
|
||||||
|
|
||||||
|
// 被Logi-Job触发执行该方法
|
||||||
|
// 该方法进行任务的分配
|
||||||
|
@Override
|
||||||
|
public TaskResult execute(JobContext jobContext) {
|
||||||
|
try {
|
||||||
|
|
||||||
|
long triggerTimeUnitMs = System.currentTimeMillis();
|
||||||
|
|
||||||
|
// 获取所有的任务
|
||||||
|
List<E> allTaskList = this.listAllTasks();
|
||||||
|
|
||||||
|
// 计算当前KS机器需要执行的任务
|
||||||
|
List<E> subTaskList = this.selectTask(allTaskList, jobContext.getAllWorkerCodes(), jobContext.getCurrentWorkerCode());
|
||||||
|
|
||||||
|
// 进行任务处理
|
||||||
|
return this.processTask(subTaskList, triggerTimeUnitMs);
|
||||||
|
} catch (Exception e) {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
+ **AbstractClusterPhyDispatchTask类**
|
||||||
|
|
||||||
|
```java
|
||||||
|
// 继承AbstractDispatchTask的抽象类,对Kafka集群进行负载均衡执行
|
||||||
|
public abstract class AbstractClusterPhyDispatchTask extends AbstractDispatchTask<ClusterPhy> {
|
||||||
|
|
||||||
|
// 执行被分配的任务,具体由子类实现
|
||||||
|
protected abstract TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception;
|
||||||
|
|
||||||
|
// 返回所有的Kafka集群
|
||||||
|
@Override
|
||||||
|
public List<ClusterPhy> listAllTasks() {
|
||||||
|
return clusterPhyService.listAllClusters();
|
||||||
|
}
|
||||||
|
|
||||||
|
// 执行被分配给该KS主机的Kafka集群任务
|
||||||
|
@Override
|
||||||
|
public TaskResult processTask(List<ClusterPhy> subTaskList, long triggerTimeUnitMs) { // ... }
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
+ **KMJobTask类**
|
||||||
|
|
||||||
|
```java
|
||||||
|
// 加上@Task注解,并配置任务执行信息
|
||||||
|
@Task(name = "kmJobTask",
|
||||||
|
description = "km job 模块调度执行任务",
|
||||||
|
cron = "0 0/1 * * * ? *",
|
||||||
|
autoRegister = true,
|
||||||
|
consensual = ConsensualEnum.BROADCAST,
|
||||||
|
timeout = 6 * 60)
|
||||||
|
// 继承AbstractClusterPhyDispatchTask类
|
||||||
|
public class KMJobTask extends AbstractClusterPhyDispatchTask {
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private JobService jobService;
|
||||||
|
|
||||||
|
// 执行该Kafka集群的Job模块的任务
|
||||||
|
@Override
|
||||||
|
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
|
||||||
|
jobService.scheduleJobByClusterId(clusterPhy.getId());
|
||||||
|
return TaskResult.SUCCESS;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3.3.3、均衡执行总结
|
||||||
|
|
||||||
|
均衡执行的实现原理总结起来就是以下几点:
|
||||||
|
|
||||||
|
+ Logi-Job设置为广播模式,触发所有的KS主机执行任务;
|
||||||
|
+ 每台KS主机,被触发执行后,按照统一的规则,对任务列表,KS集群主机列表进行排序。然后按照顺序将任务列表均衡的分配给排序后的KS集群主机。KS集群稳定运行情况下,这一步保证了每台KS主机之间分配到的任务列表不重复,不丢失。
|
||||||
|
+ 最后每台KS主机,执行被分配到的任务。
|
||||||
|
|
||||||
|
## 4、注意事项
|
||||||
|
|
||||||
|
+ 不能100%保证任务在一个周期内有且仅执行一次,可能出现重复执行或丢失的情况,所以对于必须严格保证有且仅执行一次的任务,不建议基于Logi-Job进行任务控制。
|
||||||
|
+ 尽量让Logi-Job仅负责任务的触发,后续的执行建议放到自己创建的线程池中进行。
|
||||||
|
Before Width: | Height: | Size: 600 KiB |
|
Before Width: | Height: | Size: 228 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/need_modify_code.png
Normal file
|
After Width: | Height: | Size: 63 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/success_1.png
Normal file
|
After Width: | Height: | Size: 306 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/success_2.png
Normal file
|
After Width: | Height: | Size: 306 KiB |
BIN
docs/dev_guide/assets/support_kerberos_zk/watch_user_acl.png
Normal file
|
After Width: | Height: | Size: 17 KiB |
@@ -36,7 +36,7 @@ KS-KM 根据其需要纳管的 kafka 版本,按照上述三个维度构建了
|
|||||||
|
|
||||||
  KS-KM 的每个版本针对需要纳管的 kafka 版本列表,事先分析各个版本的差异性和产品需求,同时 KS-KM 构建了一套专门处理兼容性的服务,来进行兼容性的注册、字典构建、处理器分发等操作,其中版本兼容性处理器是来具体处理不同 kafka 版本差异性的地方。
|
  KS-KM 的每个版本针对需要纳管的 kafka 版本列表,事先分析各个版本的差异性和产品需求,同时 KS-KM 构建了一套专门处理兼容性的服务,来进行兼容性的注册、字典构建、处理器分发等操作,其中版本兼容性处理器是来具体处理不同 kafka 版本差异性的地方。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
  如上图所示,KS-KM 的 topic 服务在面对不同 kafka 版本时,其 topic 的创建、删除、扩容由于 kafka 版本自身的差异,导致 KnowStreaming 的处理也不一样,所以需要根据不同的 kafka 版本来实现不同的兼容性处理器,同时向 KnowStreaming 的兼容服务进行兼容性的注册,构建兼容性字典,后续在 KnowStreaming 的运行过程中,针对不同的 kafka 版本即可分发到不同的处理器中执行。
|
  如上图所示,KS-KM 的 topic 服务在面对不同 kafka 版本时,其 topic 的创建、删除、扩容由于 kafka 版本自身的差异,导致 KnowStreaming 的处理也不一样,所以需要根据不同的 kafka 版本来实现不同的兼容性处理器,同时向 KnowStreaming 的兼容服务进行兼容性的注册,构建兼容性字典,后续在 KnowStreaming 的运行过程中,针对不同的 kafka 版本即可分发到不同的处理器中执行。
|
||||||
|
|
||||||
|
|||||||
69
docs/dev_guide/支持Kerberos认证的ZK.md
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
|
||||||
|
## 支持Kerberos认证的ZK
|
||||||
|
|
||||||
|
|
||||||
|
### 1、修改 KnowStreaming 代码
|
||||||
|
|
||||||
|
代码位置:`src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminZKClient.java`
|
||||||
|
|
||||||
|
将 `createZKClient` 方法中第 135 行的 `false` 改为 `true`
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
修改完后重新进行打包编译,打包编译见:[打包编译](https://github.com/didi/KnowStreaming/blob/master/docs/install_guide/%E6%BA%90%E7%A0%81%E7%BC%96%E8%AF%91%E6%89%93%E5%8C%85%E6%89%8B%E5%86%8C.md
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 2、查看用户在ZK的ACL
|
||||||
|
|
||||||
|
假设我们使用的用户是 `kafka` 这个用户。
|
||||||
|
|
||||||
|
- 1、查看 server.properties 的配置的 zookeeper.connect 的地址;
|
||||||
|
- 2、使用 `zkCli.sh -server zookeeper.connect的地址` 登录到ZK页面;
|
||||||
|
- 3、ZK页面上,执行命令 `getAcl /kafka` 查看 `kafka` 用户的权限;
|
||||||
|
|
||||||
|
此时,我们可以看到如下信息:
|
||||||
|

|
||||||
|
|
||||||
|
`kafka` 用户需要的权限是 `cdrwa`。如果用户没有 `cdrwa` 权限的话,需要创建用户并授权,授权命令为:`setAcl`
|
||||||
|
|
||||||
|
|
||||||
|
### 3、创建Kerberos的keytab并修改 KnowStreaming 主机
|
||||||
|
|
||||||
|
- 1、在 Kerberos 的域中创建 `kafka/_HOST` 的 `keytab`,并导出。例如:`kafka/dbs-kafka-test-8-53`;
|
||||||
|
- 2、导出 keytab 后上传到安装 KS 的机器的 `/etc/keytab` 下;
|
||||||
|
- 3、在 KS 机器上,执行 `kinit -kt zookeeper.keytab kafka/dbs-kafka-test-8-53` 看是否能进行 `Kerberos` 登录;
|
||||||
|
- 4、可以登录后,配置 `/opt/zookeeper.jaas` 文件,例子如下:
|
||||||
|
```sql
|
||||||
|
Client {
|
||||||
|
com.sun.security.auth.module.Krb5LoginModule required
|
||||||
|
useKeyTab=true
|
||||||
|
storeKey=false
|
||||||
|
serviceName="zookeeper"
|
||||||
|
keyTab="/etc/keytab/zookeeper.keytab"
|
||||||
|
principal="kafka/dbs-kafka-test-8-53@XXX.XXX.XXX";
|
||||||
|
};
|
||||||
|
```
|
||||||
|
- 5、需要配置 `KDC-Server` 对 `KnowStreaming` 的机器开通防火墙,并在KS的机器 `/etc/host/` 配置 `kdc-server` 的 `hostname`。并将 `krb5.conf` 导入到 `/etc` 下;
|
||||||
|
|
||||||
|
|
||||||
|
### 4、修改 KnowStreaming 的配置
|
||||||
|
|
||||||
|
- 1、在 `/usr/local/KnowStreaming/KnowStreaming/bin/startup.sh` 中的47行的JAVA_OPT中追加如下设置
|
||||||
|
```bash
|
||||||
|
-Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/zookeeper.jaas
|
||||||
|
```
|
||||||
|
|
||||||
|
- 2、重启KS集群后再 start.out 中看到如下信息,则证明Kerberos配置成功;
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
### 5、补充说明
|
||||||
|
|
||||||
|
- 1、多Kafka集群如果用的是一样的Kerberos域的话,只需在每个`ZK`中给`kafka`用户配置`cdrwa`权限即可,这样集群初始化的时候`zkclient`都可以认证;
|
||||||
|
- 2、当前需要修改代码重新打包才可以支持,后续考虑通过页面支持Kerberos认证的ZK接入;
|
||||||
|
- 3、多个Kerberos域暂时未适配;
|
||||||
@@ -29,7 +29,7 @@
|
|||||||
- 初始化 MySQL 表及数据
|
- 初始化 MySQL 表及数据
|
||||||
- 初始化 Elasticsearch 索引
|
- 初始化 Elasticsearch 索引
|
||||||
|
|
||||||
具体见:[快速开始](./1-quick-start.md) 中的最后一步,部署 KnowStreaming 服务中的初始化相关工作。
|
具体见:[单机部署手册](../install_guide/单机部署手册.md) 中的最后一步,部署 KnowStreaming 服务中的初始化相关工作。
|
||||||
|
|
||||||
### 6.1.4、本地启动
|
### 6.1.4、本地启动
|
||||||
|
|
||||||
@@ -73,7 +73,7 @@ km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/KnowStreaming.java
|
|||||||
IDEA 更多具体的配置如下图所示:
|
IDEA 更多具体的配置如下图所示:
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="./assets/startup_using_source_code/IDEA配置.jpg" width = "512" height = "318" div align=center />
|
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_BW1RzgEMh4n6L4dL4ncl" width = "512" height = "318" div align=center />
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
**第四步:启动项目**
|
**第四步:启动项目**
|
||||||
@@ -84,7 +84,7 @@ IDEA 更多具体的配置如下图所示:
|
|||||||
|
|
||||||
`Know Streaming` 启动之后,可以访问一些信息,包括:
|
`Know Streaming` 启动之后,可以访问一些信息,包括:
|
||||||
|
|
||||||
- 产品页面:http://localhost:8080 ,默认账号密码:`admin` / `admin2022_` 进行登录。
|
- 产品页面:http://localhost:8080 ,默认账号密码:`admin` / `admin2022_` 进行登录。`v3.0.0-beta.2`版本开始,默认账号密码为`admin` / `admin`;
|
||||||
- 接口地址:http://localhost:8080/swagger-ui.html 查看后端提供的相关接口。
|
- 接口地址:http://localhost:8080/swagger-ui.html 查看后端提供的相关接口。
|
||||||
|
|
||||||
更多信息,详见:[KnowStreaming 官网](https://knowstreaming.com/)
|
更多信息,详见:[KnowStreaming 官网](https://knowstreaming.com/)
|
||||||
199
docs/dev_guide/登录系统对接.md
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## 登录系统对接
|
||||||
|
|
||||||
|
[KnowStreaming](https://github.com/didi/KnowStreaming)(以下简称KS) 除了实现基于本地MySQL的用户登录认证方式外,还已经实现了基于Ldap的登录认证。
|
||||||
|
|
||||||
|
但是,登录认证系统并非仅此两种。因此,为了具有更好的拓展性,KS具有自定义登陆认证逻辑,快速对接已有系统的特性。
|
||||||
|
|
||||||
|
在KS中,我们将登陆认证相关的一些文件放在[km-extends](https://github.com/didi/KnowStreaming/tree/master/km-extends)模块下的[km-account](https://github.com/didi/KnowStreaming/tree/master/km-extends/km-account)模块里。
|
||||||
|
|
||||||
|
本文将介绍KS如何快速对接自有的用户登录认证系统。
|
||||||
|
|
||||||
|
### 对接步骤
|
||||||
|
|
||||||
|
- 创建一个登陆认证类,实现[LogiCommon](https://github.com/didi/LogiCommon)的LoginExtend接口;
|
||||||
|
- 将[application.yml](https://github.com/didi/KnowStreaming/blob/master/km-rest/src/main/resources/application.yml)中的spring.logi-security.login-extend-bean-name字段改为登陆认证类的bean名称;
|
||||||
|
|
||||||
|
```Java
|
||||||
|
//LoginExtend 接口
|
||||||
|
public interface LoginExtend {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 验证登录信息,同时记住登录状态
|
||||||
|
*/
|
||||||
|
UserBriefVO verifyLogin(AccountLoginDTO var1, HttpServletRequest var2, HttpServletResponse var3) throws LogiSecurityException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 登出接口,清除登录状态
|
||||||
|
*/
|
||||||
|
Result<Boolean> logout(HttpServletRequest var1, HttpServletResponse var2);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 检查是否已经登录
|
||||||
|
*/
|
||||||
|
boolean interceptorCheck(HttpServletRequest var1, HttpServletResponse var2, String var3, List<String> var4) throws IOException;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 对接例子
|
||||||
|
|
||||||
|
我们以Ldap对接为例,说明KS如何对接登录认证系统。
|
||||||
|
|
||||||
|
+ 编写[LdapLoginServiceImpl](https://github.com/didi/KnowStreaming/blob/master/km-extends/km-account/src/main/java/com/xiaojukeji/know/streaming/km/account/login/ldap/LdapLoginServiceImpl.java)类,实现LoginExtend接口。
|
||||||
|
+ 设置[application.yml](https://github.com/didi/KnowStreaming/blob/master/km-rest/src/main/resources/application.yml)中的spring.logi-security.login-extend-bean-name=ksLdapLoginService。
|
||||||
|
|
||||||
|
完成上述两步即可实现KS对接Ldap认证登陆。
|
||||||
|
|
||||||
|
```Java
|
||||||
|
@Service("ksLdapLoginService")
|
||||||
|
public class LdapLoginServiceImpl implements LoginExtend {
|
||||||
|
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public UserBriefVO verifyLogin(AccountLoginDTO loginDTO,
|
||||||
|
HttpServletRequest request,
|
||||||
|
HttpServletResponse response) throws LogiSecurityException {
|
||||||
|
String decodePasswd = AESUtils.decrypt(loginDTO.getPw());
|
||||||
|
|
||||||
|
// 去LDAP验证账密
|
||||||
|
LdapPrincipal ldapAttrsInfo = ldapAuthentication.authenticate(loginDTO.getUserName(), decodePasswd);
|
||||||
|
if (ldapAttrsInfo == null) {
|
||||||
|
// 用户不存在;正常来说如果有问题,上一步会直接抛出异常
|
||||||
|
throw new LogiSecurityException(ResultCode.USER_NOT_EXISTS);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 进行业务相关操作
|
||||||
|
|
||||||
|
// 记录登录状态,Ldap因为无法记录登录状态,因此由KnowStreaming进行记录
|
||||||
|
initLoginContext(request, response, loginDTO.getUserName(), user.getId());
|
||||||
|
return CopyBeanUtil.copy(user, UserBriefVO.class);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Result<Boolean> logout(HttpServletRequest request, HttpServletResponse response) {
|
||||||
|
|
||||||
|
//清理cookie和session
|
||||||
|
|
||||||
|
return Result.buildSucc(Boolean.TRUE);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean interceptorCheck(HttpServletRequest request, HttpServletResponse response, String requestMappingValue, List<String> whiteMappingValues) throws IOException {
|
||||||
|
|
||||||
|
// 检查是否已经登录
|
||||||
|
String userName = HttpRequestUtil.getOperator(request);
|
||||||
|
if (StringUtils.isEmpty(userName)) {
|
||||||
|
// 未登录,则进行登出
|
||||||
|
logout(request, response);
|
||||||
|
return Boolean.FALSE;
|
||||||
|
}
|
||||||
|
|
||||||
|
return Boolean.TRUE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 实现原理
|
||||||
|
|
||||||
|
因为登陆和登出整体实现逻辑是一致的,所以我们以登陆逻辑为例进行介绍。
|
||||||
|
|
||||||
|
+ 登陆原理
|
||||||
|
|
||||||
|
登陆走的是[LogiCommon](https://github.com/didi/LogiCommon)自带的LoginController。
|
||||||
|
|
||||||
|
```java
|
||||||
|
@RestController
|
||||||
|
public class LoginController {
|
||||||
|
|
||||||
|
|
||||||
|
//登陆接口
|
||||||
|
@PostMapping({"/login"})
|
||||||
|
public Result<UserBriefVO> login(HttpServletRequest request, HttpServletResponse response, @RequestBody AccountLoginDTO loginDTO) {
|
||||||
|
try {
|
||||||
|
//登陆认证
|
||||||
|
UserBriefVO userBriefVO = this.loginService.verifyLogin(loginDTO, request, response);
|
||||||
|
return Result.success(userBriefVO);
|
||||||
|
|
||||||
|
} catch (LogiSecurityException var5) {
|
||||||
|
return Result.fail(var5);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
而登陆操作是调用LoginServiceImpl类来实现,但是具体由哪个登陆认证类来执行登陆操作却由loginExtendBeanTool来指定。
|
||||||
|
|
||||||
|
```java
|
||||||
|
//LoginServiceImpl类
|
||||||
|
@Service
|
||||||
|
public class LoginServiceImpl implements LoginService {
|
||||||
|
|
||||||
|
//实现登陆操作,但是具体哪个登陆类由loginExtendBeanTool来管理
|
||||||
|
public UserBriefVO verifyLogin(AccountLoginDTO loginDTO, HttpServletRequest request, HttpServletResponse response) throws LogiSecurityException {
|
||||||
|
|
||||||
|
return this.loginExtendBeanTool.getLoginExtendImpl().verifyLogin(loginDTO, request, response);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
而loginExtendBeanTool类会优先去查找用户指定的登陆认证类,如果失败则调用默认的登陆认证函数。
|
||||||
|
|
||||||
|
```java
|
||||||
|
//LoginExtendBeanTool类
|
||||||
|
@Component("logiSecurityLoginExtendBeanTool")
|
||||||
|
public class LoginExtendBeanTool {
|
||||||
|
|
||||||
|
public LoginExtend getLoginExtendImpl() {
|
||||||
|
LoginExtend loginExtend;
|
||||||
|
//先调用用户指定登陆类,如果失败则调用系统默认登陆认证
|
||||||
|
try {
|
||||||
|
//调用的类由spring.logi-security.login-extend-bean-name指定
|
||||||
|
loginExtend = this.getCustomLoginExtendImplBean();
|
||||||
|
} catch (UnsupportedOperationException var3) {
|
||||||
|
loginExtend = this.getDefaultLoginExtendImplBean();
|
||||||
|
}
|
||||||
|
|
||||||
|
return loginExtend;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
+ 认证原理
|
||||||
|
|
||||||
|
认证的实现则比较简单,向Spring中注册我们的拦截器PermissionInterceptor。
|
||||||
|
|
||||||
|
拦截器会调用LoginServiceImpl类的拦截方法,LoginServiceImpl后续处理逻辑就和前面登陆是一致的。
|
||||||
|
|
||||||
|
```java
|
||||||
|
public class PermissionInterceptor implements HandlerInterceptor {
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 拦截预处理
|
||||||
|
* @return boolean false:拦截, 不向下执行, true:放行
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
|
||||||
|
|
||||||
|
//免登录相关校验,如果验证通过,提前返回
|
||||||
|
|
||||||
|
//走拦截函数,进行普通用户验证
|
||||||
|
return loginService.interceptorCheck(request, response, classRequestMappingValue, whiteMappingValues);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
@@ -1,25 +1,20 @@
|
|||||||
|
|
||||||

|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
## JMX-连接失败问题解决
|
## JMX-连接失败问题解决
|
||||||
|
|
||||||
- [JMX-连接失败问题解决](#jmx-连接失败问题解决)
|
集群正常接入`KnowStreaming`之后,即可以看到集群的Broker列表,此时如果查看不了Topic的实时流量,或者是Broker的实时流量信息时,那么大概率就是`JMX`连接的问题了。
|
||||||
- [1、问题&说明](#1问题说明)
|
|
||||||
- [2、解决方法](#2解决方法)
|
|
||||||
- [3、解决方法 —— 认证的JMX](#3解决方法--认证的jmx)
|
|
||||||
|
|
||||||
集群正常接入Logi-KafkaManager之后,即可以看到集群的Broker列表,此时如果查看不了Topic的实时流量,或者是Broker的实时流量信息时,那么大概率就是JMX连接的问题了。
|
|
||||||
|
|
||||||
下面我们按照步骤来一步一步的检查。
|
下面我们按照步骤来一步一步的检查。
|
||||||
|
|
||||||
### 1、问题&说明
|
### 1、问题说明
|
||||||
|
|
||||||
**类型一:JMX配置未开启**
|
**类型一:JMX配置未开启**
|
||||||
|
|
||||||
未开启时,直接到`2、解决方法`查看如何开启即可。
|
未开启时,直接到`2、解决方法`查看如何开启即可。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
**类型二:配置错误**
|
**类型二:配置错误**
|
||||||
@@ -43,6 +38,26 @@ java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested excep
|
|||||||
java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is:
|
java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is:
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**类型三:连接特定IP**
|
||||||
|
|
||||||
|
Broker 配置了内外网,而JMX在配置时,可能配置了内网IP或者外网IP,此时 `KnowStreaming` 需要连接到特定网络的IP才可以进行访问。
|
||||||
|
|
||||||
|
比如:
|
||||||
|
|
||||||
|
Broker在ZK的存储结构如下所示,我们期望连接到 `endpoints` 中标记为 `INTERNAL` 的地址,但是 `KnowStreaming` 却连接了 `EXTERNAL` 的地址,此时可以看 `4、解决方法 —— JMX连接特定网络` 进行解决。
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"listener_security_protocol_map": {"EXTERNAL":"SASL_PLAINTEXT","INTERNAL":"SASL_PLAINTEXT"},
|
||||||
|
"endpoints": ["EXTERNAL://192.168.0.1:7092","INTERNAL://192.168.0.2:7093"],
|
||||||
|
"jmx_port": 8099,
|
||||||
|
"host": "192.168.0.1",
|
||||||
|
"timestamp": "1627289710439",
|
||||||
|
"port": -1,
|
||||||
|
"version": 4
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### 2、解决方法
|
### 2、解决方法
|
||||||
|
|
||||||
这里仅介绍一下比较通用的解决方式,如若有更好的方式,欢迎大家指导告知一下。
|
这里仅介绍一下比较通用的解决方式,如若有更好的方式,欢迎大家指导告知一下。
|
||||||
@@ -76,26 +91,36 @@ fi
|
|||||||
|
|
||||||
如果您是直接看的这个部分,建议先看一下上一节:`2、解决方法`以确保`JMX`的配置没有问题了。
|
如果您是直接看的这个部分,建议先看一下上一节:`2、解决方法`以确保`JMX`的配置没有问题了。
|
||||||
|
|
||||||
在JMX的配置等都没有问题的情况下,如果是因为认证的原因导致连接不了的,此时可以使用下面介绍的方法进行解决。
|
在`JMX`的配置等都没有问题的情况下,如果是因为认证的原因导致连接不了的,可以在集群接入界面配置你的`JMX`认证信息。
|
||||||
|
|
||||||
**当前这块后端刚刚开发完成,可能还不够完善,有问题随时沟通。**
|
<img src='http://img-ys011.didistatic.com/static/dc2img/do1_EUU352qMEX1Jdp7pxizp' width=350>
|
||||||
|
|
||||||
`Logi-KafkaManager 2.2.0+`之后的版本后端已经支持`JMX`认证方式的连接,但是还没有界面,此时我们可以往`cluster`表的`jmx_properties`字段写入`JMX`的认证信息。
|
|
||||||
|
|
||||||
这个数据是`json`格式的字符串,例子如下所示:
|
|
||||||
|
|
||||||
|
### 4、解决方法 —— JMX连接特定网络
|
||||||
|
|
||||||
|
可以手动往`ks_km_physical_cluster`表的`jmx_properties`字段增加一个`useWhichEndpoint`字段,从而控制 `KnowStreaming` 连接到特定的JMX IP及PORT。
|
||||||
|
|
||||||
|
`jmx_properties`格式:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"maxConn": 10, # KM对单台Broker的最大JMX连接数
|
"maxConn": 100, # KM对单台Broker的最大JMX连接数
|
||||||
"username": "xxxxx", # 用户名
|
"username": "xxxxx", # 用户名,可以不填写
|
||||||
"password": "xxxx", # 密码
|
"password": "xxxx", # 密码,可以不填写
|
||||||
"openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭
|
"openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭
|
||||||
|
"useWhichEndpoint": "EXTERNAL" #指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
SQL的例子:
|
SQL例子:
|
||||||
```sql
|
```sql
|
||||||
UPDATE cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false }' where id={xxx};
|
UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "useWhichEndpoint": "xxx"}' where id={xxx};
|
||||||
```
|
```
|
||||||
|
|
||||||
|
注意:
|
||||||
|
|
||||||
|
+ 目前此功能只支持采用 `ZK` 做分布式协调的kafka集群。
|
||||||
|
|
||||||
|
|
||||||
@@ -6,9 +6,10 @@
|
|||||||
|
|
||||||
### 2.1.1、安装说明
|
### 2.1.1、安装说明
|
||||||
|
|
||||||
- 以 `v3.0.0-bete` 版本为例进行部署;
|
- 以 `v3.0.0-beta.1` 版本为例进行部署;
|
||||||
- 以 CentOS-7 为例,系统基础配置要求 4C-8G;
|
- 以 CentOS-7 为例,系统基础配置要求 4C-8G;
|
||||||
- 部署完成后,可通过浏览器:`IP:PORT` 进行访问,默认端口是 `8080`,系统默认账号密码: `admin` / `admin2022_`;
|
- 部署完成后,可通过浏览器:`IP:PORT` 进行访问,默认端口是 `8080`,系统默认账号密码: `admin` / `admin2022_`。
|
||||||
|
- `v3.0.0-beta.2`版本开始,默认账号密码为`admin` / `admin`;
|
||||||
- 本文为单机部署,如需分布式部署,[请联系我们](https://knowstreaming.com/support-center)
|
- 本文为单机部署,如需分布式部署,[请联系我们](https://knowstreaming.com/support-center)
|
||||||
|
|
||||||
**软件依赖**
|
**软件依赖**
|
||||||
@@ -19,7 +20,7 @@
|
|||||||
| ElasticSearch | v7.6+ | 8060 |
|
| ElasticSearch | v7.6+ | 8060 |
|
||||||
| JDK | v8+ | - |
|
| JDK | v8+ | - |
|
||||||
| CentOS | v6+ | - |
|
| CentOS | v6+ | - |
|
||||||
| Ubantu | v16+ | - |
|
| Ubuntu | v16+ | - |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -29,7 +30,7 @@
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 在服务器中下载安装脚本, 该脚本中会在当前目录下,重新安装MySQL。重装后的mysql密码存放在当前目录的mysql.password文件中。
|
# 在服务器中下载安装脚本, 该脚本中会在当前目录下,重新安装MySQL。重装后的mysql密码存放在当前目录的mysql.password文件中。
|
||||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/deploy_KnowStreaming.sh
|
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/deploy_KnowStreaming-3.0.0-beta.1.sh
|
||||||
|
|
||||||
# 执行脚本
|
# 执行脚本
|
||||||
sh deploy_KnowStreaming.sh
|
sh deploy_KnowStreaming.sh
|
||||||
@@ -42,10 +43,10 @@ sh deploy_KnowStreaming.sh
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 将安装包下载到本地且传输到目标服务器
|
# 将安装包下载到本地且传输到目标服务器
|
||||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta—offline.tar.gz
|
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.1-offline.tar.gz
|
||||||
|
|
||||||
# 解压安装包
|
# 解压安装包
|
||||||
tar -zxf KnowStreaming-3.0.0-beta—offline.tar.gz
|
tar -zxf KnowStreaming-3.0.0-beta.1-offline.tar.gz
|
||||||
|
|
||||||
# 执行安装脚本
|
# 执行安装脚本
|
||||||
sh deploy_KnowStreaming-offline.sh
|
sh deploy_KnowStreaming-offline.sh
|
||||||
@@ -58,28 +59,182 @@ sh deploy_KnowStreaming-offline.sh
|
|||||||
|
|
||||||
### 2.1.3、容器部署
|
### 2.1.3、容器部署
|
||||||
|
|
||||||
|
#### 2.1.3.1、Helm
|
||||||
|
|
||||||
**环境依赖**
|
**环境依赖**
|
||||||
|
|
||||||
- Kubernetes >= 1.14 ,Helm >= 2.17.0
|
- Kubernetes >= 1.14 ,Helm >= 2.17.0
|
||||||
|
|
||||||
- 默认配置为全部安装( ElasticSearch + MySQL + KnowStreaming)
|
- 默认依赖全部安装,ElasticSearch(3 节点集群模式) + MySQL(单机) + KnowStreaming-manager + KnowStreaming-ui
|
||||||
|
|
||||||
- 如果使用已有的 ElasticSearch(7.6.x) 和 MySQL(5.7) 只需调整 values.yaml 部分参数即可
|
- 使用已有的 ElasticSearch(7.6.x) 和 MySQL(5.7) 只需调整 values.yaml 部分参数即可
|
||||||
|
|
||||||
**安装命令**
|
**安装命令**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 下载安装包
|
# 相关镜像在Docker Hub都可以下载
|
||||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/knowstreaming-3.0.0-hlem.tgz
|
# 快速安装(NAMESPACE需要更改为已存在的,安装启动需要几分钟初始化请稍等~)
|
||||||
|
helm install -n [NAMESPACE] [NAME] http://download.knowstreaming.com/charts/knowstreaming-manager-0.1.5.tgz
|
||||||
# 解压安装包
|
|
||||||
tar -zxf knowstreaming-3.0.0-hlem.tgz
|
|
||||||
|
|
||||||
# 执行命令(NAMESPACE需要更改为已存在的)
|
|
||||||
helm install -n [NAMESPACE] knowstreaming knowstreaming-manager/
|
|
||||||
|
|
||||||
# 获取KnowStreaming前端ui的service. 默认nodeport方式.
|
# 获取KnowStreaming前端ui的service. 默认nodeport方式.
|
||||||
# (http://nodeIP:nodeport,默认用户名密码:admin/admin2022_)
|
# (http://nodeIP:nodeport,默认用户名密码:admin/admin2022_)
|
||||||
|
# `v3.0.0-beta.2`版本开始(helm chart包版本0.1.4开始),默认账号密码为`admin` / `admin`;
|
||||||
|
|
||||||
|
# 添加仓库
|
||||||
|
helm repo add knowstreaming http://download.knowstreaming.com/charts
|
||||||
|
|
||||||
|
# 拉取最新版本
|
||||||
|
helm pull knowstreaming/knowstreaming-manager
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#### 2.1.3.2、Docker Compose
|
||||||
|
**环境依赖**
|
||||||
|
|
||||||
|
- [Docker](https://docs.docker.com/engine/install/)
|
||||||
|
- [Docker Compose](https://docs.docker.com/compose/install/)
|
||||||
|
|
||||||
|
|
||||||
|
**安装命令**
|
||||||
|
```bash
|
||||||
|
# `v3.0.0-beta.2`版本开始(docker镜像为0.2.0版本开始),默认账号密码为`admin` / `admin`;
|
||||||
|
# https://hub.docker.com/u/knowstreaming 在此处寻找最新镜像版本
|
||||||
|
# mysql与es可以使用自己搭建的服务,调整对应配置即可
|
||||||
|
|
||||||
|
# 复制docker-compose.yml到指定位置后执行下方命令即可启动
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
**验证安装**
|
||||||
|
```shell
|
||||||
|
docker-compose ps
|
||||||
|
# 验证启动 - 状态为 UP 则表示成功
|
||||||
|
Name Command State Ports
|
||||||
|
----------------------------------------------------------------------------------------------------
|
||||||
|
elasticsearch-single /usr/local/bin/docker-entr ... Up 9200/tcp, 9300/tcp
|
||||||
|
knowstreaming-init /bin/bash /es_template_cre ... Up
|
||||||
|
knowstreaming-manager /bin/sh /ks-start.sh Up 80/tcp
|
||||||
|
knowstreaming-mysql /entrypoint.sh mysqld Up (health: starting) 3306/tcp, 33060/tcp
|
||||||
|
knowstreaming-ui /docker-entrypoint.sh ngin ... Up 0.0.0.0:80->80/tcp
|
||||||
|
|
||||||
|
# 稍等一分钟左右 knowstreaming-init 会退出,表示es初始化完成,可以访问页面
|
||||||
|
Name Command State Ports
|
||||||
|
-------------------------------------------------------------------------------------------
|
||||||
|
knowstreaming-init /bin/bash /es_template_cre ... Exit 0
|
||||||
|
knowstreaming-mysql /entrypoint.sh mysqld Up (healthy) 3306/tcp, 33060/tcp
|
||||||
|
```
|
||||||
|
|
||||||
|
**访问**
|
||||||
|
```http request
|
||||||
|
http://127.0.0.1:80/
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**docker-compose.yml**
|
||||||
|
```yml
|
||||||
|
version: "2"
|
||||||
|
services:
|
||||||
|
# *不要调整knowstreaming-manager服务名称,ui中会用到
|
||||||
|
knowstreaming-manager:
|
||||||
|
image: knowstreaming/knowstreaming-manager:latest
|
||||||
|
container_name: knowstreaming-manager
|
||||||
|
privileged: true
|
||||||
|
restart: always
|
||||||
|
depends_on:
|
||||||
|
- elasticsearch-single
|
||||||
|
- knowstreaming-mysql
|
||||||
|
expose:
|
||||||
|
- 80
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- /ks-start.sh
|
||||||
|
environment:
|
||||||
|
TZ: Asia/Shanghai
|
||||||
|
# mysql服务地址
|
||||||
|
SERVER_MYSQL_ADDRESS: knowstreaming-mysql:3306
|
||||||
|
# mysql数据库名
|
||||||
|
SERVER_MYSQL_DB: know_streaming
|
||||||
|
# mysql用户名
|
||||||
|
SERVER_MYSQL_USER: root
|
||||||
|
# mysql用户密码
|
||||||
|
SERVER_MYSQL_PASSWORD: admin2022_
|
||||||
|
# es服务地址
|
||||||
|
SERVER_ES_ADDRESS: elasticsearch-single:9200
|
||||||
|
# 服务JVM参数
|
||||||
|
JAVA_OPTS: -Xmx1g -Xms1g
|
||||||
|
# 对于kafka中ADVERTISED_LISTENERS填写的hostname可以通过该方式完成
|
||||||
|
# extra_hosts:
|
||||||
|
# - "hostname:x.x.x.x"
|
||||||
|
# 服务日志路径
|
||||||
|
# volumes:
|
||||||
|
# - /ks/manage/log:/logs
|
||||||
|
knowstreaming-ui:
|
||||||
|
image: knowstreaming/knowstreaming-ui:latest
|
||||||
|
container_name: knowstreaming-ui
|
||||||
|
restart: always
|
||||||
|
ports:
|
||||||
|
- '80:80'
|
||||||
|
environment:
|
||||||
|
TZ: Asia/Shanghai
|
||||||
|
depends_on:
|
||||||
|
- knowstreaming-manager
|
||||||
|
# extra_hosts:
|
||||||
|
# - "hostname:x.x.x.x"
|
||||||
|
elasticsearch-single:
|
||||||
|
image: docker.io/library/elasticsearch:7.6.2
|
||||||
|
container_name: elasticsearch-single
|
||||||
|
restart: always
|
||||||
|
expose:
|
||||||
|
- 9200
|
||||||
|
- 9300
|
||||||
|
# ports:
|
||||||
|
# - '9200:9200'
|
||||||
|
# - '9300:9300'
|
||||||
|
environment:
|
||||||
|
TZ: Asia/Shanghai
|
||||||
|
# es的JVM参数
|
||||||
|
ES_JAVA_OPTS: -Xms512m -Xmx512m
|
||||||
|
# 单节点配置,多节点集群参考 https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file
|
||||||
|
discovery.type: single-node
|
||||||
|
# 数据持久化路径
|
||||||
|
# volumes:
|
||||||
|
# - /ks/es/data:/usr/share/elasticsearch/data
|
||||||
|
|
||||||
|
# es初始化服务,与manager使用同一镜像
|
||||||
|
# 首次启动es需初始化模版和索引,后续会自动创建
|
||||||
|
knowstreaming-init:
|
||||||
|
image: knowstreaming/knowstreaming-manager:latest
|
||||||
|
container_name: knowstreaming-init
|
||||||
|
depends_on:
|
||||||
|
- elasticsearch-single
|
||||||
|
command:
|
||||||
|
- /bin/bash
|
||||||
|
- /es_template_create.sh
|
||||||
|
environment:
|
||||||
|
TZ: Asia/Shanghai
|
||||||
|
# es服务地址
|
||||||
|
SERVER_ES_ADDRESS: elasticsearch-single:9200
|
||||||
|
|
||||||
|
knowstreaming-mysql:
|
||||||
|
image: knowstreaming/knowstreaming-mysql:latest
|
||||||
|
container_name: knowstreaming-mysql
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
TZ: Asia/Shanghai
|
||||||
|
# root 用户密码
|
||||||
|
MYSQL_ROOT_PASSWORD: admin2022_
|
||||||
|
# 初始化时创建的数据库名称
|
||||||
|
MYSQL_DATABASE: know_streaming
|
||||||
|
# 通配所有host,可以访问远程
|
||||||
|
MYSQL_ROOT_HOST: '%'
|
||||||
|
expose:
|
||||||
|
- 3306
|
||||||
|
# ports:
|
||||||
|
# - '3306:3306'
|
||||||
|
# 数据持久化路径
|
||||||
|
# volumes:
|
||||||
|
# - /ks/mysql/data:/data/mysql
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
@@ -219,10 +374,10 @@ sh /data/elasticsearch/control.sh status
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 下载安装包
|
# 下载安装包
|
||||||
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.tar.gz
|
wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.1.tar.gz
|
||||||
|
|
||||||
# 解压安装包到指定目录
|
# 解压安装包到指定目录
|
||||||
tar -zxf KnowStreaming-3.0.0-beta.tar.gz -C /data/
|
tar -zxf KnowStreaming-3.0.0-beta.1.tar.gz -C /data/
|
||||||
|
|
||||||
# 修改启动脚本并加入systemd管理
|
# 修改启动脚本并加入systemd管理
|
||||||
cd /data/KnowStreaming/
|
cd /data/KnowStreaming/
|
||||||
@@ -236,7 +391,7 @@ mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-ks-km.sql
|
|||||||
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-logi.sql
|
mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-logi.sql
|
||||||
|
|
||||||
# 创建elasticsearch初始化数据
|
# 创建elasticsearch初始化数据
|
||||||
sh ./init/template/template.sh
|
sh ./bin/init_es_template.sh
|
||||||
|
|
||||||
# 修改配置文件
|
# 修改配置文件
|
||||||
vim ./conf/application.yml
|
vim ./conf/application.yml
|
||||||
|
|||||||
@@ -1,6 +1,4 @@
|
|||||||
|

|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
# `Know Streaming` 源码编译打包手册
|
# `Know Streaming` 源码编译打包手册
|
||||||
|
|
||||||
@@ -25,27 +23,23 @@
|
|||||||
|
|
||||||
具体见下面描述。
|
具体见下面描述。
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### 2.1、前后端合并打包
|
### 2.1、前后端合并打包
|
||||||
|
|
||||||
1. 下载源码;
|
1. 下载源码;
|
||||||
2. 进入 `KS-KM` 工程目录,执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 命令;
|
2. 进入 `KS-KM` 工程目录,执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 命令;
|
||||||
3. 打包命令执行完成后,会在 `km-dist/target` 目录下面生成一个 `KnowStreaming-*.tar.gz` 的安装包。
|
3. 打包命令执行完成后,会在 `km-dist/target` 目录下面生成一个 `KnowStreaming-*.tar.gz` 的安装包。
|
||||||
|
|
||||||
|
|
||||||
### 2.2、前端单独打包
|
### 2.2、前端单独打包
|
||||||
|
|
||||||
1. 下载源码;
|
1. 下载源码;
|
||||||
2. 进入 `KS-KM/km-console` 工程目录;
|
2. 跳转到 [前端打包构建文档](https://github.com/didi/KnowStreaming/blob/master/km-console/README.md) 按步骤进行。打包成功后,会在 `km-rest/src/main/resources` 目录下生成名为 `templates` 的前端静态资源包;
|
||||||
3. 执行 `npm run build`命令,会在 `KS-KM/km-console` 目录下生成一个名为 `pub` 的前端静态资源包;
|
3. 如果上一步过程中报错,请查看 [FAQ](https://github.com/didi/KnowStreaming/blob/master/docs/user_guide/faq.md) 第 8.10 条;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### 2.3、后端单独打包
|
### 2.3、后端单独打包
|
||||||
|
|
||||||
1. 下载源码;
|
1. 下载源码;
|
||||||
2. 修改顶层 `pom.xml` ,去掉其中的 `km-console` 模块,如下所示;
|
2. 修改顶层 `pom.xml` ,去掉其中的 `km-console` 模块,如下所示;
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
<modules>
|
<modules>
|
||||||
<!-- <module>km-console</module>-->
|
<!-- <module>km-console</module>-->
|
||||||
@@ -62,10 +56,7 @@
|
|||||||
<module>km-rest</module>
|
<module>km-rest</module>
|
||||||
<module>km-dist</module>
|
<module>km-dist</module>
|
||||||
</modules>
|
</modules>
|
||||||
```
|
```
|
||||||
|
|
||||||
3. 执行 `mvn -U clean package -Dmaven.test.skip=true`命令;
|
3. 执行 `mvn -U clean package -Dmaven.test.skip=true`命令;
|
||||||
4. 执行完成之后会在 `KS-KM/km-rest/target` 目录下面生成一个 `ks-km.jar` 即为KS的后端部署的Jar包,也可以执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 生成的tar包也仅有后端服务的功能;
|
4. 执行完成之后会在 `KS-KM/km-rest/target` 目录下面生成一个 `ks-km.jar` 即为 KS 的后端部署的 Jar 包,也可以执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 生成的 tar 包也仅有后端服务的功能;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,253 @@
|
|||||||
## 6.2、版本升级手册
|
## 6.2、版本升级手册
|
||||||
|
|
||||||
**`2.x`版本 升级至 `3.0.0`版本**
|
注意:
|
||||||
|
- 如果想升级至具体版本,需要将你当前版本至你期望使用版本的变更统统执行一遍,然后才能正常使用。
|
||||||
|
- 如果中间某个版本没有升级信息,则表示该版本直接替换安装包即可从前一个版本升级至当前版本。
|
||||||
|
|
||||||
|
### 6.2.0、升级至 `master` 版本
|
||||||
|
|
||||||
|
暂无
|
||||||
|
|
||||||
|
### 6.2.1、升级至 `v3.0.1` 版本
|
||||||
|
|
||||||
|
**ES 索引模版**
|
||||||
|
```bash
|
||||||
|
# 新增 ks_kafka_zookeeper_metric 索引模版。
|
||||||
|
# 可通过再次执行 bin/init_es_template.sh 脚本,创建该索引模版。
|
||||||
|
|
||||||
|
# 索引模版内容
|
||||||
|
PUT _template/ks_kafka_zookeeper_metric
|
||||||
|
{
|
||||||
|
"order" : 10,
|
||||||
|
"index_patterns" : [
|
||||||
|
"ks_kafka_zookeeper_metric*"
|
||||||
|
],
|
||||||
|
"settings" : {
|
||||||
|
"index" : {
|
||||||
|
"number_of_shards" : "10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings" : {
|
||||||
|
"properties" : {
|
||||||
|
"routingValue" : {
|
||||||
|
"type" : "text",
|
||||||
|
"fields" : {
|
||||||
|
"keyword" : {
|
||||||
|
"ignore_above" : 256,
|
||||||
|
"type" : "keyword"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"clusterPhyId" : {
|
||||||
|
"type" : "long"
|
||||||
|
},
|
||||||
|
"metrics" : {
|
||||||
|
"properties" : {
|
||||||
|
"AvgRequestLatency" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"MinRequestLatency" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"MaxRequestLatency" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"OutstandingRequests" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"NodeCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"WatchCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"NumAliveConnections" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"PacketsReceived" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"PacketsSent" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"EphemeralsCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"ApproximateDataSize" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"OpenFileDescriptorCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
},
|
||||||
|
"MaxFileDescriptorCount" : {
|
||||||
|
"type" : "double"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"key" : {
|
||||||
|
"type" : "text",
|
||||||
|
"fields" : {
|
||||||
|
"keyword" : {
|
||||||
|
"ignore_above" : 256,
|
||||||
|
"type" : "keyword"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"timestamp" : {
|
||||||
|
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||||
|
"type" : "date"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"aliases" : { }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**SQL 变更**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP TABLE IF EXISTS `ks_km_zookeeper`;
|
||||||
|
CREATE TABLE `ks_km_zookeeper` (
|
||||||
|
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||||
|
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '物理集群ID',
|
||||||
|
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'zookeeper主机名',
|
||||||
|
`port` int(16) NOT NULL DEFAULT '-1' COMMENT 'zookeeper端口',
|
||||||
|
`role` varchar(16) NOT NULL DEFAULT '' COMMENT '角色, leader follower observer',
|
||||||
|
`version` varchar(128) NOT NULL DEFAULT '' COMMENT 'zookeeper版本',
|
||||||
|
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1存活,0未存活,11存活但是4字命令使用不了',
|
||||||
|
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||||
|
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||||
|
PRIMARY KEY (`id`),
|
||||||
|
UNIQUE KEY `uniq_cluster_phy_id_host_port` (`cluster_phy_id`,`host`, `port`)
|
||||||
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Zookeeper信息表';
|
||||||
|
|
||||||
|
|
||||||
|
DROP TABLE IF EXISTS `ks_km_group`;
|
||||||
|
CREATE TABLE `ks_km_group` (
|
||||||
|
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||||
|
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||||
|
`name` varchar(192) COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Group名称',
|
||||||
|
`member_count` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '成员数',
|
||||||
|
`topic_members` text CHARACTER SET utf8 COMMENT 'group消费的topic列表',
|
||||||
|
`partition_assignor` varchar(255) CHARACTER SET utf8 NOT NULL COMMENT '分配策略',
|
||||||
|
`coordinator_id` int(11) NOT NULL COMMENT 'group协调器brokerId',
|
||||||
|
`type` int(11) NOT NULL COMMENT 'group类型 0:consumer 1:connector',
|
||||||
|
`state` varchar(64) CHARACTER SET utf8 NOT NULL DEFAULT '' COMMENT '状态',
|
||||||
|
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||||
|
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||||
|
PRIMARY KEY (`id`),
|
||||||
|
UNIQUE KEY `uniq_cluster_phy_id_name` (`cluster_phy_id`,`name`)
|
||||||
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Group信息表';
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
### 6.2.2、升级至 `v3.0.0` 版本
|
||||||
|
|
||||||
|
**SQL 变更**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ALTER TABLE `ks_km_physical_cluster`
|
||||||
|
ADD COLUMN `zk_properties` TEXT NULL COMMENT 'ZK配置' AFTER `jmx_properties`;
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
### 6.2.3、升级至 `v3.0.0-beta.2`版本
|
||||||
|
|
||||||
|
**配置变更**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
|
||||||
|
# 新增配置
|
||||||
|
spring:
|
||||||
|
logi-security: # know-streaming 依赖的 logi-security 模块的数据库的配置,默认与 know-streaming 的数据库配置保持一致即可
|
||||||
|
login-extend-bean-name: logiSecurityDefaultLoginExtendImpl # 使用的登录系统Service的Bean名称,无需修改
|
||||||
|
|
||||||
|
# 线程池大小相关配置,在task模块中,新增了三类线程池,
|
||||||
|
# 从而减少不同类型任务之间的相互影响,以及减少对logi-job内的线程池的影响
|
||||||
|
thread-pool:
|
||||||
|
task: # 任务模块的配置
|
||||||
|
metrics: # metrics采集任务配置
|
||||||
|
thread-num: 18 # metrics采集任务线程池核心线程数
|
||||||
|
queue-size: 180 # metrics采集任务线程池队列大小
|
||||||
|
metadata: # metadata同步任务配置
|
||||||
|
thread-num: 27 # metadata同步任务线程池核心线程数
|
||||||
|
queue-size: 270 # metadata同步任务线程池队列大小
|
||||||
|
common: # 剩余其他任务配置
|
||||||
|
thread-num: 15 # 剩余其他任务线程池核心线程数
|
||||||
|
queue-size: 150 # 剩余其他任务线程池队列大小
|
||||||
|
|
||||||
|
# 删除配置,下列配置将不再使用
|
||||||
|
thread-pool:
|
||||||
|
task: # 任务模块的配置
|
||||||
|
heaven: # 采集任务配置
|
||||||
|
thread-num: 20 # 采集任务线程池核心线程数
|
||||||
|
queue-size: 1000 # 采集任务线程池队列大小
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
**SQL 变更**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- 多集群管理权限2022-09-06新增
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2000', '多集群管理查看', '1593', '1', '2', '多集群管理查看', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2002', 'Topic-迁移副本', '1593', '1', '2', 'Topic-迁移副本', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2004', 'Topic-扩缩副本', '1593', '1', '2', 'Topic-扩缩副本', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2006', 'Cluster-LoadReBalance-周期均衡', '1593', '1', '2', 'Cluster-LoadReBalance-周期均衡', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2008', 'Cluster-LoadReBalance-立即均衡', '1593', '1', '2', 'Cluster-LoadReBalance-立即均衡', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2010', 'Cluster-LoadReBalance-设置集群规格', '1593', '1', '2', 'Cluster-LoadReBalance-设置集群规格', '0', 'know-streaming');
|
||||||
|
|
||||||
|
|
||||||
|
-- 系统管理权限2022-09-06新增
|
||||||
|
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('3000', '系统管理查看', '1595', '1', '2', '系统管理查看', '0', 'know-streaming');
|
||||||
|
|
||||||
|
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2000', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2002', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2004', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2006', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2008', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2010', '0', 'know-streaming');
|
||||||
|
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '3000', '0', 'know-streaming');
|
||||||
|
|
||||||
|
-- 修改字段长度
|
||||||
|
ALTER TABLE `logi_security_oplog`
|
||||||
|
CHANGE COLUMN `operator_ip` `operator_ip` VARCHAR(64) NOT NULL COMMENT '操作者ip' ,
|
||||||
|
CHANGE COLUMN `operator` `operator` VARCHAR(64) NULL DEFAULT NULL COMMENT '操作者账号' ,
|
||||||
|
CHANGE COLUMN `operate_page` `operate_page` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '操作页面' ,
|
||||||
|
CHANGE COLUMN `operate_type` `operate_type` VARCHAR(64) NOT NULL COMMENT '操作类型' ,
|
||||||
|
CHANGE COLUMN `target_type` `target_type` VARCHAR(64) NOT NULL COMMENT '对象分类' ,
|
||||||
|
CHANGE COLUMN `target` `target` VARCHAR(1024) NOT NULL COMMENT '操作对象' ,
|
||||||
|
CHANGE COLUMN `operation_methods` `operation_methods` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '操作方式' ;
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6.2.4、升级至 `v3.0.0-beta.1`版本
|
||||||
|
|
||||||
|
**SQL 变更**
|
||||||
|
|
||||||
|
1、在`ks_km_broker`表增加了一个监听信息字段。
|
||||||
|
2、为`logi_security_oplog`表 operation_methods 字段设置默认值''。
|
||||||
|
因此需要执行下面的 sql 对数据库表进行更新。
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ALTER TABLE `ks_km_broker`
|
||||||
|
ADD COLUMN `endpoint_map` VARCHAR(1024) NOT NULL DEFAULT '' COMMENT '监听信息' AFTER `update_time`;
|
||||||
|
|
||||||
|
ALTER TABLE `logi_security_oplog`
|
||||||
|
ALTER COLUMN `operation_methods` set default '';
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6.2.5、`2.x`版本 升级至 `v3.0.0-beta.0`版本
|
||||||
|
|
||||||
**升级步骤:**
|
**升级步骤:**
|
||||||
|
|
||||||
@@ -24,14 +271,14 @@
|
|||||||
UPDATE ks_km_topic
|
UPDATE ks_km_topic
|
||||||
INNER JOIN
|
INNER JOIN
|
||||||
(SELECT
|
(SELECT
|
||||||
topic.cluster_id AS cluster_id,
|
topic.cluster_id AS cluster_id,
|
||||||
topic.topic_name AS topic_name,
|
topic.topic_name AS topic_name,
|
||||||
topic.description AS description
|
topic.description AS description
|
||||||
FROM topic WHERE description != ''
|
FROM topic WHERE description != ''
|
||||||
) AS t
|
) AS t
|
||||||
|
|
||||||
ON ks_km_topic.cluster_phy_id = t.cluster_id
|
ON ks_km_topic.cluster_phy_id = t.cluster_id
|
||||||
AND ks_km_topic.topic_name = t.topic_name
|
AND ks_km_topic.topic_name = t.topic_name
|
||||||
AND ks_km_topic.id > 0
|
AND ks_km_topic.id > 0
|
||||||
SET ks_km_topic.description = t.description;
|
SET ks_km_topic.description = t.description;
|
||||||
```
|
```
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
# FAQ
|
# FAQ
|
||||||
|
|
||||||
## 8.1、支持哪些 Kafka 版本?
|
## 8.1、支持哪些 Kafka 版本?
|
||||||
@@ -38,7 +37,7 @@
|
|||||||
|
|
||||||
## 8.4、`Jmx`连接失败如何解决?
|
## 8.4、`Jmx`连接失败如何解决?
|
||||||
|
|
||||||
- 参看 [Jmx 连接配置&问题解决](./9-attachment#jmx-连接失败问题解决) 说明。
|
- 参看 [Jmx 连接配置&问题解决](https://doc.knowstreaming.com/product/9-attachment#91jmx-%E8%BF%9E%E6%8E%A5%E5%A4%B1%E8%B4%A5%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3) 说明。
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -109,3 +108,77 @@ SECURITY.TRICK_USERS
|
|||||||
设置完成上面两步之后,就可以直接调用需要登录的接口了。
|
设置完成上面两步之后,就可以直接调用需要登录的接口了。
|
||||||
|
|
||||||
但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。
|
但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。
|
||||||
|
|
||||||
|
## 8.8、Specified key was too long; max key length is 767 bytes
|
||||||
|
|
||||||
|
**原因:** 不同版本的 InoDB 引擎,参数‘innodb_large_prefix’默认值不同,即在 5.6 默认值为 OFF,5.7 默认值为 ON。
|
||||||
|
|
||||||
|
对于引擎为 InnoDB,innodb_large_prefix=OFF,且行格式为 Antelope 即支持 REDUNDANT 或 COMPACT 时,索引键前缀长度最大为 767 字节。innodb_large_prefix=ON,且行格式为 Barracuda 即支持 DYNAMIC 或 COMPRESSED 时,索引键前缀长度最大为 3072 字节。
|
||||||
|
|
||||||
|
**解决方案:**
|
||||||
|
|
||||||
|
- 减少 varchar 字符大小低于 767/4=191。
|
||||||
|
- 将字符集改为 latin1(一个字符=一个字节)。
|
||||||
|
- 开启‘innodb_large_prefix’,修改默认行格式‘innodb_file_format’为 Barracuda,并设置 row_format=dynamic。
|
||||||
|
|
||||||
|
## 8.9、出现 ESIndexNotFoundEXception 报错
|
||||||
|
|
||||||
|
**原因 :**没有创建 ES 索引模版
|
||||||
|
|
||||||
|
**解决方案:**执行 init_es_template.sh 脚本,创建 ES 索引模版即可。
|
||||||
|
|
||||||
|
## 8.10、km-console 打包构建失败
|
||||||
|
|
||||||
|
首先,**请确保您正在使用最新版本**,版本列表见 [Tags](https://github.com/didi/KnowStreaming/tags)。如果不是最新版本,请升级后再尝试有无问题。
|
||||||
|
|
||||||
|
常见的原因是由于工程依赖没有正常安装,导致在打包过程中缺少依赖,造成打包失败。您可以检查是否有以下文件夹,且文件夹内是否有内容
|
||||||
|
|
||||||
|
```
|
||||||
|
KnowStreaming/km-console/node_modules
|
||||||
|
KnowStreaming/km-console/packages/layout-clusters-fe/node_modules
|
||||||
|
KnowStreaming/km-console/packages/config-manager-fe/node_modules
|
||||||
|
```
|
||||||
|
|
||||||
|
如果发现没有对应的 `node_modules` 目录或着目录内容为空,说明依赖没有安装成功。请按以下步骤操作,
|
||||||
|
|
||||||
|
1. 手动删除上述三个文件夹(如果有)
|
||||||
|
|
||||||
|
2. 如果之前是通过 `mvn install` 打包 `km-console`,请到项目根目录(KnowStreaming)下重新输入该指令进行打包。观察打包过程有无报错。如有报错,请见步骤 4。
|
||||||
|
|
||||||
|
3. 如果是通过本地独立构建前端工程的方式(指直接执行 `npm run build`),请进入 `KnowStreaming/km-console` 目录,执行下述步骤(注意:执行时请确保您在使用 `node v12` 版本)
|
||||||
|
|
||||||
|
a. 执行 `npm run i`。如有报错,请见步骤 4。
|
||||||
|
|
||||||
|
b. 执行 `npm run build`。如有报错,请见步骤 4。
|
||||||
|
|
||||||
|
4. 麻烦联系我们协助解决。推荐提供以下信息,方面我们快速定位问题,示例如下。
|
||||||
|
|
||||||
|
```
|
||||||
|
操作系统: Mac
|
||||||
|
命令行终端:bash
|
||||||
|
Node 版本: v12.22.12
|
||||||
|
复现步骤: 1. -> 2.
|
||||||
|
错误截图:
|
||||||
|
```
|
||||||
|
|
||||||
|
## 8.11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用?
|
||||||
|
|
||||||
|
需要到具体的应用中执行 `npm run start`,例如 `cd packages/layout-clusters-fe` 后,执行 `npm run start`。
|
||||||
|
|
||||||
|
应用启动后需要到基座应用中查看(需要启动基座应用,即 layout-clusters-fe)。
|
||||||
|
|
||||||
|
|
||||||
|
## 8.12、权限识别失败问题
|
||||||
|
1、使用admin账号登陆KnowStreaming时,点击系统管理-用户管理-角色管理-新增角色,查看页面是否正常。
|
||||||
|
|
||||||
|
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_gwGfjN9N92UxzHU8dfzr" width = "400" >
|
||||||
|
|
||||||
|
2、查看'/logi-security/api/v1/permission/tree'接口返回值,出现如下图所示乱码现象。
|
||||||
|

|
||||||
|
|
||||||
|
3、查看logi_security_permission表,看看是否出现了中文乱码现象。
|
||||||
|
|
||||||
|
根据以上几点,我们可以确定是由于数据库乱码造成的权限识别失败问题。
|
||||||
|
|
||||||
|
+ 原因:由于数据库编码和我们提供的脚本不一致,数据库里的数据发生了乱码,因此出现权限识别失败问题。
|
||||||
|
+ 解决方案:清空数据库数据,将数据库字符集调整为utf8,最后重新执行[dml-logi.sql](https://github.com/didi/KnowStreaming/blob/master/km-dist/init/sql/dml-logi.sql)脚本导入数据即可。
|
||||||
|
|||||||
@@ -11,7 +11,7 @@
|
|||||||
|
|
||||||
下面是用户第一次使用我们产品的典型体验路径:
|
下面是用户第一次使用我们产品的典型体验路径:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## 5.3、常用功能
|
## 5.3、常用功能
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,19 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.biz.cluster;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterZookeepersOverviewDTO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.zookeeper.ClusterZookeepersOverviewVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.zookeeper.ClusterZookeepersStateVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.zookeeper.ZnodeVO;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 多集群总体状态
|
||||||
|
*/
|
||||||
|
public interface ClusterZookeepersManager {
|
||||||
|
Result<ClusterZookeepersStateVO> getClusterPhyZookeepersState(Long clusterPhyId);
|
||||||
|
|
||||||
|
PaginationResult<ClusterZookeepersOverviewVO> getClusterPhyZookeepersOverview(Long clusterPhyId, ClusterZookeepersOverviewDTO dto);
|
||||||
|
|
||||||
|
Result<ZnodeVO> getZnodeVO(Long clusterPhyId, String path);
|
||||||
|
}
|
||||||
@@ -14,6 +14,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
|
|||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersOverviewVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersStateVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersStateVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.kafkacontroller.KafkaControllerVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.kafkacontroller.KafkaControllerVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
|
||||||
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
|
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
|
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
|
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
|
||||||
@@ -23,6 +24,7 @@ import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerMetricService;
|
|||||||
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
|
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
|
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
|
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
import org.springframework.stereotype.Service;
|
import org.springframework.stereotype.Service;
|
||||||
|
|
||||||
@@ -50,6 +52,9 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
|||||||
@Autowired
|
@Autowired
|
||||||
private KafkaControllerService kafkaControllerService;
|
private KafkaControllerService kafkaControllerService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private KafkaJMXClient kafkaJMXClient;
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public PaginationResult<ClusterBrokersOverviewVO> getClusterPhyBrokersOverview(Long clusterPhyId, ClusterBrokersOverviewDTO dto) {
|
public PaginationResult<ClusterBrokersOverviewVO> getClusterPhyBrokersOverview(Long clusterPhyId, ClusterBrokersOverviewDTO dto) {
|
||||||
// 获取集群Broker列表
|
// 获取集群Broker列表
|
||||||
@@ -71,6 +76,13 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
|||||||
Topic groupTopic = topicService.getTopic(clusterPhyId, org.apache.kafka.common.internals.Topic.GROUP_METADATA_TOPIC_NAME);
|
Topic groupTopic = topicService.getTopic(clusterPhyId, org.apache.kafka.common.internals.Topic.GROUP_METADATA_TOPIC_NAME);
|
||||||
Topic transactionTopic = topicService.getTopic(clusterPhyId, org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME);
|
Topic transactionTopic = topicService.getTopic(clusterPhyId, org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME);
|
||||||
|
|
||||||
|
//获取controller信息
|
||||||
|
KafkaController kafkaController = kafkaControllerService.getKafkaControllerFromDB(clusterPhyId);
|
||||||
|
|
||||||
|
//获取jmx状态信息
|
||||||
|
Map<Integer, Boolean> jmxConnectedMap = new HashMap<>();
|
||||||
|
brokerList.forEach(elem -> jmxConnectedMap.put(elem.getBrokerId(), kafkaJMXClient.getClientWithCheck(clusterPhyId, elem.getBrokerId()) != null));
|
||||||
|
|
||||||
// 格式转换
|
// 格式转换
|
||||||
return PaginationResult.buildSuc(
|
return PaginationResult.buildSuc(
|
||||||
this.convert2ClusterBrokersOverviewVOList(
|
this.convert2ClusterBrokersOverviewVOList(
|
||||||
@@ -78,7 +90,9 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
|||||||
brokerList,
|
brokerList,
|
||||||
metricsResult.getData(),
|
metricsResult.getData(),
|
||||||
groupTopic,
|
groupTopic,
|
||||||
transactionTopic
|
transactionTopic,
|
||||||
|
kafkaController,
|
||||||
|
jmxConnectedMap
|
||||||
),
|
),
|
||||||
paginationResult
|
paginationResult
|
||||||
);
|
);
|
||||||
@@ -159,22 +173,25 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
|||||||
List<Broker> brokerList,
|
List<Broker> brokerList,
|
||||||
List<BrokerMetrics> metricsList,
|
List<BrokerMetrics> metricsList,
|
||||||
Topic groupTopic,
|
Topic groupTopic,
|
||||||
Topic transactionTopic) {
|
Topic transactionTopic,
|
||||||
Map<Integer, BrokerMetrics> metricsMap = metricsList == null? new HashMap<>(): metricsList.stream().collect(Collectors.toMap(BrokerMetrics::getBrokerId, Function.identity()));
|
KafkaController kafkaController,
|
||||||
|
Map<Integer, Boolean> jmxConnectedMap) {
|
||||||
|
Map<Integer, BrokerMetrics> metricsMap = metricsList == null ? new HashMap<>() : metricsList.stream().collect(Collectors.toMap(BrokerMetrics::getBrokerId, Function.identity()));
|
||||||
|
|
||||||
Map<Integer, Broker> brokerMap = brokerList == null? new HashMap<>(): brokerList.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
|
Map<Integer, Broker> brokerMap = brokerList == null ? new HashMap<>() : brokerList.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
|
||||||
|
|
||||||
List<ClusterBrokersOverviewVO> voList = new ArrayList<>(pagedBrokerIdList.size());
|
List<ClusterBrokersOverviewVO> voList = new ArrayList<>(pagedBrokerIdList.size());
|
||||||
for (Integer brokerId : pagedBrokerIdList) {
|
for (Integer brokerId : pagedBrokerIdList) {
|
||||||
Broker broker = brokerMap.get(brokerId);
|
Broker broker = brokerMap.get(brokerId);
|
||||||
BrokerMetrics brokerMetrics = metricsMap.get(brokerId);
|
BrokerMetrics brokerMetrics = metricsMap.get(brokerId);
|
||||||
|
Boolean jmxConnected = jmxConnectedMap.get(brokerId);
|
||||||
|
|
||||||
voList.add(this.convert2ClusterBrokersOverviewVO(brokerId, broker, brokerMetrics, groupTopic, transactionTopic));
|
voList.add(this.convert2ClusterBrokersOverviewVO(brokerId, broker, brokerMetrics, groupTopic, transactionTopic, kafkaController, jmxConnected));
|
||||||
}
|
}
|
||||||
return voList;
|
return voList;
|
||||||
}
|
}
|
||||||
|
|
||||||
private ClusterBrokersOverviewVO convert2ClusterBrokersOverviewVO(Integer brokerId, Broker broker, BrokerMetrics brokerMetrics, Topic groupTopic, Topic transactionTopic) {
|
private ClusterBrokersOverviewVO convert2ClusterBrokersOverviewVO(Integer brokerId, Broker broker, BrokerMetrics brokerMetrics, Topic groupTopic, Topic transactionTopic, KafkaController kafkaController, Boolean jmxConnected) {
|
||||||
ClusterBrokersOverviewVO clusterBrokersOverviewVO = new ClusterBrokersOverviewVO();
|
ClusterBrokersOverviewVO clusterBrokersOverviewVO = new ClusterBrokersOverviewVO();
|
||||||
clusterBrokersOverviewVO.setBrokerId(brokerId);
|
clusterBrokersOverviewVO.setBrokerId(brokerId);
|
||||||
if (broker != null) {
|
if (broker != null) {
|
||||||
@@ -192,8 +209,12 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
|||||||
if (transactionTopic != null && transactionTopic.getBrokerIdSet().contains(brokerId)) {
|
if (transactionTopic != null && transactionTopic.getBrokerIdSet().contains(brokerId)) {
|
||||||
clusterBrokersOverviewVO.getKafkaRoleList().add(transactionTopic.getTopicName());
|
clusterBrokersOverviewVO.getKafkaRoleList().add(transactionTopic.getTopicName());
|
||||||
}
|
}
|
||||||
|
if (kafkaController != null && kafkaController.getBrokerId().equals(brokerId)) {
|
||||||
|
clusterBrokersOverviewVO.getKafkaRoleList().add(KafkaConstant.CONTROLLER_ROLE);
|
||||||
|
}
|
||||||
|
|
||||||
clusterBrokersOverviewVO.setLatestMetrics(brokerMetrics);
|
clusterBrokersOverviewVO.setLatestMetrics(brokerMetrics);
|
||||||
|
clusterBrokersOverviewVO.setJmxConnected(jmxConnected);
|
||||||
return clusterBrokersOverviewVO;
|
return clusterBrokersOverviewVO;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,137 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.biz.cluster.impl;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.biz.cluster.ClusterZookeepersManager;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterZookeepersOverviewDTO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ZookeeperMetrics;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.ZookeeperMetricParam;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.Znode;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.zookeeper.ClusterZookeepersOverviewVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.zookeeper.ClusterZookeepersStateVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.zookeeper.ZnodeVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.zookeeper.ZKRoleEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ZookeeperMetricVersionItems;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZnodeService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperMetricService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.stereotype.Service;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
|
|
||||||
|
@Service
|
||||||
|
public class ClusterZookeepersManagerImpl implements ClusterZookeepersManager {
|
||||||
|
private static final ILog LOGGER = LogFactory.getLog(ClusterZookeepersManagerImpl.class);
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private ClusterPhyService clusterPhyService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private ZookeeperService zookeeperService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private ZookeeperMetricService zookeeperMetricService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private ZnodeService znodeService;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Result<ClusterZookeepersStateVO> getClusterPhyZookeepersState(Long clusterPhyId) {
|
||||||
|
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
|
||||||
|
if (clusterPhy == null) {
|
||||||
|
return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
|
||||||
|
}
|
||||||
|
|
||||||
|
// // TODO
|
||||||
|
// private Integer healthState;
|
||||||
|
// private Integer healthCheckPassed;
|
||||||
|
// private Integer healthCheckTotal;
|
||||||
|
|
||||||
|
List<ZookeeperInfo> infoList = zookeeperService.listFromDBByCluster(clusterPhyId);
|
||||||
|
|
||||||
|
ClusterZookeepersStateVO vo = new ClusterZookeepersStateVO();
|
||||||
|
vo.setTotalServerCount(infoList.size());
|
||||||
|
vo.setAliveFollowerCount(0);
|
||||||
|
vo.setTotalFollowerCount(0);
|
||||||
|
vo.setAliveObserverCount(0);
|
||||||
|
vo.setTotalObserverCount(0);
|
||||||
|
vo.setAliveServerCount(0);
|
||||||
|
for (ZookeeperInfo info: infoList) {
|
||||||
|
if (info.getRole().equals(ZKRoleEnum.LEADER.getRole())) {
|
||||||
|
vo.setLeaderNode(info.getHost());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (info.getRole().equals(ZKRoleEnum.FOLLOWER.getRole())) {
|
||||||
|
vo.setTotalFollowerCount(vo.getTotalFollowerCount() + 1);
|
||||||
|
vo.setAliveFollowerCount(info.alive()? vo.getAliveFollowerCount() + 1: vo.getAliveFollowerCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (info.getRole().equals(ZKRoleEnum.OBSERVER.getRole())) {
|
||||||
|
vo.setTotalObserverCount(vo.getTotalObserverCount() + 1);
|
||||||
|
vo.setAliveObserverCount(info.alive()? vo.getAliveObserverCount() + 1: vo.getAliveObserverCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (info.alive()) {
|
||||||
|
vo.setAliveServerCount(vo.getAliveServerCount() + 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Result<ZookeeperMetrics> metricsResult = zookeeperMetricService.collectMetricsFromZookeeper(new ZookeeperMetricParam(
|
||||||
|
clusterPhyId,
|
||||||
|
infoList.stream().filter(elem -> elem.alive()).map(item -> new Tuple<String, Integer>(item.getHost(), item.getPort())).collect(Collectors.toList()),
|
||||||
|
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class),
|
||||||
|
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_WATCH_COUNT
|
||||||
|
));
|
||||||
|
if (metricsResult.failed()) {
|
||||||
|
LOGGER.error(
|
||||||
|
"class=ClusterZookeepersManagerImpl||method=getClusterPhyZookeepersState||clusterPhyId={}||errMsg={}",
|
||||||
|
clusterPhyId, metricsResult.getMessage()
|
||||||
|
);
|
||||||
|
return Result.buildSuc(vo);
|
||||||
|
}
|
||||||
|
Float watchCount = metricsResult.getData().getMetric(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_WATCH_COUNT);
|
||||||
|
vo.setWatchCount(watchCount != null? watchCount.intValue(): null);
|
||||||
|
|
||||||
|
return Result.buildSuc(vo);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public PaginationResult<ClusterZookeepersOverviewVO> getClusterPhyZookeepersOverview(Long clusterPhyId, ClusterZookeepersOverviewDTO dto) {
|
||||||
|
//获取集群zookeeper列表
|
||||||
|
List<ClusterZookeepersOverviewVO> clusterZookeepersOverviewVOList = ConvertUtil.list2List(zookeeperService.listFromDBByCluster(clusterPhyId), ClusterZookeepersOverviewVO.class);
|
||||||
|
|
||||||
|
//搜索
|
||||||
|
clusterZookeepersOverviewVOList = PaginationUtil.pageByFuzzyFilter(clusterZookeepersOverviewVOList, dto.getSearchKeywords(), Arrays.asList("host"));
|
||||||
|
|
||||||
|
//分页
|
||||||
|
PaginationResult<ClusterZookeepersOverviewVO> paginationResult = PaginationUtil.pageBySubData(clusterZookeepersOverviewVOList, dto);
|
||||||
|
|
||||||
|
return paginationResult;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Result<ZnodeVO> getZnodeVO(Long clusterPhyId, String path) {
|
||||||
|
Result<Znode> result = znodeService.getZnode(clusterPhyId, path);
|
||||||
|
if (result.failed()) {
|
||||||
|
return Result.buildFromIgnoreData(result);
|
||||||
|
}
|
||||||
|
return Result.buildSuc(ConvertUtil.obj2ObjByJSON(result.getData(), ZnodeVO.class));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**************************************************** private method ****************************************************/
|
||||||
|
|
||||||
|
}
|
||||||
@@ -1,11 +1,14 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.biz.group;
|
package com.xiaojukeji.know.streaming.km.biz.group;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterGroupSummaryDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicConsumedDetailVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicConsumedDetailVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
|
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
|
||||||
@@ -22,6 +25,10 @@ public interface GroupManager {
|
|||||||
String searchGroupKeyword,
|
String searchGroupKeyword,
|
||||||
PaginationBaseDTO dto);
|
PaginationBaseDTO dto);
|
||||||
|
|
||||||
|
PaginationResult<GroupTopicOverviewVO> pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto);
|
||||||
|
|
||||||
|
PaginationResult<GroupOverviewVO> pagingClusterGroupsOverview(Long clusterPhyId, ClusterGroupSummaryDTO dto);
|
||||||
|
|
||||||
PaginationResult<GroupTopicConsumedDetailVO> pagingGroupTopicConsumedMetrics(Long clusterPhyId,
|
PaginationResult<GroupTopicConsumedDetailVO> pagingGroupTopicConsumedMetrics(Long clusterPhyId,
|
||||||
String topicName,
|
String topicName,
|
||||||
String groupName,
|
String groupName,
|
||||||
@@ -31,4 +38,6 @@ public interface GroupManager {
|
|||||||
Result<Set<TopicPartitionKS>> listClusterPhyGroupPartitions(Long clusterPhyId, String groupName, Long startTime, Long endTime);
|
Result<Set<TopicPartitionKS>> listClusterPhyGroupPartitions(Long clusterPhyId, String groupName, Long startTime, Long endTime);
|
||||||
|
|
||||||
Result<Void> resetGroupOffsets(GroupOffsetResetDTO dto, String operator) throws Exception;
|
Result<Void> resetGroupOffsets(GroupOffsetResetDTO dto, String operator) throws Exception;
|
||||||
|
|
||||||
|
List<GroupTopicOverviewVO> getGroupTopicOverviewVOList (Long clusterPhyId, List<GroupMemberPO> groupMemberPOList);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,11 +3,14 @@ package com.xiaojukeji.know.streaming.km.biz.group.impl;
|
|||||||
import com.didiglobal.logi.log.ILog;
|
import com.didiglobal.logi.log.ILog;
|
||||||
import com.didiglobal.logi.log.LogFactory;
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
|
import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterGroupSummaryDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.Group;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopic;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopic;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopicMember;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
@@ -15,11 +18,16 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
|
|||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
|
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicConsumedDetailVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicConsumedDetailVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.PaginationConstant;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.converter.GroupConverter;
|
||||||
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
|
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
|
||||||
import com.xiaojukeji.know.streaming.km.common.enums.GroupOffsetResetEnum;
|
import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
|
||||||
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
|
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
|
||||||
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
|
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
@@ -70,30 +78,60 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
String searchGroupKeyword,
|
String searchGroupKeyword,
|
||||||
PaginationBaseDTO dto) {
|
PaginationBaseDTO dto) {
|
||||||
PaginationResult<GroupMemberPO> paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, groupName, searchTopicKeyword, searchGroupKeyword, dto);
|
PaginationResult<GroupMemberPO> paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, groupName, searchTopicKeyword, searchGroupKeyword, dto);
|
||||||
if (paginationResult.failed()) {
|
|
||||||
return PaginationResult.buildFailure(paginationResult, dto);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!paginationResult.hasData()) {
|
if (!paginationResult.hasData()) {
|
||||||
|
return PaginationResult.buildSuc(new ArrayList<>(), paginationResult);
|
||||||
|
}
|
||||||
|
|
||||||
|
List<GroupTopicOverviewVO> groupTopicVOList = this.getGroupTopicOverviewVOList(clusterPhyId, paginationResult.getData().getBizData());
|
||||||
|
|
||||||
|
return PaginationResult.buildSuc(groupTopicVOList, paginationResult);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public PaginationResult<GroupTopicOverviewVO> pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) {
|
||||||
|
Group group = groupService.getGroupFromDB(clusterPhyId, groupName);
|
||||||
|
|
||||||
|
//没有topicMember则直接返回
|
||||||
|
if (group == null || ValidateUtils.isEmptyList(group.getTopicMembers())) {
|
||||||
return PaginationResult.buildSuc(dto);
|
return PaginationResult.buildSuc(dto);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取指标
|
//排序
|
||||||
Result<List<GroupMetrics>> metricsListResult = groupMetricService.listLatestMetricsAggByGroupTopicFromES(
|
List<GroupTopicMember> groupTopicMembers = PaginationUtil.pageBySort(group.getTopicMembers(), PaginationConstant.DEFAULT_GROUP_TOPIC_SORTED_FIELD, SortTypeEnum.DESC.getSortType());
|
||||||
clusterPhyId,
|
|
||||||
paginationResult.getData().getBizData().stream().map(elem -> new GroupTopic(elem.getGroupName(), elem.getTopicName())).collect(Collectors.toList()),
|
//分页
|
||||||
Arrays.asList(GroupMetricVersionItems.GROUP_METRIC_LAG),
|
PaginationResult<GroupTopicMember> paginationResult = PaginationUtil.pageBySubData(groupTopicMembers, dto);
|
||||||
AggTypeEnum.MAX
|
|
||||||
);
|
List<GroupMemberPO> groupMemberPOList = paginationResult.getData().getBizData().stream().map(elem -> new GroupMemberPO(clusterPhyId, elem.getTopicName(), groupName, group.getState().getState(), elem.getMemberCount())).collect(Collectors.toList());
|
||||||
if (metricsListResult.failed()) {
|
|
||||||
// 如果查询失败,则输出错误信息,但是依旧进行已有数据的返回
|
return PaginationResult.buildSuc(this.getGroupTopicOverviewVOList(clusterPhyId, groupMemberPOList), paginationResult);
|
||||||
log.error("method=pagingGroupMembers||clusterPhyId={}||topicName={}||groupName={}||result={}||errMsg=search es failed", clusterPhyId, topicName, groupName, metricsListResult);
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public PaginationResult<GroupOverviewVO> pagingClusterGroupsOverview(Long clusterPhyId, ClusterGroupSummaryDTO dto) {
|
||||||
|
List<Group> groupList = groupService.listClusterGroups(clusterPhyId);
|
||||||
|
|
||||||
|
// 类型转化
|
||||||
|
List<GroupOverviewVO> voList = groupList.stream().map(elem -> GroupConverter.convert2GroupOverviewVO(elem)).collect(Collectors.toList());
|
||||||
|
|
||||||
|
// 搜索groupName
|
||||||
|
voList = PaginationUtil.pageByFuzzyFilter(voList, dto.getSearchGroupName(), Arrays.asList("name"));
|
||||||
|
|
||||||
|
//搜索topic
|
||||||
|
if (!ValidateUtils.isBlank(dto.getSearchTopicName())) {
|
||||||
|
voList = voList.stream().filter(elem -> {
|
||||||
|
for (String topicName : elem.getTopicNameList()) {
|
||||||
|
if (topicName.contains(dto.getSearchTopicName())) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}).collect(Collectors.toList());
|
||||||
}
|
}
|
||||||
|
|
||||||
return PaginationResult.buildSuc(
|
// 分页 后 返回
|
||||||
this.convert2GroupTopicOverviewVOList(paginationResult.getData().getBizData(), metricsListResult.getData()),
|
return PaginationUtil.pageBySubData(voList, dto);
|
||||||
paginationResult
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@@ -103,7 +141,7 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
List<String> latestMetricNames,
|
List<String> latestMetricNames,
|
||||||
PaginationSortDTO dto) throws NotExistException, AdminOperateException {
|
PaginationSortDTO dto) throws NotExistException, AdminOperateException {
|
||||||
// 获取消费组消费的TopicPartition列表
|
// 获取消费组消费的TopicPartition列表
|
||||||
Map<TopicPartition, Long> consumedOffsetMap = groupService.getGroupOffset(clusterPhyId, groupName);
|
Map<TopicPartition, Long> consumedOffsetMap = groupService.getGroupOffsetFromKafka(clusterPhyId, groupName);
|
||||||
List<Integer> partitionList = consumedOffsetMap.keySet()
|
List<Integer> partitionList = consumedOffsetMap.keySet()
|
||||||
.stream()
|
.stream()
|
||||||
.filter(elem -> elem.topic().equals(topicName))
|
.filter(elem -> elem.topic().equals(topicName))
|
||||||
@@ -112,7 +150,7 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
Collections.sort(partitionList);
|
Collections.sort(partitionList);
|
||||||
|
|
||||||
// 获取消费组当前运行信息
|
// 获取消费组当前运行信息
|
||||||
ConsumerGroupDescription groupDescription = groupService.getGroupDescription(clusterPhyId, groupName);
|
ConsumerGroupDescription groupDescription = groupService.getGroupDescriptionFromKafka(clusterPhyId, groupName);
|
||||||
|
|
||||||
// 转换存储格式
|
// 转换存储格式
|
||||||
Map<TopicPartition, MemberDescription> tpMemberMap = new HashMap<>();
|
Map<TopicPartition, MemberDescription> tpMemberMap = new HashMap<>();
|
||||||
@@ -165,13 +203,13 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
||||||
ConsumerGroupDescription description = groupService.getGroupDescription(dto.getClusterId(), dto.getGroupName());
|
ConsumerGroupDescription description = groupService.getGroupDescriptionFromKafka(dto.getClusterId(), dto.getGroupName());
|
||||||
if (ConsumerGroupState.DEAD.equals(description.state()) && !dto.isCreateIfNotExist()) {
|
if (ConsumerGroupState.DEAD.equals(description.state()) && !dto.isCreateIfNotExist()) {
|
||||||
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, "group不存在, 重置失败");
|
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, "group不存在, 重置失败");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!ConsumerGroupState.EMPTY.equals(description.state()) && !ConsumerGroupState.DEAD.equals(description.state())) {
|
if (!ConsumerGroupState.EMPTY.equals(description.state()) && !ConsumerGroupState.DEAD.equals(description.state())) {
|
||||||
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, String.format("group处于%s, 重置失败(仅Empty情况可重置)", description.state().name()));
|
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, String.format("group处于%s, 重置失败(仅Empty情况可重置)", GroupStateEnum.getByRawState(description.state()).getState()));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取offset
|
// 获取offset
|
||||||
@@ -184,6 +222,22 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
return groupService.resetGroupOffsets(dto.getClusterId(), dto.getGroupName(), offsetMapResult.getData(), operator);
|
return groupService.resetGroupOffsets(dto.getClusterId(), dto.getGroupName(), offsetMapResult.getData(), operator);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<GroupTopicOverviewVO> getGroupTopicOverviewVOList(Long clusterPhyId, List<GroupMemberPO> groupMemberPOList) {
|
||||||
|
// 获取指标
|
||||||
|
Result<List<GroupMetrics>> metricsListResult = groupMetricService.listLatestMetricsAggByGroupTopicFromES(
|
||||||
|
clusterPhyId,
|
||||||
|
groupMemberPOList.stream().map(elem -> new GroupTopic(elem.getGroupName(), elem.getTopicName())).collect(Collectors.toList()),
|
||||||
|
Arrays.asList(GroupMetricVersionItems.GROUP_METRIC_LAG),
|
||||||
|
AggTypeEnum.MAX
|
||||||
|
);
|
||||||
|
if (metricsListResult.failed()) {
|
||||||
|
// 如果查询失败,则输出错误信息,但是依旧进行已有数据的返回
|
||||||
|
log.error("method=completeMetricData||clusterPhyId={}||result={}||errMsg=search es failed", clusterPhyId, metricsListResult);
|
||||||
|
}
|
||||||
|
return this.convert2GroupTopicOverviewVOList(groupMemberPOList, metricsListResult.getData());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/**************************************************** private method ****************************************************/
|
/**************************************************** private method ****************************************************/
|
||||||
|
|
||||||
@@ -198,12 +252,12 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(dto.getClusterId(), dto.getTopicName()));
|
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(dto.getClusterId(), dto.getTopicName()));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (GroupOffsetResetEnum.PRECISE_OFFSET.getResetType() == dto.getResetType()
|
if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getResetType()
|
||||||
&& ValidateUtils.isEmptyList(dto.getOffsetList())) {
|
&& ValidateUtils.isEmptyList(dto.getOffsetList())) {
|
||||||
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "参数错误,指定offset重置需传offset信息");
|
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "参数错误,指定offset重置需传offset信息");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (GroupOffsetResetEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()
|
if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()
|
||||||
&& ValidateUtils.isNull(dto.getTimestamp())) {
|
&& ValidateUtils.isNull(dto.getTimestamp())) {
|
||||||
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "参数错误,指定时间重置需传时间信息");
|
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "参数错误,指定时间重置需传时间信息");
|
||||||
}
|
}
|
||||||
@@ -212,7 +266,7 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private Result<Map<TopicPartition, Long>> getPartitionOffset(GroupOffsetResetDTO dto) {
|
private Result<Map<TopicPartition, Long>> getPartitionOffset(GroupOffsetResetDTO dto) {
|
||||||
if (GroupOffsetResetEnum.PRECISE_OFFSET.getResetType() == dto.getResetType()) {
|
if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getResetType()) {
|
||||||
return Result.buildSuc(dto.getOffsetList().stream().collect(Collectors.toMap(
|
return Result.buildSuc(dto.getOffsetList().stream().collect(Collectors.toMap(
|
||||||
elem -> new TopicPartition(dto.getTopicName(), elem.getPartitionId()),
|
elem -> new TopicPartition(dto.getTopicName(), elem.getPartitionId()),
|
||||||
PartitionOffsetDTO::getOffset,
|
PartitionOffsetDTO::getOffset,
|
||||||
@@ -221,9 +275,9 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
OffsetSpec offsetSpec = null;
|
OffsetSpec offsetSpec = null;
|
||||||
if (GroupOffsetResetEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()) {
|
if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()) {
|
||||||
offsetSpec = OffsetSpec.forTimestamp(dto.getTimestamp());
|
offsetSpec = OffsetSpec.forTimestamp(dto.getTimestamp());
|
||||||
} else if (GroupOffsetResetEnum.EARLIEST.getResetType() == dto.getResetType()) {
|
} else if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getResetType()) {
|
||||||
offsetSpec = OffsetSpec.earliest();
|
offsetSpec = OffsetSpec.earliest();
|
||||||
} else {
|
} else {
|
||||||
offsetSpec = OffsetSpec.latest();
|
offsetSpec = OffsetSpec.latest();
|
||||||
@@ -271,15 +325,11 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
|
|
||||||
|
|
||||||
// 获取Group指标信息
|
// 获取Group指标信息
|
||||||
Result<List<GroupMetrics>> groupMetricsResult = groupMetricService.listPartitionLatestMetricsFromES(
|
Result<List<GroupMetrics>> groupMetricsResult = groupMetricService.collectGroupMetricsFromKafka(clusterPhyId, groupName, latestMetricNames == null ? Arrays.asList() : latestMetricNames);
|
||||||
clusterPhyId,
|
|
||||||
groupName,
|
|
||||||
topicName,
|
|
||||||
latestMetricNames == null? Arrays.asList(): latestMetricNames
|
|
||||||
);
|
|
||||||
|
|
||||||
// 转换Group指标
|
// 转换Group指标
|
||||||
List<GroupMetrics> esGroupMetricsList = groupMetricsResult.hasData()? groupMetricsResult.getData(): new ArrayList<>();
|
List<GroupMetrics> esGroupMetricsList = groupMetricsResult.hasData() ? groupMetricsResult.getData().stream().filter(elem -> topicName.equals(elem.getTopic())).collect(Collectors.toList()) : new ArrayList<>();
|
||||||
Map<Integer, GroupMetrics> esMetricsMap = new HashMap<>();
|
Map<Integer, GroupMetrics> esMetricsMap = new HashMap<>();
|
||||||
for (GroupMetrics groupMetrics: esGroupMetricsList) {
|
for (GroupMetrics groupMetrics: esGroupMetricsList) {
|
||||||
esMetricsMap.put(groupMetrics.getPartitionId(), groupMetrics);
|
esMetricsMap.put(groupMetrics.getPartitionId(), groupMetrics);
|
||||||
@@ -296,4 +346,31 @@ public class GroupManagerImpl implements GroupManager {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private List<GroupTopicOverviewVO> convert2GroupTopicOverviewVOList(String groupName, String state, List<GroupTopicMember> groupTopicList, List<GroupMetrics> metricsList) {
|
||||||
|
if (metricsList == null) {
|
||||||
|
metricsList = new ArrayList<>();
|
||||||
|
}
|
||||||
|
|
||||||
|
// <TopicName, GroupMetrics>
|
||||||
|
Map<String, GroupMetrics> metricsMap = new HashMap<>();
|
||||||
|
for (GroupMetrics metrics : metricsList) {
|
||||||
|
if (!groupName.equals(metrics.getGroup())) continue;
|
||||||
|
metricsMap.put(metrics.getTopic(), metrics);
|
||||||
|
}
|
||||||
|
|
||||||
|
List<GroupTopicOverviewVO> voList = new ArrayList<>();
|
||||||
|
for (GroupTopicMember po : groupTopicList) {
|
||||||
|
GroupTopicOverviewVO vo = ConvertUtil.obj2Obj(po, GroupTopicOverviewVO.class);
|
||||||
|
vo.setGroupName(groupName);
|
||||||
|
vo.setState(state);
|
||||||
|
GroupMetrics metrics = metricsMap.get(po.getTopicName());
|
||||||
|
if (metrics != null) {
|
||||||
|
vo.setMaxLag(ConvertUtil.Float2Long(metrics.getMetrics().get(GroupMetricVersionItems.GROUP_METRIC_LAG)));
|
||||||
|
}
|
||||||
|
|
||||||
|
voList.add(vo);
|
||||||
|
}
|
||||||
|
return voList;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,10 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.biz.topic;
|
package com.xiaojukeji.know.streaming.km.biz.topic;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicRecordDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicRecordDTO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicBrokersPartitionsSummaryVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicBrokersPartitionsSummaryVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicStateVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicStateVO;
|
||||||
@@ -22,4 +25,6 @@ public interface TopicStateManager {
|
|||||||
Result<List<TopicPartitionVO>> getTopicPartitions(Long clusterPhyId, String topicName, List<String> metricsNames);
|
Result<List<TopicPartitionVO>> getTopicPartitions(Long clusterPhyId, String topicName, List<String> metricsNames);
|
||||||
|
|
||||||
Result<TopicBrokersPartitionsSummaryVO> getTopicBrokersPartitionsSummary(Long clusterPhyId, String topicName);
|
Result<TopicBrokersPartitionsSummaryVO> getTopicBrokersPartitionsSummary(Long clusterPhyId, String topicName);
|
||||||
|
|
||||||
|
PaginationResult<GroupTopicOverviewVO> pagingTopicGroupsOverview(Long clusterPhyId, String topicName, String searchGroupName, PaginationBaseDTO dto);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,17 +2,22 @@ package com.xiaojukeji.know.streaming.km.biz.topic.impl;
|
|||||||
|
|
||||||
import com.didiglobal.logi.log.ILog;
|
import com.didiglobal.logi.log.ILog;
|
||||||
import com.didiglobal.logi.log.LogFactory;
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
|
||||||
import com.xiaojukeji.know.streaming.km.biz.topic.TopicStateManager;
|
import com.xiaojukeji.know.streaming.km.biz.topic.TopicStateManager;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicRecordDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicRecordDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.PartitionMetrics;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.PartitionMetrics;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.broker.BrokerReplicaSummaryVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.broker.BrokerReplicaSummaryVO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicBrokersPartitionsSummaryVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicBrokersPartitionsSummaryVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicStateVO;
|
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicStateVO;
|
||||||
@@ -22,25 +27,27 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.partition.TopicPart
|
|||||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
|
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
|
||||||
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
||||||
import com.xiaojukeji.know.streaming.km.common.converter.PartitionConverter;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter;
|
import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
|
||||||
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
|
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
|
||||||
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
|
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
|
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
|
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService;
|
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
|
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
|
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
|
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
|
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
|
||||||
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems;
|
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems;
|
||||||
|
import org.apache.commons.lang3.ObjectUtils;
|
||||||
|
import org.apache.commons.lang3.StringUtils;
|
||||||
import org.apache.kafka.clients.admin.OffsetSpec;
|
import org.apache.kafka.clients.admin.OffsetSpec;
|
||||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
import org.apache.kafka.clients.consumer.*;
|
||||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
|
||||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
|
||||||
import org.apache.kafka.common.TopicPartition;
|
import org.apache.kafka.common.TopicPartition;
|
||||||
import org.apache.kafka.common.config.TopicConfig;
|
import org.apache.kafka.common.config.TopicConfig;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -76,6 +83,12 @@ public class TopicStateManagerImpl implements TopicStateManager {
|
|||||||
@Autowired
|
@Autowired
|
||||||
private TopicConfigService topicConfigService;
|
private TopicConfigService topicConfigService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private GroupService groupService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private GroupManager groupManager;
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException {
|
public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException {
|
||||||
Topic topic = topicService.getTopic(clusterPhyId, topicName);
|
Topic topic = topicService.getTopic(clusterPhyId, topicName);
|
||||||
@@ -129,7 +142,12 @@ public class TopicStateManagerImpl implements TopicStateManager {
|
|||||||
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
|
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取分区offset
|
// 获取分区beginOffset
|
||||||
|
Result<Map<TopicPartition, Long>> beginOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.earliest(), null);
|
||||||
|
if (beginOffsetsMapResult.failed()) {
|
||||||
|
return Result.buildFromIgnoreData(beginOffsetsMapResult);
|
||||||
|
}
|
||||||
|
// 获取分区endOffset
|
||||||
Result<Map<TopicPartition, Long>> endOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.latest(), null);
|
Result<Map<TopicPartition, Long>> endOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.latest(), null);
|
||||||
if (endOffsetsMapResult.failed()) {
|
if (endOffsetsMapResult.failed()) {
|
||||||
return Result.buildFromIgnoreData(endOffsetsMapResult);
|
return Result.buildFromIgnoreData(endOffsetsMapResult);
|
||||||
@@ -142,13 +160,48 @@ public class TopicStateManagerImpl implements TopicStateManager {
|
|||||||
// 创建kafka-consumer
|
// 创建kafka-consumer
|
||||||
kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()));
|
kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()));
|
||||||
|
|
||||||
kafkaConsumer.assign(endOffsetsMapResult.getData().keySet());
|
List<TopicPartition> partitionList = new ArrayList<>();
|
||||||
for (Map.Entry<TopicPartition, Long> entry: endOffsetsMapResult.getData().entrySet()) {
|
long maxMessage = 0;
|
||||||
kafkaConsumer.seek(entry.getKey(), Math.max(0, entry.getValue() - dto.getMaxRecords()));
|
for (Map.Entry<TopicPartition, Long> entry : endOffsetsMapResult.getData().entrySet()) {
|
||||||
|
long begin = beginOffsetsMapResult.getData().get(entry.getKey());
|
||||||
|
long end = entry.getValue();
|
||||||
|
if (begin == end){
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
maxMessage += end - begin;
|
||||||
|
partitionList.add(entry.getKey());
|
||||||
|
}
|
||||||
|
maxMessage = Math.min(maxMessage, dto.getMaxRecords());
|
||||||
|
kafkaConsumer.assign(partitionList);
|
||||||
|
|
||||||
|
Map<TopicPartition, OffsetAndTimestamp> partitionOffsetAndTimestampMap = new HashMap<>();
|
||||||
|
// 获取指定时间每个分区的offset(按指定开始时间查询消息时)
|
||||||
|
if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) {
|
||||||
|
Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
|
||||||
|
partitionList.forEach(topicPartition -> {
|
||||||
|
timestampsToSearch.put(topicPartition, dto.getStartTimestampUnitMs());
|
||||||
|
});
|
||||||
|
partitionOffsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (TopicPartition partition : partitionList) {
|
||||||
|
if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getFilterOffsetReset()) {
|
||||||
|
// 重置到最旧
|
||||||
|
kafkaConsumer.seek(partition, beginOffsetsMapResult.getData().get(partition));
|
||||||
|
} else if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) {
|
||||||
|
// 重置到指定时间
|
||||||
|
kafkaConsumer.seek(partition, partitionOffsetAndTimestampMap.get(partition).offset());
|
||||||
|
} else if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getFilterOffsetReset()) {
|
||||||
|
// 重置到指定位置
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// 默认,重置到最新
|
||||||
|
kafkaConsumer.seek(partition, Math.max(beginOffsetsMapResult.getData().get(partition), endOffsetsMapResult.getData().get(partition) - dto.getMaxRecords()));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 这里需要减去 KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS 是因为poll一次需要耗时,如果这里不减去,则可能会导致poll之后,超过要求的时间
|
// 这里需要减去 KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS 是因为poll一次需要耗时,如果这里不减去,则可能会导致poll之后,超过要求的时间
|
||||||
while (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS <= dto.getPullTimeoutUnitMs() && voList.size() < dto.getMaxRecords()) {
|
while (System.currentTimeMillis() - startTime <= dto.getPullTimeoutUnitMs() && voList.size() < maxMessage) {
|
||||||
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
|
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
|
||||||
for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
|
for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
|
||||||
if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) {
|
if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) {
|
||||||
@@ -168,6 +221,15 @@ public class TopicStateManagerImpl implements TopicStateManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 排序
|
||||||
|
if (ObjectUtils.isNotEmpty(voList)) {
|
||||||
|
// 默认按时间倒序排序
|
||||||
|
if (StringUtils.isBlank(dto.getSortType())) {
|
||||||
|
dto.setSortType(SortTypeEnum.DESC.getSortType());
|
||||||
|
}
|
||||||
|
PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType());
|
||||||
|
}
|
||||||
|
|
||||||
return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size())));
|
return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size())));
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
log.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhyId, topicName, dto, e);
|
log.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhyId, topicName, dto, e);
|
||||||
@@ -296,6 +358,19 @@ public class TopicStateManagerImpl implements TopicStateManager {
|
|||||||
return Result.buildSuc(vo);
|
return Result.buildSuc(vo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public PaginationResult<GroupTopicOverviewVO> pagingTopicGroupsOverview(Long clusterPhyId, String topicName, String searchGroupName, PaginationBaseDTO dto) {
|
||||||
|
PaginationResult<GroupMemberPO> paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, "", "", searchGroupName, dto);
|
||||||
|
|
||||||
|
if (!paginationResult.hasData()) {
|
||||||
|
return PaginationResult.buildSuc(new ArrayList<>(), paginationResult);
|
||||||
|
}
|
||||||
|
|
||||||
|
List<GroupTopicOverviewVO> groupTopicVOList = groupManager.getGroupTopicOverviewVOList(clusterPhyId, paginationResult.getData().getBizData());
|
||||||
|
|
||||||
|
return PaginationResult.buildSuc(groupTopicVOList, paginationResult);
|
||||||
|
}
|
||||||
|
|
||||||
/**************************************************** private method ****************************************************/
|
/**************************************************** private method ****************************************************/
|
||||||
|
|
||||||
private boolean checkIfIgnore(ConsumerRecord<String, String> consumerRecord, String filterKey, String filterValue) {
|
private boolean checkIfIgnore(ConsumerRecord<String, String> consumerRecord, String filterKey, String filterValue) {
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import com.didiglobal.logi.log.LogFactory;
|
|||||||
import com.didiglobal.logi.security.common.dto.config.ConfigDTO;
|
import com.didiglobal.logi.security.common.dto.config.ConfigDTO;
|
||||||
import com.didiglobal.logi.security.service.ConfigService;
|
import com.didiglobal.logi.security.service.ConfigService;
|
||||||
import com.xiaojukeji.know.streaming.km.biz.version.VersionControlManager;
|
import com.xiaojukeji.know.streaming.km.biz.version.VersionControlManager;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDetailDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.UserMetricConfigDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.UserMetricConfigDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric.UserMetricConfig;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric.UserMetricConfig;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
@@ -47,29 +48,29 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
|||||||
@PostConstruct
|
@PostConstruct
|
||||||
public void init(){
|
public void init(){
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_SCORE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_SCORE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_TOTAL_PRODUCE_REQUESTS, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_FETCH_REQ, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_FETCH_REQ, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_PRODUCE_REQ, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_PRODUCE_REQ, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_UNDER_REPLICA_PARTITIONS, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_UNDER_REPLICA_PARTITIONS, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_TOTAL_PRODUCE_REQUESTS, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_IN, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_IN, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_OUT, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_OUT, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_REJECTED, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_REJECTED, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true));
|
||||||
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_SCORE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_SCORE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_PRODUCE_REQ, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_LOG_SIZE, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_CONNECTIONS, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_MESSAGES_IN, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_IN, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_IN, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_OUT, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_OUT, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_GROUP_REBALANCES, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_CONNECTIONS, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_JOB_RUNNING, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_MESSAGES_IN, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITIONS_NO_LEADER, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITIONS_NO_LEADER, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITION_URP, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITION_URP, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_LOG_SIZE, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_PRODUCE_REQ, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_GROUP_REBALANCES, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_JOB_RUNNING, true));
|
||||||
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_OFFSET_CONSUMED, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_OFFSET_CONSUMED, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_LAG, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_LAG, true));
|
||||||
@@ -77,18 +78,18 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
|||||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_SCORE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_SCORE, true));
|
||||||
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_SCORE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_SCORE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_REQ_QUEUE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_RES_QUEUE, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_MESSAGE_IN, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_MESSAGE_IN, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_PRODUCE_REQ, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_NETWORK_RPO_AVG_IDLE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_NETWORK_RPO_AVG_IDLE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_REQ_AVG_IDLE, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_REQ_AVG_IDLE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_PRODUCE_REQ, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_IN, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_REQ_QUEUE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_OUT, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_RES_QUEUE, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_PARTITIONS_SKEW, true));
|
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_LEADERS_SKEW, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_LEADERS_SKEW, true));
|
||||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_UNDER_REPLICATE_PARTITION, true));
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_UNDER_REPLICATE_PARTITION, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_PARTITIONS_SKEW, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_IN, true));
|
||||||
|
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_OUT, true));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Autowired
|
@Autowired
|
||||||
@@ -106,10 +107,15 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
|||||||
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_BROKER.getCode()), VersionItemVO.class));
|
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_BROKER.getCode()), VersionItemVO.class));
|
||||||
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_PARTITION.getCode()), VersionItemVO.class));
|
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_PARTITION.getCode()), VersionItemVO.class));
|
||||||
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_REPLICATION.getCode()), VersionItemVO.class));
|
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_REPLICATION.getCode()), VersionItemVO.class));
|
||||||
|
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_ZOOKEEPER.getCode()), VersionItemVO.class));
|
||||||
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(WEB_OP.getCode()), VersionItemVO.class));
|
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(WEB_OP.getCode()), VersionItemVO.class));
|
||||||
|
|
||||||
Map<String, VersionItemVO> map = allVersionItemVO.stream().collect(
|
Map<String, VersionItemVO> map = allVersionItemVO.stream().collect(
|
||||||
Collectors.toMap(u -> u.getType() + "@" + u.getName(), Function.identity() ));
|
Collectors.toMap(
|
||||||
|
u -> u.getType() + "@" + u.getName(),
|
||||||
|
Function.identity(),
|
||||||
|
(v1, v2) -> v1)
|
||||||
|
);
|
||||||
|
|
||||||
return Result.buildSuc(map);
|
return Result.buildSuc(map);
|
||||||
}
|
}
|
||||||
@@ -159,6 +165,9 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
|||||||
|
|
||||||
UserMetricConfig umc = userMetricConfigMap.get(itemType + "@" + metric);
|
UserMetricConfig umc = userMetricConfigMap.get(itemType + "@" + metric);
|
||||||
userMetricConfigVO.setSet(null != umc && umc.isSet());
|
userMetricConfigVO.setSet(null != umc && umc.isSet());
|
||||||
|
if (umc != null) {
|
||||||
|
userMetricConfigVO.setRank(umc.getRank());
|
||||||
|
}
|
||||||
userMetricConfigVO.setName(itemVO.getName());
|
userMetricConfigVO.setName(itemVO.getName());
|
||||||
userMetricConfigVO.setType(itemVO.getType());
|
userMetricConfigVO.setType(itemVO.getType());
|
||||||
userMetricConfigVO.setDesc(itemVO.getDesc());
|
userMetricConfigVO.setDesc(itemVO.getDesc());
|
||||||
@@ -178,13 +187,29 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
|||||||
@Override
|
@Override
|
||||||
public Result<Void> updateUserMetricItem(Long clusterId, Integer type, UserMetricConfigDTO dto, String operator) {
|
public Result<Void> updateUserMetricItem(Long clusterId, Integer type, UserMetricConfigDTO dto, String operator) {
|
||||||
Map<String, Boolean> metricsSetMap = dto.getMetricsSet();
|
Map<String, Boolean> metricsSetMap = dto.getMetricsSet();
|
||||||
if(null == metricsSetMap || metricsSetMap.isEmpty()){
|
|
||||||
|
//转换metricDetailDTOList
|
||||||
|
List<MetricDetailDTO> metricDetailDTOList = dto.getMetricDetailDTOList();
|
||||||
|
Map<String, MetricDetailDTO> metricDetailMap = new HashMap<>();
|
||||||
|
if (metricDetailDTOList != null && !metricDetailDTOList.isEmpty()) {
|
||||||
|
metricDetailMap = metricDetailDTOList.stream().collect(Collectors.toMap(MetricDetailDTO::getMetric, Function.identity()));
|
||||||
|
}
|
||||||
|
|
||||||
|
//转换metricsSetMap
|
||||||
|
if (metricsSetMap != null && !metricsSetMap.isEmpty()) {
|
||||||
|
for (Map.Entry<String, Boolean> metricAndShowEntry : metricsSetMap.entrySet()) {
|
||||||
|
if (metricDetailMap.containsKey(metricAndShowEntry.getKey())) continue;
|
||||||
|
metricDetailMap.put(metricAndShowEntry.getKey(), new MetricDetailDTO(metricAndShowEntry.getKey(), metricAndShowEntry.getValue(), null));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (metricDetailMap.isEmpty()) {
|
||||||
return Result.buildSuc();
|
return Result.buildSuc();
|
||||||
}
|
}
|
||||||
|
|
||||||
Set<UserMetricConfig> userMetricConfigs = getUserMetricConfig(operator);
|
Set<UserMetricConfig> userMetricConfigs = getUserMetricConfig(operator);
|
||||||
for(Map.Entry<String, Boolean> metricAndShowEntry : metricsSetMap.entrySet()){
|
for (MetricDetailDTO metricDetailDTO : metricDetailMap.values()) {
|
||||||
UserMetricConfig userMetricConfig = new UserMetricConfig(type, metricAndShowEntry.getKey(), metricAndShowEntry.getValue());
|
UserMetricConfig userMetricConfig = new UserMetricConfig(type, metricDetailDTO.getMetric(), metricDetailDTO.getSet(), metricDetailDTO.getRank());
|
||||||
userMetricConfigs.remove(userMetricConfig);
|
userMetricConfigs.remove(userMetricConfig);
|
||||||
userMetricConfigs.add(userMetricConfig);
|
userMetricConfigs.add(userMetricConfig);
|
||||||
}
|
}
|
||||||
@@ -228,7 +253,7 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
|||||||
return defaultMetrics;
|
return defaultMetrics;
|
||||||
}
|
}
|
||||||
|
|
||||||
return JSON.parseObject(value, new TypeReference<Set<UserMetricConfig>>(){});
|
return JSON.parseObject(value, new TypeReference<Set<UserMetricConfig>>() {});
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void main(String[] args){
|
public static void main(String[] args){
|
||||||
|
|||||||
@@ -1,120 +0,0 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.collector.metric;
|
|
||||||
|
|
||||||
import com.didiglobal.logi.log.ILog;
|
|
||||||
import com.didiglobal.logi.log.LogFactory;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.*;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.utils.NamedThreadFactory;
|
|
||||||
import com.xiaojukeji.know.streaming.km.persistence.es.dao.BaseMetricESDAO;
|
|
||||||
import org.apache.commons.collections.CollectionUtils;
|
|
||||||
import org.springframework.context.ApplicationListener;
|
|
||||||
import org.springframework.stereotype.Component;
|
|
||||||
|
|
||||||
import javax.annotation.PostConstruct;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Objects;
|
|
||||||
import java.util.concurrent.LinkedBlockingDeque;
|
|
||||||
import java.util.concurrent.ThreadPoolExecutor;
|
|
||||||
import java.util.concurrent.TimeUnit;
|
|
||||||
|
|
||||||
@Component
|
|
||||||
public class MetricESSender implements ApplicationListener<BaseMetricEvent> {
|
|
||||||
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
|
||||||
|
|
||||||
private static final int THRESHOLD = 100;
|
|
||||||
|
|
||||||
private ThreadPoolExecutor esExecutor = new ThreadPoolExecutor(10, 20, 6000, TimeUnit.MILLISECONDS,
|
|
||||||
new LinkedBlockingDeque<>(1000),
|
|
||||||
new NamedThreadFactory("KM-Collect-MetricESSender-ES"),
|
|
||||||
(r, e) -> LOGGER.warn("class=MetricESSender||msg=KM-Collect-MetricESSender-ES Deque is blocked, taskCount:{}" + e.getTaskCount()));
|
|
||||||
|
|
||||||
@PostConstruct
|
|
||||||
public void init(){
|
|
||||||
LOGGER.info("class=MetricESSender||method=init||msg=init finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onApplicationEvent(BaseMetricEvent event) {
|
|
||||||
if(event instanceof BrokerMetricEvent) {
|
|
||||||
BrokerMetricEvent brokerMetricEvent = (BrokerMetricEvent)event;
|
|
||||||
send2es(KafkaMetricIndexEnum.BROKER_INFO,
|
|
||||||
ConvertUtil.list2List(brokerMetricEvent.getBrokerMetrics(), BrokerMetricPO.class)
|
|
||||||
);
|
|
||||||
|
|
||||||
} else if(event instanceof ClusterMetricEvent) {
|
|
||||||
ClusterMetricEvent clusterMetricEvent = (ClusterMetricEvent)event;
|
|
||||||
send2es(KafkaMetricIndexEnum.CLUSTER_INFO,
|
|
||||||
ConvertUtil.list2List(clusterMetricEvent.getClusterMetrics(), ClusterMetricPO.class)
|
|
||||||
);
|
|
||||||
|
|
||||||
} else if(event instanceof TopicMetricEvent) {
|
|
||||||
TopicMetricEvent topicMetricEvent = (TopicMetricEvent)event;
|
|
||||||
send2es(KafkaMetricIndexEnum.TOPIC_INFO,
|
|
||||||
ConvertUtil.list2List(topicMetricEvent.getTopicMetrics(), TopicMetricPO.class)
|
|
||||||
);
|
|
||||||
|
|
||||||
} else if(event instanceof PartitionMetricEvent) {
|
|
||||||
PartitionMetricEvent partitionMetricEvent = (PartitionMetricEvent)event;
|
|
||||||
send2es(KafkaMetricIndexEnum.PARTITION_INFO,
|
|
||||||
ConvertUtil.list2List(partitionMetricEvent.getPartitionMetrics(), PartitionMetricPO.class)
|
|
||||||
);
|
|
||||||
|
|
||||||
} else if(event instanceof GroupMetricEvent) {
|
|
||||||
GroupMetricEvent groupMetricEvent = (GroupMetricEvent)event;
|
|
||||||
send2es(KafkaMetricIndexEnum.GROUP_INFO,
|
|
||||||
ConvertUtil.list2List(groupMetricEvent.getGroupMetrics(), GroupMetricPO.class)
|
|
||||||
);
|
|
||||||
|
|
||||||
} else if(event instanceof ReplicaMetricEvent) {
|
|
||||||
ReplicaMetricEvent replicaMetricEvent = (ReplicaMetricEvent)event;
|
|
||||||
send2es(KafkaMetricIndexEnum.REPLICATION_INFO,
|
|
||||||
ConvertUtil.list2List(replicaMetricEvent.getReplicationMetrics(), ReplicationMetricPO.class)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 根据不同监控维度来发送
|
|
||||||
*/
|
|
||||||
private boolean send2es(KafkaMetricIndexEnum stats, List<? extends BaseESPO> statsList){
|
|
||||||
if (CollectionUtils.isEmpty(statsList)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!EnvUtil.isOnline()) {
|
|
||||||
LOGGER.info("class=MetricESSender||method=send2es||ariusStats={}||size={}",
|
|
||||||
stats.getIndex(), statsList.size());
|
|
||||||
}
|
|
||||||
|
|
||||||
BaseMetricESDAO baseMetricESDao = BaseMetricESDAO.getByStatsType(stats);
|
|
||||||
if (Objects.isNull( baseMetricESDao )) {
|
|
||||||
LOGGER.error("class=MetricESSender||method=send2es||errMsg=fail to find {}", stats.getIndex());
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
int size = statsList.size();
|
|
||||||
int num = (size) % THRESHOLD == 0 ? (size / THRESHOLD) : (size / THRESHOLD + 1);
|
|
||||||
|
|
||||||
if (size < THRESHOLD) {
|
|
||||||
esExecutor.execute(
|
|
||||||
() -> baseMetricESDao.batchInsertStats(statsList)
|
|
||||||
);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 1; i < num + 1; i++) {
|
|
||||||
int end = (i * THRESHOLD) > size ? size : (i * THRESHOLD);
|
|
||||||
int start = (i - 1) * THRESHOLD;
|
|
||||||
|
|
||||||
esExecutor.execute(
|
|
||||||
() -> baseMetricESDao.batchInsertStats(statsList.subList(start, end))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -91,7 +91,7 @@ public class ReplicaMetricCollector extends AbstractMetricCollector<ReplicationM
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
Result<ReplicationMetrics> ret = replicaMetricService.collectReplicaMetricsFromKafkaWithCache(
|
Result<ReplicationMetrics> ret = replicaMetricService.collectReplicaMetricsFromKafka(
|
||||||
clusterPhyId,
|
clusterPhyId,
|
||||||
metrics.getTopic(),
|
metrics.getTopic(),
|
||||||
metrics.getBrokerId(),
|
metrics.getBrokerId(),
|
||||||
|
|||||||
@@ -0,0 +1,122 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.metric;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.ZookeeperMetricParam;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionControlItem;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ZookeeperMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ZookeeperMetrics;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ZookeeperMetricPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperMetricService;
|
||||||
|
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_ZOOKEEPER;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author didi
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class ZookeeperMetricCollector extends AbstractMetricCollector<ZookeeperMetricPO> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private VersionControlService versionControlService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private ZookeeperMetricService zookeeperMetricService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private ZookeeperService zookeeperService;
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
private KafkaControllerService kafkaControllerService;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void collectMetrics(ClusterPhy clusterPhy) {
|
||||||
|
Long startTime = System.currentTimeMillis();
|
||||||
|
Long clusterPhyId = clusterPhy.getId();
|
||||||
|
List<VersionControlItem> items = versionControlService.listVersionControlItem(clusterPhyId, collectorType().getCode());
|
||||||
|
List<ZookeeperInfo> aliveZKList = zookeeperService.listFromDBByCluster(clusterPhyId)
|
||||||
|
.stream()
|
||||||
|
.filter(elem -> Constant.ALIVE.equals(elem.getStatus()))
|
||||||
|
.collect(Collectors.toList());
|
||||||
|
KafkaController kafkaController = kafkaControllerService.getKafkaControllerFromDB(clusterPhyId);
|
||||||
|
|
||||||
|
ZookeeperMetrics metrics = ZookeeperMetrics.initWithMetric(clusterPhyId, Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (float)Constant.INVALID_CODE);
|
||||||
|
if (ValidateUtils.isEmptyList(aliveZKList)) {
|
||||||
|
// 没有存活的ZK时,发布事件,然后直接返回
|
||||||
|
publishMetric(new ZookeeperMetricEvent(this, Arrays.asList(metrics)));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 构造参数
|
||||||
|
ZookeeperMetricParam param = new ZookeeperMetricParam(
|
||||||
|
clusterPhyId,
|
||||||
|
aliveZKList.stream().map(elem -> new Tuple<String, Integer>(elem.getHost(), elem.getPort())).collect(Collectors.toList()),
|
||||||
|
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class),
|
||||||
|
kafkaController == null? Constant.INVALID_CODE: kafkaController.getBrokerId(),
|
||||||
|
null
|
||||||
|
);
|
||||||
|
|
||||||
|
for(VersionControlItem v : items) {
|
||||||
|
try {
|
||||||
|
if(null != metrics.getMetrics().get(v.getName())) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
param.setMetricName(v.getName());
|
||||||
|
|
||||||
|
Result<ZookeeperMetrics> ret = zookeeperMetricService.collectMetricsFromZookeeper(param);
|
||||||
|
if(null == ret || ret.failed() || null == ret.getData()){
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics.putMetric(ret.getData().getMetrics());
|
||||||
|
|
||||||
|
if(!EnvUtil.isOnline()){
|
||||||
|
LOGGER.info(
|
||||||
|
"class=ZookeeperMetricCollector||method=collectMetrics||clusterPhyId={}||metricName={}||metricValue={}",
|
||||||
|
clusterPhyId, v.getName(), ConvertUtil.obj2Json(ret.getData().getMetrics())
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} catch (Exception e){
|
||||||
|
LOGGER.error(
|
||||||
|
"class=ZookeeperMetricCollector||method=collectMetrics||clusterPhyId={}||metricName={}||errMsg=exception!",
|
||||||
|
clusterPhyId, v.getName(), e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);
|
||||||
|
|
||||||
|
publishMetric(new ZookeeperMetricEvent(this, Arrays.asList(metrics)));
|
||||||
|
|
||||||
|
LOGGER.info(
|
||||||
|
"class=ZookeeperMetricCollector||method=collectMetrics||clusterPhyId={}||startTime={}||costTime={}||msg=msg=collect finished.",
|
||||||
|
clusterPhyId, startTime, System.currentTimeMillis() - startTime
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public VersionItemTypeEnum collectorType() {
|
||||||
|
return METRIC_ZOOKEEPER;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,72 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.NamedThreadFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.persistence.es.dao.BaseMetricESDAO;
|
||||||
|
import org.apache.commons.collections.CollectionUtils;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.concurrent.LinkedBlockingDeque;
|
||||||
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
public abstract class AbstractMetricESSender {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
private static final int THRESHOLD = 100;
|
||||||
|
|
||||||
|
private static final ThreadPoolExecutor esExecutor = new ThreadPoolExecutor(
|
||||||
|
10,
|
||||||
|
20,
|
||||||
|
6000,
|
||||||
|
TimeUnit.MILLISECONDS,
|
||||||
|
new LinkedBlockingDeque<>(1000),
|
||||||
|
new NamedThreadFactory("KM-Collect-MetricESSender-ES"),
|
||||||
|
(r, e) -> LOGGER.warn("class=MetricESSender||msg=KM-Collect-MetricESSender-ES Deque is blocked, taskCount:{}" + e.getTaskCount())
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 根据不同监控维度来发送
|
||||||
|
*/
|
||||||
|
protected boolean send2es(String index, List<? extends BaseESPO> statsList){
|
||||||
|
if (CollectionUtils.isEmpty(statsList)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!EnvUtil.isOnline()) {
|
||||||
|
LOGGER.info("class=MetricESSender||method=send2es||ariusStats={}||size={}",
|
||||||
|
index, statsList.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
BaseMetricESDAO baseMetricESDao = BaseMetricESDAO.getByStatsType(index);
|
||||||
|
if (Objects.isNull( baseMetricESDao )) {
|
||||||
|
LOGGER.error("class=MetricESSender||method=send2es||errMsg=fail to find {}", index);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
int size = statsList.size();
|
||||||
|
int num = (size) % THRESHOLD == 0 ? (size / THRESHOLD) : (size / THRESHOLD + 1);
|
||||||
|
|
||||||
|
if (size < THRESHOLD) {
|
||||||
|
esExecutor.execute(
|
||||||
|
() -> baseMetricESDao.batchInsertStats(statsList)
|
||||||
|
);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 1; i < num + 1; i++) {
|
||||||
|
int end = (i * THRESHOLD) > size ? size : (i * THRESHOLD);
|
||||||
|
int start = (i - 1) * THRESHOLD;
|
||||||
|
|
||||||
|
esExecutor.execute(
|
||||||
|
() -> baseMetricESDao.batchInsertStats(statsList.subList(start, end))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.BrokerMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.BROKER_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class BrokerMetricESSender extends AbstractMetricESSender implements ApplicationListener<BrokerMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=BrokerMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(BrokerMetricEvent event) {
|
||||||
|
send2es(BROKER_INDEX, ConvertUtil.list2List(event.getBrokerMetrics(), BrokerMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ClusterMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ClusterMetricPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.CLUSTER_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class ClusterMetricESSender extends AbstractMetricESSender implements ApplicationListener<ClusterMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=ClusterMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(ClusterMetricEvent event) {
|
||||||
|
send2es(CLUSTER_INDEX, ConvertUtil.list2List(event.getClusterMetrics(), ClusterMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.GroupMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.GroupMetricPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.GROUP_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class GroupMetricESSender extends AbstractMetricESSender implements ApplicationListener<GroupMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=GroupMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(GroupMetricEvent event) {
|
||||||
|
send2es(GROUP_INDEX, ConvertUtil.list2List(event.getGroupMetrics(), GroupMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.PartitionMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.PartitionMetricPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.PARTITION_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class PartitionMetricESSender extends AbstractMetricESSender implements ApplicationListener<PartitionMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=PartitionMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(PartitionMetricEvent event) {
|
||||||
|
send2es(PARTITION_INDEX, ConvertUtil.list2List(event.getPartitionMetrics(), PartitionMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ReplicaMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ReplicationMetricPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.REPLICATION_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class ReplicaMetricESSender extends AbstractMetricESSender implements ApplicationListener<ReplicaMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=GroupMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(ReplicaMetricEvent event) {
|
||||||
|
send2es(REPLICATION_INDEX, ConvertUtil.list2List(event.getReplicationMetrics(), ReplicationMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.*;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.TOPIC_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class TopicMetricESSender extends AbstractMetricESSender implements ApplicationListener<TopicMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=TopicMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(TopicMetricEvent event) {
|
||||||
|
send2es(TOPIC_INDEX, ConvertUtil.list2List(event.getTopicMetrics(), TopicMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.collector.sink;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ZookeeperMetricEvent;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ZookeeperMetricPO;
|
||||||
|
import org.springframework.context.ApplicationListener;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.ZOOKEEPER_INDEX;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class ZookeeperMetricESSender extends AbstractMetricESSender implements ApplicationListener<ZookeeperMetricEvent> {
|
||||||
|
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void init(){
|
||||||
|
LOGGER.info("class=ZookeeperMetricESSender||method=init||msg=init finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onApplicationEvent(ZookeeperMetricEvent event) {
|
||||||
|
send2es(ZOOKEEPER_INDEX, ConvertUtil.list2List(event.getZookeeperMetrics(), ZookeeperMetricPO.class));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,18 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.dto.cluster;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||||
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author wyb
|
||||||
|
* @date 2022/10/17
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class ClusterGroupSummaryDTO extends PaginationBaseDTO {
|
||||||
|
@ApiModelProperty("查找该Topic")
|
||||||
|
private String searchTopicName;
|
||||||
|
|
||||||
|
@ApiModelProperty("查找该Group")
|
||||||
|
private String searchGroupName;
|
||||||
|
}
|
||||||
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.dto.cluster;
|
|||||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||||
import io.swagger.annotations.ApiModel;
|
import io.swagger.annotations.ApiModel;
|
||||||
import io.swagger.annotations.ApiModelProperty;
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
@@ -34,4 +35,8 @@ public class ClusterPhyBaseDTO extends BaseDTO {
|
|||||||
@NotNull(message = "jmxProperties不允许为空")
|
@NotNull(message = "jmxProperties不允许为空")
|
||||||
@ApiModelProperty(value="Jmx配置")
|
@ApiModelProperty(value="Jmx配置")
|
||||||
protected JmxConfig jmxProperties;
|
protected JmxConfig jmxProperties;
|
||||||
|
|
||||||
|
// TODO 前端页面增加时,需要加一个不为空的限制
|
||||||
|
@ApiModelProperty(value="ZK配置")
|
||||||
|
protected ZKConfig zkProperties;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,13 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.dto.cluster;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author wyc
|
||||||
|
* @date 2022/9/23
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class ClusterZookeepersOverviewDTO extends PaginationBaseDTO {
|
||||||
|
|
||||||
|
}
|
||||||
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.dto.group;
|
|||||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.ClusterTopicDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.ClusterTopicDTO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum;
|
||||||
import io.swagger.annotations.ApiModelProperty;
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
|
|
||||||
@@ -23,7 +24,7 @@ public class GroupOffsetResetDTO extends ClusterTopicDTO {
|
|||||||
private String groupName;
|
private String groupName;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @see com.xiaojukeji.know.streaming.km.common.enums.GroupOffsetResetEnum
|
* @see OffsetTypeEnum
|
||||||
*/
|
*/
|
||||||
@NotNull(message = "resetType不允许为空")
|
@NotNull(message = "resetType不允许为空")
|
||||||
@ApiModelProperty(value = "重置方式", example = "1")
|
@ApiModelProperty(value = "重置方式", example = "1")
|
||||||
|
|||||||
@@ -0,0 +1,32 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.dto.metrices;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
|
||||||
|
import io.swagger.annotations.ApiModel;
|
||||||
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
|
import lombok.AllArgsConstructor;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
import javax.validation.constraints.NotNull;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author didi
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
@AllArgsConstructor
|
||||||
|
@ApiModel(description = "指标详细属性信息")
|
||||||
|
public class MetricDetailDTO extends BaseDTO {
|
||||||
|
|
||||||
|
@ApiModelProperty("指标名称")
|
||||||
|
private String metric;
|
||||||
|
|
||||||
|
@ApiModelProperty("指标是否显示")
|
||||||
|
private Boolean set;
|
||||||
|
|
||||||
|
@NotNull(message = "MetricDetailDTO的rank字段应不为空")
|
||||||
|
@ApiModelProperty("指标优先级")
|
||||||
|
private Integer rank;
|
||||||
|
|
||||||
|
}
|
||||||
@@ -7,6 +7,8 @@ import lombok.AllArgsConstructor;
|
|||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
import lombok.NoArgsConstructor;
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
import javax.validation.Valid;
|
||||||
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
|
|
||||||
@@ -17,4 +19,8 @@ import java.util.Map;
|
|||||||
public class UserMetricConfigDTO extends BaseDTO {
|
public class UserMetricConfigDTO extends BaseDTO {
|
||||||
@ApiModelProperty("指标展示设置项,key:指标名;value:是否展现(true展现/false不展现)")
|
@ApiModelProperty("指标展示设置项,key:指标名;value:是否展现(true展现/false不展现)")
|
||||||
private Map<String, Boolean> metricsSet;
|
private Map<String, Boolean> metricsSet;
|
||||||
|
|
||||||
|
@Valid
|
||||||
|
@ApiModelProperty("指标自定义属性列表")
|
||||||
|
private List<MetricDetailDTO> metricDetailDTOList;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.common.bean.dto.topic;
|
package com.xiaojukeji.know.streaming.km.common.bean.dto.topic;
|
||||||
|
|
||||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
|
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum;
|
||||||
import io.swagger.annotations.ApiModel;
|
import io.swagger.annotations.ApiModel;
|
||||||
import io.swagger.annotations.ApiModelProperty;
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
@@ -15,7 +16,7 @@ import javax.validation.constraints.NotNull;
|
|||||||
@Data
|
@Data
|
||||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||||
@ApiModel(description = "Topic记录")
|
@ApiModel(description = "Topic记录")
|
||||||
public class TopicRecordDTO extends BaseDTO {
|
public class TopicRecordDTO extends PaginationSortDTO {
|
||||||
@NotNull(message = "truncate不允许为空")
|
@NotNull(message = "truncate不允许为空")
|
||||||
@ApiModelProperty(value = "是否截断", example = "true")
|
@ApiModelProperty(value = "是否截断", example = "true")
|
||||||
private Boolean truncate;
|
private Boolean truncate;
|
||||||
@@ -34,4 +35,13 @@ public class TopicRecordDTO extends BaseDTO {
|
|||||||
|
|
||||||
@ApiModelProperty(value = "预览超时时间", example = "10000")
|
@ApiModelProperty(value = "预览超时时间", example = "10000")
|
||||||
private Long pullTimeoutUnitMs = 8000L;
|
private Long pullTimeoutUnitMs = 8000L;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @see OffsetTypeEnum
|
||||||
|
*/
|
||||||
|
@ApiModelProperty(value = "offset", example = "")
|
||||||
|
private Integer filterOffsetReset = 0;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "开始日期时间戳", example = "")
|
||||||
|
private Long startTimestampUnitMs;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,12 +1,17 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.broker;
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.broker;
|
||||||
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.BrokerMetadata;
|
|
||||||
|
import com.alibaba.fastjson.TypeReference;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||||
import lombok.AllArgsConstructor;
|
import lombok.AllArgsConstructor;
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
import lombok.NoArgsConstructor;
|
import lombok.NoArgsConstructor;
|
||||||
import org.apache.kafka.common.Node;
|
import org.apache.kafka.common.Node;
|
||||||
|
|
||||||
import java.io.Serializable;
|
import java.io.Serializable;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @author didi
|
* @author didi
|
||||||
@@ -55,6 +60,11 @@ public class Broker implements Serializable {
|
|||||||
*/
|
*/
|
||||||
private Integer status;
|
private Integer status;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 监听信息
|
||||||
|
*/
|
||||||
|
private Map<String, IpPortData> endpointMap;
|
||||||
|
|
||||||
public static Broker buildFrom(Long clusterPhyId, Node node, Long startTimestamp) {
|
public static Broker buildFrom(Long clusterPhyId, Node node, Long startTimestamp) {
|
||||||
Broker metadata = new Broker();
|
Broker metadata = new Broker();
|
||||||
metadata.setClusterPhyId(clusterPhyId);
|
metadata.setClusterPhyId(clusterPhyId);
|
||||||
@@ -68,17 +78,25 @@ public class Broker implements Serializable {
|
|||||||
return metadata;
|
return metadata;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static Broker buildFrom(Long clusterPhyId, Integer brokerId, BrokerMetadata brokerMetadata) {
|
public static Broker buildFrom(BrokerPO brokerPO) {
|
||||||
Broker metadata = new Broker();
|
Broker broker = ConvertUtil.obj2Obj(brokerPO, Broker.class);
|
||||||
metadata.setClusterPhyId(clusterPhyId);
|
String endpointMapStr = brokerPO.getEndpointMap();
|
||||||
metadata.setBrokerId(brokerId);
|
if (broker == null || endpointMapStr == null || endpointMapStr.equals("")) {
|
||||||
metadata.setHost(brokerMetadata.getHost());
|
return broker;
|
||||||
metadata.setPort(brokerMetadata.getPort());
|
}
|
||||||
metadata.setJmxPort(brokerMetadata.getJmxPort());
|
|
||||||
metadata.setStartTimestamp(brokerMetadata.getTimestamp());
|
// 填充endpoint信息
|
||||||
metadata.setRack(brokerMetadata.getRack());
|
Map<String, IpPortData> endpointMap = ConvertUtil.str2ObjByJson(endpointMapStr, new TypeReference<Map<String, IpPortData>>(){});
|
||||||
metadata.setStatus(1);
|
broker.setEndpointMap(endpointMap);
|
||||||
return metadata;
|
return broker;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getJmxHost(String endPoint) {
|
||||||
|
if (endPoint == null || endpointMap == null) {
|
||||||
|
return host;
|
||||||
|
}
|
||||||
|
IpPortData ip = endpointMap.get(endPoint);
|
||||||
|
return ip != null ? ip.getIp() : host;
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean alive() {
|
public boolean alive() {
|
||||||
|
|||||||
@@ -53,9 +53,16 @@ public class ClusterPhy implements Comparable<ClusterPhy>, EntifyIdInterface {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* jmx配置
|
* jmx配置
|
||||||
|
* @see com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig
|
||||||
*/
|
*/
|
||||||
private String jmxProperties;
|
private String jmxProperties;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* zk配置
|
||||||
|
* @see com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig
|
||||||
|
*/
|
||||||
|
private String zkProperties;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 开启ACL
|
* 开启ACL
|
||||||
* @see com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum
|
* @see com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum
|
||||||
|
|||||||
@@ -27,6 +27,9 @@ public class JmxConfig implements Serializable {
|
|||||||
|
|
||||||
@ApiModelProperty(value="SSL情况下的token", example = "KsKmCCY19")
|
@ApiModelProperty(value="SSL情况下的token", example = "KsKmCCY19")
|
||||||
private String token;
|
private String token;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="使用哪个endpoint网络", example = "EXTERNAL")
|
||||||
|
private String useWhichEndpoint;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,70 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.config;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
|
import io.swagger.annotations.ApiModel;
|
||||||
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
|
|
||||||
|
import java.io.Serializable;
|
||||||
|
import java.util.Properties;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author zengqiao
|
||||||
|
* @date 22/02/24
|
||||||
|
*/
|
||||||
|
@ApiModel(description = "ZK配置")
|
||||||
|
public class ZKConfig implements Serializable {
|
||||||
|
@ApiModelProperty(value="ZK的jmx配置")
|
||||||
|
private JmxConfig jmxConfig;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="ZK是否开启secure", example = "false")
|
||||||
|
private Boolean openSecure = false;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="ZK的Session超时时间", example = "15000")
|
||||||
|
private Integer sessionTimeoutUnitMs = 15000;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="ZK的Request超时时间", example = "5000")
|
||||||
|
private Integer requestTimeoutUnitMs = 5000;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="ZK的Request超时时间")
|
||||||
|
private Properties otherProps = new Properties();
|
||||||
|
|
||||||
|
public JmxConfig getJmxConfig() {
|
||||||
|
return jmxConfig == null? new JmxConfig(): jmxConfig;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setJmxConfig(JmxConfig jmxConfig) {
|
||||||
|
this.jmxConfig = jmxConfig;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Boolean getOpenSecure() {
|
||||||
|
return openSecure != null && openSecure;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setOpenSecure(Boolean openSecure) {
|
||||||
|
this.openSecure = openSecure;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Integer getSessionTimeoutUnitMs() {
|
||||||
|
return sessionTimeoutUnitMs == null? Constant.DEFAULT_SESSION_TIMEOUT_UNIT_MS: sessionTimeoutUnitMs;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setSessionTimeoutUnitMs(Integer sessionTimeoutUnitMs) {
|
||||||
|
this.sessionTimeoutUnitMs = sessionTimeoutUnitMs;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Integer getRequestTimeoutUnitMs() {
|
||||||
|
return requestTimeoutUnitMs == null? Constant.DEFAULT_REQUEST_TIMEOUT_UNIT_MS: requestTimeoutUnitMs;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setRequestTimeoutUnitMs(Integer requestTimeoutUnitMs) {
|
||||||
|
this.requestTimeoutUnitMs = requestTimeoutUnitMs;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Properties getOtherProps() {
|
||||||
|
return otherProps == null? new Properties() : otherProps;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setOtherProps(Properties otherProps) {
|
||||||
|
this.otherProps = otherProps;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,12 +1,12 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric;
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
import lombok.AllArgsConstructor;
|
import lombok.AllArgsConstructor;
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
import lombok.NoArgsConstructor;
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
@Data
|
@Data
|
||||||
@NoArgsConstructor
|
@NoArgsConstructor
|
||||||
@AllArgsConstructor
|
|
||||||
public class UserMetricConfig {
|
public class UserMetricConfig {
|
||||||
|
|
||||||
private int type;
|
private int type;
|
||||||
@@ -15,6 +15,22 @@ public class UserMetricConfig {
|
|||||||
|
|
||||||
private boolean set;
|
private boolean set;
|
||||||
|
|
||||||
|
private Integer rank;
|
||||||
|
|
||||||
|
public UserMetricConfig(int type, String metric, boolean set, Integer rank) {
|
||||||
|
this.type = type;
|
||||||
|
this.metric = metric;
|
||||||
|
this.set = set;
|
||||||
|
this.rank = rank;
|
||||||
|
}
|
||||||
|
|
||||||
|
public UserMetricConfig(int type, String metric, boolean set) {
|
||||||
|
this.type = type;
|
||||||
|
this.metric = metric;
|
||||||
|
this.set = set;
|
||||||
|
this.rank = null;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int hashCode(){
|
public int hashCode(){
|
||||||
return metric.hashCode() << 1 + type;
|
return metric.hashCode() << 1 + type;
|
||||||
|
|||||||
@@ -0,0 +1,74 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.group;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum;
|
||||||
|
import lombok.AllArgsConstructor;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author wyb
|
||||||
|
* @date 2022/10/10
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
@AllArgsConstructor
|
||||||
|
public class Group {
|
||||||
|
/**
|
||||||
|
* 集群id
|
||||||
|
*/
|
||||||
|
private Long clusterPhyId;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group类型
|
||||||
|
* @see GroupTypeEnum
|
||||||
|
*/
|
||||||
|
private GroupTypeEnum type;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group名称
|
||||||
|
*/
|
||||||
|
private String name;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group状态
|
||||||
|
* @see GroupStateEnum
|
||||||
|
*/
|
||||||
|
private GroupStateEnum state;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group成员数量
|
||||||
|
*/
|
||||||
|
private Integer memberCount;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group消费的topic列表
|
||||||
|
*/
|
||||||
|
private List<GroupTopicMember> topicMembers;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group分配策略
|
||||||
|
*/
|
||||||
|
private String partitionAssignor;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* group协调器brokerId
|
||||||
|
*/
|
||||||
|
private int coordinatorId;
|
||||||
|
|
||||||
|
public Group(Long clusterPhyId, String groupName, ConsumerGroupDescription groupDescription) {
|
||||||
|
this.clusterPhyId = clusterPhyId;
|
||||||
|
this.type = groupDescription.isSimpleConsumerGroup()? GroupTypeEnum.CONSUMER: GroupTypeEnum.CONNECTOR;
|
||||||
|
this.name = groupName;
|
||||||
|
this.state = GroupStateEnum.getByRawState(groupDescription.state());
|
||||||
|
this.memberCount = groupDescription.members() == null? 0: groupDescription.members().size();
|
||||||
|
this.topicMembers = new ArrayList<>();
|
||||||
|
this.partitionAssignor = groupDescription.partitionAssignor();
|
||||||
|
this.coordinatorId = groupDescription.coordinator() == null? Constant.INVALID_CODE: groupDescription.coordinator().id();
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,27 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.group;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author wyb
|
||||||
|
* @date 2022/10/10
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
public class GroupTopicMember {
|
||||||
|
/**
|
||||||
|
* Topic名称
|
||||||
|
*/
|
||||||
|
private String topicName;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 消费此Topic的成员数量
|
||||||
|
*/
|
||||||
|
private Integer memberCount;
|
||||||
|
|
||||||
|
public GroupTopicMember(String topicName, Integer memberCount) {
|
||||||
|
this.topicName = topicName;
|
||||||
|
this.memberCount = memberCount;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.ToString;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author zengqiao
|
||||||
|
* @date 20/6/17
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
@ToString
|
||||||
|
public class ZookeeperMetrics extends BaseMetrics {
|
||||||
|
public ZookeeperMetrics(Long clusterPhyId) {
|
||||||
|
super(clusterPhyId);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static ZookeeperMetrics initWithMetric(Long clusterPhyId, String metric, Float value) {
|
||||||
|
ZookeeperMetrics metrics = new ZookeeperMetrics(clusterPhyId);
|
||||||
|
metrics.setClusterPhyId( clusterPhyId );
|
||||||
|
metrics.putMetric(metric, value);
|
||||||
|
return metrics;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String unique() {
|
||||||
|
return "ZK@" + clusterPhyId;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,47 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author didi
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
public class ZookeeperMetricParam extends MetricParam {
|
||||||
|
private Long clusterPhyId;
|
||||||
|
|
||||||
|
private List<Tuple<String, Integer>> zkAddressList;
|
||||||
|
|
||||||
|
private ZKConfig zkConfig;
|
||||||
|
|
||||||
|
private String metricName;
|
||||||
|
|
||||||
|
private Integer kafkaControllerId;
|
||||||
|
|
||||||
|
public ZookeeperMetricParam(Long clusterPhyId,
|
||||||
|
List<Tuple<String, Integer>> zkAddressList,
|
||||||
|
ZKConfig zkConfig,
|
||||||
|
String metricName) {
|
||||||
|
this.clusterPhyId = clusterPhyId;
|
||||||
|
this.zkAddressList = zkAddressList;
|
||||||
|
this.zkConfig = zkConfig;
|
||||||
|
this.metricName = metricName;
|
||||||
|
}
|
||||||
|
|
||||||
|
public ZookeeperMetricParam(Long clusterPhyId,
|
||||||
|
List<Tuple<String, Integer>> zkAddressList,
|
||||||
|
ZKConfig zkConfig,
|
||||||
|
Integer kafkaControllerId,
|
||||||
|
String metricName) {
|
||||||
|
this.clusterPhyId = clusterPhyId;
|
||||||
|
this.zkAddressList = zkAddressList;
|
||||||
|
this.zkConfig = zkConfig;
|
||||||
|
this.kafkaControllerId = kafkaControllerId;
|
||||||
|
this.metricName = metricName;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,19 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
import org.apache.kafka.common.TopicPartition;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
public class BatchPartitionParam extends ClusterPhyParam {
|
||||||
|
private List<TopicPartition> tpList;
|
||||||
|
|
||||||
|
public BatchPartitionParam(Long clusterPhyId, List<TopicPartition> tpList) {
|
||||||
|
super(clusterPhyId);
|
||||||
|
this.tpList = tpList;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition;
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition;
|
||||||
|
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
import lombok.NoArgsConstructor;
|
import lombok.NoArgsConstructor;
|
||||||
import org.apache.kafka.clients.admin.OffsetSpec;
|
import org.apache.kafka.clients.admin.OffsetSpec;
|
||||||
@@ -10,13 +10,13 @@ import java.util.Map;
|
|||||||
|
|
||||||
@Data
|
@Data
|
||||||
@NoArgsConstructor
|
@NoArgsConstructor
|
||||||
public class PartitionOffsetParam extends ClusterPhyParam {
|
public class PartitionOffsetParam extends TopicParam {
|
||||||
private Map<TopicPartition, OffsetSpec> topicPartitionOffsets;
|
private Map<TopicPartition, OffsetSpec> topicPartitionOffsets;
|
||||||
|
|
||||||
private Long timestamp;
|
private Long timestamp;
|
||||||
|
|
||||||
public PartitionOffsetParam(Long clusterPhyId, Map<TopicPartition, OffsetSpec> topicPartitionOffsets, Long timestamp) {
|
public PartitionOffsetParam(Long clusterPhyId, String topicName, Map<TopicPartition, OffsetSpec> topicPartitionOffsets, Long timestamp) {
|
||||||
super(clusterPhyId);
|
super(clusterPhyId, topicName);
|
||||||
this.topicPartitionOffsets = topicPartitionOffsets;
|
this.topicPartitionOffsets = topicPartitionOffsets;
|
||||||
this.timestamp = timestamp;
|
this.timestamp = timestamp;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,4 +15,12 @@ public class TopicParam extends ClusterPhyParam {
|
|||||||
super(clusterPhyId);
|
super(clusterPhyId);
|
||||||
this.topicName = topicName;
|
this.topicName = topicName;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "TopicParam{" +
|
||||||
|
"clusterPhyId=" + clusterPhyId +
|
||||||
|
", topicName='" + topicName + '\'' +
|
||||||
|
'}';
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -56,6 +56,7 @@ public enum ResultStatus {
|
|||||||
KAFKA_OPERATE_FAILED(8010, "Kafka操作失败"),
|
KAFKA_OPERATE_FAILED(8010, "Kafka操作失败"),
|
||||||
MYSQL_OPERATE_FAILED(8020, "MySQL操作失败"),
|
MYSQL_OPERATE_FAILED(8020, "MySQL操作失败"),
|
||||||
ZK_OPERATE_FAILED(8030, "ZK操作失败"),
|
ZK_OPERATE_FAILED(8030, "ZK操作失败"),
|
||||||
|
ZK_FOUR_LETTER_CMD_FORBIDDEN(8031, "ZK四字命令被禁止"),
|
||||||
ES_OPERATE_ERROR(8040, "ES操作失败"),
|
ES_OPERATE_ERROR(8040, "ES操作失败"),
|
||||||
HTTP_REQ_ERROR(8050, "第三方http请求异常"),
|
HTTP_REQ_ERROR(8050, "第三方http请求异常"),
|
||||||
|
|
||||||
|
|||||||
@@ -23,6 +23,8 @@ public class VersionMetricControlItem extends VersionControlItem{
|
|||||||
public static final String CATEGORY_PERFORMANCE = "Performance";
|
public static final String CATEGORY_PERFORMANCE = "Performance";
|
||||||
public static final String CATEGORY_FLOW = "Flow";
|
public static final String CATEGORY_FLOW = "Flow";
|
||||||
|
|
||||||
|
public static final String CATEGORY_CLIENT = "Client";
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 指标单位名称,非指标的没有
|
* 指标单位名称,非指标的没有
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -0,0 +1,19 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper;
|
||||||
|
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||||
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
|
import lombok.Data;
|
||||||
|
import org.apache.zookeeper.data.Stat;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
public class Znode {
|
||||||
|
@ApiModelProperty(value = "节点名称", example = "broker")
|
||||||
|
private String name;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "节点数据", example = "saassad")
|
||||||
|
private String data;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "节点属性", example = "")
|
||||||
|
private Stat stat;
|
||||||
|
}
|
||||||
@@ -0,0 +1,42 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.BaseEntity;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
public class ZookeeperInfo extends BaseEntity {
|
||||||
|
/**
|
||||||
|
* 集群Id
|
||||||
|
*/
|
||||||
|
private Long clusterPhyId;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 主机
|
||||||
|
*/
|
||||||
|
private String host;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 端口
|
||||||
|
*/
|
||||||
|
private Integer port;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 角色
|
||||||
|
*/
|
||||||
|
private String role;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 版本
|
||||||
|
*/
|
||||||
|
private String version;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ZK状态
|
||||||
|
*/
|
||||||
|
private Integer status;
|
||||||
|
|
||||||
|
public boolean alive() {
|
||||||
|
return !(Constant.DOWN.equals(status));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword;
|
||||||
|
|
||||||
|
import java.io.Serializable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 四字命令结果数据的基础类
|
||||||
|
*/
|
||||||
|
public class BaseFourLetterWordCmdData implements Serializable {
|
||||||
|
}
|
||||||
@@ -0,0 +1,38 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* clientPort=2183
|
||||||
|
* dataDir=/data1/data/zkData2/version-2
|
||||||
|
* dataLogDir=/data1/data/zkLog2/version-2
|
||||||
|
* tickTime=2000
|
||||||
|
* maxClientCnxns=60
|
||||||
|
* minSessionTimeout=4000
|
||||||
|
* maxSessionTimeout=40000
|
||||||
|
* serverId=2
|
||||||
|
* initLimit=15
|
||||||
|
* syncLimit=10
|
||||||
|
* electionAlg=3
|
||||||
|
* electionPort=4445
|
||||||
|
* quorumPort=4444
|
||||||
|
* peerType=0
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class ConfigCmdData extends BaseFourLetterWordCmdData {
|
||||||
|
private Long clientPort;
|
||||||
|
private String dataDir;
|
||||||
|
private String dataLogDir;
|
||||||
|
private Long tickTime;
|
||||||
|
private Long maxClientCnxns;
|
||||||
|
private Long minSessionTimeout;
|
||||||
|
private Long maxSessionTimeout;
|
||||||
|
private Integer serverId;
|
||||||
|
private String initLimit;
|
||||||
|
private Long syncLimit;
|
||||||
|
private Long electionAlg;
|
||||||
|
private Long electionPort;
|
||||||
|
private Long quorumPort;
|
||||||
|
private Long peerType;
|
||||||
|
}
|
||||||
@@ -0,0 +1,39 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* zk_version 3.4.6-1569965, built on 02/20/2014 09:09 GMT
|
||||||
|
* zk_avg_latency 0
|
||||||
|
* zk_max_latency 399
|
||||||
|
* zk_min_latency 0
|
||||||
|
* zk_packets_received 234857
|
||||||
|
* zk_packets_sent 234860
|
||||||
|
* zk_num_alive_connections 4
|
||||||
|
* zk_outstanding_requests 0
|
||||||
|
* zk_server_state follower
|
||||||
|
* zk_znode_count 35566
|
||||||
|
* zk_watch_count 39
|
||||||
|
* zk_ephemerals_count 10
|
||||||
|
* zk_approximate_data_size 3356708
|
||||||
|
* zk_open_file_descriptor_count 35
|
||||||
|
* zk_max_file_descriptor_count 819200
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class MonitorCmdData extends BaseFourLetterWordCmdData {
|
||||||
|
private String zkVersion;
|
||||||
|
private Float zkAvgLatency;
|
||||||
|
private Long zkMaxLatency;
|
||||||
|
private Long zkMinLatency;
|
||||||
|
private Long zkPacketsReceived;
|
||||||
|
private Long zkPacketsSent;
|
||||||
|
private Long zkNumAliveConnections;
|
||||||
|
private Long zkOutstandingRequests;
|
||||||
|
private String zkServerState;
|
||||||
|
private Long zkZnodeCount;
|
||||||
|
private Long zkWatchCount;
|
||||||
|
private Long zkEphemeralsCount;
|
||||||
|
private Long zkApproximateDataSize;
|
||||||
|
private Long zkOpenFileDescriptorCount;
|
||||||
|
private Long zkMaxFileDescriptorCount;
|
||||||
|
}
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Zookeeper version: 3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 19:49 GMT
|
||||||
|
* Latency min/avg/max: 0/0/2209
|
||||||
|
* Received: 278202469
|
||||||
|
* Sent: 279449055
|
||||||
|
* Connections: 31
|
||||||
|
* Outstanding: 0
|
||||||
|
* Zxid: 0x20033fc12
|
||||||
|
* Mode: leader
|
||||||
|
* Node count: 10084
|
||||||
|
* Proposal sizes last/min/max: 36/32/31260 leader特有
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class ServerCmdData extends BaseFourLetterWordCmdData {
|
||||||
|
private String zkVersion;
|
||||||
|
private Float zkAvgLatency;
|
||||||
|
private Long zkMaxLatency;
|
||||||
|
private Long zkMinLatency;
|
||||||
|
private Long zkPacketsReceived;
|
||||||
|
private Long zkPacketsSent;
|
||||||
|
private Long zkNumAliveConnections;
|
||||||
|
private Long zkOutstandingRequests;
|
||||||
|
private String zkServerState;
|
||||||
|
private Long zkZnodeCount;
|
||||||
|
private Long zkZxid;
|
||||||
|
}
|
||||||
@@ -0,0 +1,116 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.parser;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.ConfigCmdData;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.FourLetterWordUtil;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* clientPort=2183
|
||||||
|
* dataDir=/data1/data/zkData2/version-2
|
||||||
|
* dataLogDir=/data1/data/zkLog2/version-2
|
||||||
|
* tickTime=2000
|
||||||
|
* maxClientCnxns=60
|
||||||
|
* minSessionTimeout=4000
|
||||||
|
* maxSessionTimeout=40000
|
||||||
|
* serverId=2
|
||||||
|
* initLimit=15
|
||||||
|
* syncLimit=10
|
||||||
|
* electionAlg=3
|
||||||
|
* electionPort=4445
|
||||||
|
* quorumPort=4444
|
||||||
|
* peerType=0
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class ConfigCmdDataParser implements FourLetterWordDataParser<ConfigCmdData> {
|
||||||
|
private static final ILog LOGGER = LogFactory.getLog(ConfigCmdDataParser.class);
|
||||||
|
|
||||||
|
private Result<ConfigCmdData> dataResult = null;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getCmd() {
|
||||||
|
return FourLetterWordUtil.ConfigCmd;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ConfigCmdData parseAndInitData(Long clusterPhyId, String host, int port, String cmdData) {
|
||||||
|
Map<String, String> dataMap = new HashMap<>();
|
||||||
|
for (String elem : cmdData.split("\n")) {
|
||||||
|
if (elem.isEmpty()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
int idx = elem.indexOf('=');
|
||||||
|
if (idx >= 0) {
|
||||||
|
dataMap.put(elem.substring(0, idx), elem.substring(idx + 1).trim());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ConfigCmdData configCmdData = new ConfigCmdData();
|
||||||
|
dataMap.entrySet().stream().forEach(elem -> {
|
||||||
|
try {
|
||||||
|
switch (elem.getKey()) {
|
||||||
|
case "clientPort":
|
||||||
|
configCmdData.setClientPort(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "dataDir":
|
||||||
|
configCmdData.setDataDir(elem.getValue());
|
||||||
|
break;
|
||||||
|
case "dataLogDir":
|
||||||
|
configCmdData.setDataLogDir(elem.getValue());
|
||||||
|
break;
|
||||||
|
case "tickTime":
|
||||||
|
configCmdData.setTickTime(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "maxClientCnxns":
|
||||||
|
configCmdData.setMaxClientCnxns(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "minSessionTimeout":
|
||||||
|
configCmdData.setMinSessionTimeout(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "maxSessionTimeout":
|
||||||
|
configCmdData.setMaxSessionTimeout(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "serverId":
|
||||||
|
configCmdData.setServerId(Integer.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "initLimit":
|
||||||
|
configCmdData.setInitLimit(elem.getValue());
|
||||||
|
break;
|
||||||
|
case "syncLimit":
|
||||||
|
configCmdData.setSyncLimit(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "electionAlg":
|
||||||
|
configCmdData.setElectionAlg(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "electionPort":
|
||||||
|
configCmdData.setElectionPort(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "quorumPort":
|
||||||
|
configCmdData.setQuorumPort(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "peerType":
|
||||||
|
configCmdData.setPeerType(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
LOGGER.warn(
|
||||||
|
"class=ConfigCmdDataParser||method=parseAndInitData||name={}||value={}||msg=data not parsed!",
|
||||||
|
elem.getKey(), elem.getValue()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
LOGGER.error(
|
||||||
|
"class=ConfigCmdDataParser||method=parseAndInitData||clusterPhyId={}||host={}||port={}||name={}||value={}||errMsg=exception!",
|
||||||
|
clusterPhyId, host, port, elem.getKey(), elem.getValue(), e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return configCmdData;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.parser;
|
||||||
|
|
||||||
|
/**
 * Parser contract for the result of one ZooKeeper four-letter-word command.
 *
 * @param <T> the data type the raw command output is parsed into
 */
public interface FourLetterWordDataParser<T> {
    /**
     * @return the four-letter command this parser handles
     */
    String getCmd();

    /**
     * Parses the raw command output into a {@code T}.
     *
     * @param clusterPhyId physical cluster id (typically for log/context use)
     * @param host         host the command was issued against
     * @param port         port the command was issued against
     * @param cmdData      raw command output text
     * @return the parsed data object
     */
    T parseAndInitData(Long clusterPhyId, String host, int port, String cmdData);
}
|
||||||
@@ -0,0 +1,117 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.parser;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.MonitorCmdData;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.FourLetterWordUtil;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* zk_version 3.4.6-1569965, built on 02/20/2014 09:09 GMT
|
||||||
|
* zk_avg_latency 0
|
||||||
|
* zk_max_latency 399
|
||||||
|
* zk_min_latency 0
|
||||||
|
* zk_packets_received 234857
|
||||||
|
* zk_packets_sent 234860
|
||||||
|
* zk_num_alive_connections 4
|
||||||
|
* zk_outstanding_requests 0
|
||||||
|
* zk_server_state follower
|
||||||
|
* zk_znode_count 35566
|
||||||
|
* zk_watch_count 39
|
||||||
|
* zk_ephemerals_count 10
|
||||||
|
* zk_approximate_data_size 3356708
|
||||||
|
* zk_open_file_descriptor_count 35
|
||||||
|
* zk_max_file_descriptor_count 819200
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class MonitorCmdDataParser implements FourLetterWordDataParser<MonitorCmdData> {
|
||||||
|
private static final ILog LOGGER = LogFactory.getLog(MonitorCmdDataParser.class);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getCmd() {
|
||||||
|
return FourLetterWordUtil.MonitorCmd;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public MonitorCmdData parseAndInitData(Long clusterPhyId, String host, int port, String cmdData) {
|
||||||
|
Map<String, String> dataMap = new HashMap<>();
|
||||||
|
for (String elem : cmdData.split("\n")) {
|
||||||
|
if (elem.isEmpty()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
int idx = elem.indexOf('\t');
|
||||||
|
if (idx >= 0) {
|
||||||
|
dataMap.put(elem.substring(0, idx), elem.substring(idx + 1).trim());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
MonitorCmdData monitorCmdData = new MonitorCmdData();
|
||||||
|
dataMap.entrySet().stream().forEach(elem -> {
|
||||||
|
try {
|
||||||
|
switch (elem.getKey()) {
|
||||||
|
case "zk_version":
|
||||||
|
monitorCmdData.setZkVersion(elem.getValue().split("-")[0]);
|
||||||
|
break;
|
||||||
|
case "zk_avg_latency":
|
||||||
|
monitorCmdData.setZkAvgLatency(Float.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_max_latency":
|
||||||
|
monitorCmdData.setZkMaxLatency(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_min_latency":
|
||||||
|
monitorCmdData.setZkMinLatency(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_packets_received":
|
||||||
|
monitorCmdData.setZkPacketsReceived(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_packets_sent":
|
||||||
|
monitorCmdData.setZkPacketsSent(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_num_alive_connections":
|
||||||
|
monitorCmdData.setZkNumAliveConnections(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_outstanding_requests":
|
||||||
|
monitorCmdData.setZkOutstandingRequests(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_server_state":
|
||||||
|
monitorCmdData.setZkServerState(elem.getValue());
|
||||||
|
break;
|
||||||
|
case "zk_znode_count":
|
||||||
|
monitorCmdData.setZkZnodeCount(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_watch_count":
|
||||||
|
monitorCmdData.setZkWatchCount(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_ephemerals_count":
|
||||||
|
monitorCmdData.setZkEphemeralsCount(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_approximate_data_size":
|
||||||
|
monitorCmdData.setZkApproximateDataSize(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_open_file_descriptor_count":
|
||||||
|
monitorCmdData.setZkOpenFileDescriptorCount(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "zk_max_file_descriptor_count":
|
||||||
|
monitorCmdData.setZkMaxFileDescriptorCount(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
LOGGER.warn(
|
||||||
|
"class=MonitorCmdDataParser||method=parseAndInitData||name={}||value={}||msg=data not parsed!",
|
||||||
|
elem.getKey(), elem.getValue()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
LOGGER.error(
|
||||||
|
"class=MonitorCmdDataParser||method=parseAndInitData||clusterPhyId={}||host={}||port={}||name={}||value={}||errMsg=exception!",
|
||||||
|
clusterPhyId, host, port, elem.getKey(), elem.getValue(), e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return monitorCmdData;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,97 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.parser;
|
||||||
|
|
||||||
|
import com.didiglobal.logi.log.ILog;
|
||||||
|
import com.didiglobal.logi.log.LogFactory;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.ServerCmdData;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.FourLetterWordUtil;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Zookeeper version: 3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 19:49 GMT
|
||||||
|
* Latency min/avg/max: 0/0/2209
|
||||||
|
* Received: 278202469
|
||||||
|
* Sent: 279449055
|
||||||
|
* Connections: 31
|
||||||
|
* Outstanding: 0
|
||||||
|
* Zxid: 0x20033fc12
|
||||||
|
* Mode: leader
|
||||||
|
* Node count: 10084
|
||||||
|
* Proposal sizes last/min/max: 36/32/31260 leader特有
|
||||||
|
*/
|
||||||
|
@Data
|
||||||
|
public class ServerCmdDataParser implements FourLetterWordDataParser<ServerCmdData> {
|
||||||
|
private static final ILog LOGGER = LogFactory.getLog(ServerCmdDataParser.class);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getCmd() {
|
||||||
|
return FourLetterWordUtil.ServerCmd;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ServerCmdData parseAndInitData(Long clusterPhyId, String host, int port, String cmdData) {
|
||||||
|
Map<String, String> dataMap = new HashMap<>();
|
||||||
|
for (String elem : cmdData.split("\n")) {
|
||||||
|
if (elem.isEmpty()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
int idx = elem.indexOf(':');
|
||||||
|
if (idx >= 0) {
|
||||||
|
dataMap.put(elem.substring(0, idx), elem.substring(idx + 1).trim());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerCmdData serverCmdData = new ServerCmdData();
|
||||||
|
dataMap.entrySet().stream().forEach(elem -> {
|
||||||
|
try {
|
||||||
|
switch (elem.getKey()) {
|
||||||
|
case "Zookeeper version":
|
||||||
|
serverCmdData.setZkVersion(elem.getValue().split("-")[0]);
|
||||||
|
break;
|
||||||
|
case "Latency min/avg/max":
|
||||||
|
String[] data = elem.getValue().split("/");
|
||||||
|
serverCmdData.setZkMinLatency(Long.valueOf(data[0]));
|
||||||
|
serverCmdData.setZkAvgLatency(Float.valueOf(data[1]));
|
||||||
|
serverCmdData.setZkMaxLatency(Long.valueOf(data[2]));
|
||||||
|
break;
|
||||||
|
case "Received":
|
||||||
|
serverCmdData.setZkPacketsReceived(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "Sent":
|
||||||
|
serverCmdData.setZkPacketsSent(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "Connections":
|
||||||
|
serverCmdData.setZkNumAliveConnections(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "Outstanding":
|
||||||
|
serverCmdData.setZkOutstandingRequests(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "Mode":
|
||||||
|
serverCmdData.setZkServerState(elem.getValue());
|
||||||
|
break;
|
||||||
|
case "Node count":
|
||||||
|
serverCmdData.setZkZnodeCount(Long.valueOf(elem.getValue()));
|
||||||
|
break;
|
||||||
|
case "Zxid":
|
||||||
|
serverCmdData.setZkZxid(Long.parseUnsignedLong(elem.getValue().trim().substring(2), 16));
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
LOGGER.warn(
|
||||||
|
"class=ServerCmdDataParser||method=parseAndInitData||name={}||value={}||msg=data not parsed!",
|
||||||
|
elem.getKey(), elem.getValue()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
LOGGER.error(
|
||||||
|
"class=ServerCmdDataParser||method=parseAndInitData||clusterPhyId={}||host={}||port={}||name={}||value={}||errMsg=exception!",
|
||||||
|
clusterPhyId, host, port, elem.getKey(), elem.getValue(), e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return serverCmdData;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,15 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.event.cluster;
|
||||||
|
|
||||||
|
import lombok.Getter;
|
||||||
|
|
||||||
|
/**
 * Event fired when a physical cluster is added.
 * @author zengqiao
 * @date 22/02/25
 */
@Getter
public class ClusterPhyAddedEvent extends ClusterPhyBaseEvent {
    public ClusterPhyAddedEvent(Object source, Long clusterPhyId) {
        super(source, clusterPhyId);
    }
}
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.common.bean.event.kafka.zk;
|
|
||||||
|
|
||||||
import lombok.Getter;
|
|
||||||
|
|
||||||
@Getter
public abstract class BaseKafkaZKEvent {
    /**
     * Time the event was triggered.
     */
    protected Long eventTime;

    /**
     * Whether this event initializes data.
     * NOTE(review): not assigned by the constructor below — stays null unless a
     * subclass sets it; confirm that is intended.
     */
    protected Boolean initEvent;

    /**
     * Physical cluster ID.
     */
    protected Long clusterPhyId;

    protected BaseKafkaZKEvent(Long eventTime, Long clusterPhyId) {
        this.eventTime = eventTime;
        this.clusterPhyId = clusterPhyId;
    }
}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
package com.xiaojukeji.know.streaming.km.common.bean.event.kafka.zk;
|
|
||||||
|
|
||||||
import lombok.Getter;
|
|
||||||
|
|
||||||
/**
 * Event signalling a Kafka controller change.
 */
@Getter
public class ControllerChangeEvent extends BaseKafkaZKEvent {
    public ControllerChangeEvent(Long eventTime, Long clusterPhyId) {
        super(eventTime, clusterPhyId);
    }
}
|
|
||||||
@@ -8,8 +8,6 @@ import org.springframework.context.ApplicationEvent;
|
|||||||
*/
|
*/
|
||||||
@Getter
|
@Getter
|
||||||
public class BaseMetricEvent extends ApplicationEvent {
|
public class BaseMetricEvent extends ApplicationEvent {
|
||||||
|
|
||||||
|
|
||||||
public BaseMetricEvent(Object source) {
|
public BaseMetricEvent(Object source) {
|
||||||
super( source );
|
super( source );
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,20 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.event.metric;
|
||||||
|
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ZookeeperMetrics;
|
||||||
|
import lombok.Getter;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @author didi
|
||||||
|
*/
|
||||||
|
@Getter
|
||||||
|
public class ZookeeperMetricEvent extends BaseMetricEvent {
|
||||||
|
|
||||||
|
private List<ZookeeperMetrics> zookeeperMetrics;
|
||||||
|
|
||||||
|
public ZookeeperMetricEvent(Object source, List<ZookeeperMetrics> zookeeperMetrics) {
|
||||||
|
super( source );
|
||||||
|
this.zookeeperMetrics = zookeeperMetrics;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -42,4 +42,9 @@ public class BrokerPO extends BasePO {
|
|||||||
* Broker状态
|
* Broker状态
|
||||||
*/
|
*/
|
||||||
private Integer status;
|
private Integer status;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 监听信息
|
||||||
|
*/
|
||||||
|
private String endpointMap;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -41,6 +41,11 @@ public class ClusterPhyPO extends BasePO {
|
|||||||
*/
|
*/
|
||||||
private String jmxProperties;
|
private String jmxProperties;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* zk配置
|
||||||
|
*/
|
||||||
|
private String zkProperties;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 认证类型
|
* 认证类型
|
||||||
* @see com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum
|
* @see com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package com.xiaojukeji.know.streaming.km.common.bean.po.group;
|
|||||||
import com.baomidou.mybatisplus.annotation.TableName;
|
import com.baomidou.mybatisplus.annotation.TableName;
|
||||||
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
||||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
|
|
||||||
import lombok.Data;
|
import lombok.Data;
|
||||||
import lombok.NoArgsConstructor;
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
@@ -23,12 +22,19 @@ public class GroupMemberPO extends BasePO {
|
|||||||
|
|
||||||
private Integer memberCount;
|
private Integer memberCount;
|
||||||
|
|
||||||
public GroupMemberPO(Long clusterPhyId, String topicName, String groupName, Date updateTime) {
|
public GroupMemberPO(Long clusterPhyId, String topicName, String groupName, String state, Integer memberCount) {
|
||||||
this.clusterPhyId = clusterPhyId;
|
this.clusterPhyId = clusterPhyId;
|
||||||
this.topicName = topicName;
|
this.topicName = topicName;
|
||||||
this.groupName = groupName;
|
this.groupName = groupName;
|
||||||
this.state = GroupStateEnum.UNKNOWN.getState();
|
this.state = state;
|
||||||
this.memberCount = 0;
|
this.memberCount = memberCount;
|
||||||
|
}
|
||||||
|
public GroupMemberPO(Long clusterPhyId, String topicName, String groupName, String state, Integer memberCount, Date updateTime) {
|
||||||
|
this.clusterPhyId = clusterPhyId;
|
||||||
|
this.topicName = topicName;
|
||||||
|
this.groupName = groupName;
|
||||||
|
this.state = state;
|
||||||
|
this.memberCount = memberCount;
|
||||||
this.updateTime = updateTime;
|
this.updateTime = updateTime;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,61 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.po.group;
|
||||||
|
|
||||||
|
|
||||||
|
import com.baomidou.mybatisplus.annotation.TableName;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
|
||||||
|
@Data
@NoArgsConstructor
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "group")
public class GroupPO extends BasePO {
    /**
     * Physical cluster ID.
     */
    private Long clusterPhyId;

    /**
     * Group type.
     *
     * @see GroupTypeEnum
     */
    private Integer type;

    /**
     * Group name.
     */
    private String name;

    /**
     * Group state.
     *
     * @see GroupStateEnum
     */
    private String state;

    /**
     * Number of members in the group.
     */
    private Integer memberCount;

    /**
     * Topics consumed by the group.
     */
    private String topicMembers;

    /**
     * Partition assignment strategy of the group.
     */
    private String partitionAssignor;

    /**
     * Broker ID of the group coordinator.
     * NOTE(review): primitive int (cannot be null), unlike the other boxed
     * fields in this class — confirm intended.
     */
    private int coordinatorId;

}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.po.metrice;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
import static com.xiaojukeji.know.streaming.km.common.utils.CommonUtils.monitorTimestamp2min;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
public class ZookeeperMetricPO extends BaseMetricESPO {
|
||||||
|
public ZookeeperMetricPO(Long clusterPhyId){
|
||||||
|
super(clusterPhyId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getKey() {
|
||||||
|
return "ZK@" + clusterPhyId + "@" + monitorTimestamp2min(timestamp);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getRoutingValue() {
|
||||||
|
return String.valueOf(clusterPhyId);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,40 @@
|
|||||||
|
package com.xiaojukeji.know.streaming.km.common.bean.po.zookeeper;
|
||||||
|
|
||||||
|
import com.baomidou.mybatisplus.annotation.TableName;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
||||||
|
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
@Data
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "zookeeper")
public class ZookeeperInfoPO extends BasePO {
    /**
     * Physical cluster ID.
     */
    private Long clusterPhyId;

    /**
     * Host of the ZooKeeper server.
     */
    private String host;

    /**
     * Port of the ZooKeeper server.
     */
    private Integer port;

    /**
     * Server role.
     */
    private String role;

    /**
     * ZooKeeper version.
     */
    private String version;

    /**
     * ZooKeeper server status.
     */
    private Integer status;
}
|
||||||
@@ -31,9 +31,15 @@ public class ClusterPhyBaseVO extends BaseTimeVO {
|
|||||||
@ApiModelProperty(value="Jmx配置", example = "{}")
|
@ApiModelProperty(value="Jmx配置", example = "{}")
|
||||||
protected String jmxProperties;
|
protected String jmxProperties;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="ZK配置", example = "{}")
|
||||||
|
protected String zkProperties;
|
||||||
|
|
||||||
@ApiModelProperty(value="描述", example = "测试")
|
@ApiModelProperty(value="描述", example = "测试")
|
||||||
protected String description;
|
protected String description;
|
||||||
|
|
||||||
@ApiModelProperty(value="集群的kafka版本", example = "2.5.1")
|
@ApiModelProperty(value="集群的kafka版本", example = "2.5.1")
|
||||||
protected String kafkaVersion;
|
protected String kafkaVersion;
|
||||||
|
|
||||||
|
@ApiModelProperty(value="集群的运行模式", example = "2:raft模式,其他是ZK模式")
|
||||||
|
private Integer runState;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -31,6 +31,9 @@ public class ClusterBrokersOverviewVO extends BrokerMetadataVO {
|
|||||||
@ApiModelProperty(value = "jmx端口")
|
@ApiModelProperty(value = "jmx端口")
|
||||||
private Integer jmxPort;
|
private Integer jmxPort;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "jmx连接状态 true:连接成功 false:连接失败")
|
||||||
|
private Boolean jmxConnected;
|
||||||
|
|
||||||
@ApiModelProperty(value = "是否存活 true:存活 false:不存活")
|
@ApiModelProperty(value = "是否存活 true:存活 false:不存活")
|
||||||
private Boolean alive;
|
private Boolean alive;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,4 +14,7 @@ import lombok.NoArgsConstructor;
|
|||||||
public class UserMetricConfigVO extends VersionItemVO {
|
public class UserMetricConfigVO extends VersionItemVO {
|
||||||
@ApiModelProperty(value = "该指标用户是否设置展现", example = "true")
|
@ApiModelProperty(value = "该指标用户是否设置展现", example = "true")
|
||||||
private Boolean set;
|
private Boolean set;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "该指标展示优先级", example = "1")
|
||||||
|
private Integer rank;
|
||||||
}
|
}
|
||||||
|
|||||||