Mirror of https://github.com/didi/KnowStreaming.git
Synced 2025-12-24 20:22:12 +08:00

Compare commits: 185 commits, v3.2.0 ... ve_demo_3.
| SHA1 |
|---|
| a87a0663ed |
| 2390ae8941 |
| e2692a6fc4 |
| c18eeb6d55 |
| f5de9789f2 |
| 4ae34d0030 |
| 95bce89ce5 |
| 6853862753 |
| 610af4a9e8 |
| 49d3d078d3 |
| ac4ea13be9 |
| 2339a6f0cd |
| b6ea4aec19 |
| 8346453aa3 |
| a9eb4ae30e |
| cceff91f81 |
| 2744f5b6dd |
| 009ffeb099 |
| e8e05812d0 |
| 58a421c4b9 |
| af916d5a71 |
| 8b30f78744 |
| 592dee884a |
| 715744ca15 |
| 8a95401364 |
| e80f8086d4 |
| af82c2e615 |
| 1369e7b9eb |
| ab6afe6dbc |
| 6e9dc4f807 |
| a8be274ca6 |
| e24a582067 |
| 251f7f7110 |
| 65f8beef32 |
| 38366809f1 |
| 530219a317 |
| c07e544c50 |
| c9308ee4f2 |
| 95158813b9 |
| 59e8a416b5 |
| f6becbdf2c |
| 07bd00d60c |
| 1adfa639ac |
| 3f817991aa |
| 3b72f732be |
| e2ad3afe3d |
| ae04ffdd71 |
| cf9d5b6832 |
| 9c418d3b38 |
| 128b180c83 |
| b60941abc8 |
| 1a42472fd8 |
| 18e00f043e |
| 6385889902 |
| ea0c744677 |
| d1417bef8c |
| a7309612d5 |
| 6e56688a31 |
| a6abfb3ea8 |
| ca696dd6e1 |
| db40a5cd0a |
| 55161e439a |
| bdffc10ca6 |
| b1892c21e2 |
| 90e5492060 |
| b1aa12bfa5 |
| 64cddb7912 |
| 42195c3180 |
| 94b1e508fd |
| dd3dcd37e9 |
| 0a6e9b7633 |
| 470e471cad |
| bd58b48bcb |
| 0cd071c5c6 |
| abaadfb9a8 |
| 49e7fea6d3 |
| d68a19679e |
| 75be94fbea |
| c11aa4fd17 |
| cb96fef1a5 |
| e98cfbcf91 |
| 0140b2e898 |
| b3b7ab9f6b |
| b34edb9b64 |
| c2bc0f788d |
| 3f518c9e63 |
| 7f7801a5f7 |
| e1e02f7c2a |
| c497e4cb2d |
| e34e3f3e3d |
| b3fd494398 |
| ffc115cb76 |
| 7bfe787e39 |
| 2256e8bbdb |
| e975932d41 |
| db044caf8b |
| 82fbea4e5f |
| 6aaa4b34b8 |
| 3cb1f03668 |
| e61c446410 |
| 9d0345c9cd |
| 62f870a342 |
| 13641c00ba |
| 9f6882cf0d |
| d3cc0cb687 |
| 769c2c0fbc |
| c71865f623 |
| aa35965d7a |
| 77b87f1dbe |
| a82d7f594e |
| cca7246281 |
| 258385dc9a |
| 65238231f0 |
| cb22e02fbe |
| aa0bec1206 |
| c56d8cfb0f |
| 793c780015 |
| ec6f063450 |
| f25c65b98b |
| 2d99aae779 |
| a8847dc282 |
| 4852c01c88 |
| 3d6f405b69 |
| 18e3fbf41d |
| ae8cc3092b |
| 5c26e8947b |
| fbe6945d3b |
| 7dc8f2dc48 |
| 91c60ce72c |
| 687eea80c8 |
| 9bfe3fd1db |
| 03f81bc6de |
| eed9571ffa |
| e4651ef749 |
| f715cf7a8d |
| fad9ddb9a1 |
| b6e4f50849 |
| 5c6911e398 |
| a0371ab88b |
| fa2abadc25 |
| f03460f3cd |
| b5683b73c2 |
| c062586c7e |
| 98a5c7b776 |
| e204023b1f |
| 4c5ffccc45 |
| fbcf58e19c |
| e5c6d00438 |
| ab6a4d7099 |
| 78b2b8a45e |
| add2af4f3f |
| 235c0ed30e |
| 5bd93aa478 |
| f95be2c1b3 |
| 5110b30f62 |
| 861faa5df5 |
| efdf624c67 |
| caccf9cef5 |
| 6ba3dceb84 |
| 9b7c41e804 |
| 346aee8fe7 |
| 353d781bca |
| 3ce4bf231a |
| d046cb8bf4 |
| da95c63503 |
| 915e48de22 |
| 256f770971 |
| 16e251cbe8 |
| 67743b859a |
| c275b42632 |
| a02760417b |
| 0e50bfc5d4 |
| eab988e18f |
| dd6004b9d4 |
| ac7c32acd5 |
| f4a219ceef |
| a8b56fb613 |
| 2925a20e8e |
| 6b3eb05735 |
| 17e0c39f83 |
| 4994639111 |
| c187b5246f |
| 6ed6d5ec8a |
| 0735b332a8 |
| 344cec19fe |
.github/PULL_REQUEST_TEMPLATE.md (vendored): 11 lines changed
@@ -14,9 +14,10 @@ XXXX

Please follow this checklist to help us integrate your contribution quickly and easily:

-* [ ] Make sure there is a GitHub issue filed for the change (usually before you start working on it). Trivial changes such as typo fixes do not need a GitHub issue. Your Pull Request should address only that issue, with no unrelated changes: one PR solves one problem.
-* [ ] Format the Pull Request title like [ISSUE #123] support Confluent Schema Registry. Every commit in the Pull Request should have a meaningful subject line and body.
-* [ ] Write a Pull Request description detailed enough to understand what the PR does, how, and why.
-* [ ] Write the unit tests needed to verify your logic. If you submit a new feature or a major change, remember to add an integration-test in the test module.
-* [ ] Make sure the build passes and the integration tests pass.
+* [ ] One PR (short for Pull Request) solves exactly one problem; a PR that solves several problems at once is not allowed;
+* [ ] Make sure the PR has a corresponding issue (usually created before you start working on it), unless it is a trivial change such as a typo fix, which needs no issue;
+* [ ] Format the title and content of the PR and the Commit-Log, see e.g. #861. Note: the Commit-Log must be written when you run git commit; it cannot be edited on GitHub;
+* [ ] Write a PR description detailed enough to understand what the PR does, how, and why;
+* [ ] Write the unit tests needed to verify your logic. If you submit a new feature or a major change, remember to add an integration-test in the test module;
+* [ ] Make sure the build passes and the integration tests pass;
.github/workflows/ci_build.yml (vendored, new file): 43 lines
@@ -0,0 +1,43 @@
name: KnowStreaming Build

on:
  push:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3

    - name: Set up JDK 11
      uses: actions/setup-java@v3
      with:
        java-version: '11'
        distribution: 'temurin'
        cache: maven

    - name: Setup Node
      uses: actions/setup-node@v1
      with:
        node-version: '12.22.12'

    - name: Build With Maven
      run: mvn -Prelease-package -Dmaven.test.skip=true clean install -U

    - name: Get KnowStreaming Version
      if: ${{ success() }}
      run: |
        version=`mvn -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive exec:exec -q`
        echo "VERSION=${version}" >> $GITHUB_ENV

    - name: Upload Binary Package
      if: ${{ success() }}
      uses: actions/upload-artifact@v3
      with:
        name: KnowStreaming-${{ env.VERSION }}.tar.gz
        path: km-dist/target/KnowStreaming-${{ env.VERSION }}.tar.gz
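The CI steps above can be reproduced locally. A minimal sketch, assuming JDK 11, Node 12.22.12, and Maven are already installed (the commands are the same ones the workflow runs):

```bash
# Build the release package, skipping tests (same flags as the CI job)
mvn -Prelease-package -Dmaven.test.skip=true clean install -U

# Resolve the project version the same way the "Get KnowStreaming Version" step does
version=$(mvn -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive exec:exec -q)

# The tarball the workflow uploads as its artifact
ls -lh "km-dist/target/KnowStreaming-${version}.tar.gz"
```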
@@ -4,7 +4,7 @@

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
+contributors and maintainers pledge to making participation in our project, and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,

@@ -56,7 +56,7 @@ further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at shirenchuang@didiglobal.com . All
+reported by contacting the project team at https://knowstreaming.com/support-center . All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
@@ -90,6 +90,7 @@

- [Standalone Deployment Guide](docs/install_guide/单机部署手册.md)
- [Version Upgrade Guide](docs/install_guide/版本升级手册.md)
- [Local Source Startup Guide](docs/dev_guide/本地源码启动手册.md)
+- [Page-No-Data Troubleshooting Guide](docs/dev_guide/页面无数据排查手册.md)

**`Product manuals`**

@@ -100,7 +101,9 @@

**Click [here](https://doc.knowstreaming.com/product) to get more documentation from the official site**

**`Product links`**
- [Official site: https://knowstreaming.com](https://knowstreaming.com)
- [Demo environment: https://demo.knowstreaming.com](https://demo.knowstreaming.com), login account: admin/admin

@@ -143,7 +146,7 @@ PS: When asking a question, please describe the problem fully in one go and include your environment details.

**`2. WeChat group`**

-To join the WeChat group, add `mike_zhangliang`, `PenceXie`, or `szzdzhp001` on WeChat with the note "KnowStreaming".
+To join the WeChat group, add `PenceXie` or `szzdzhp001` on WeChat with the note "KnowStreaming".
<br/>

Before joining the group, please take a moment to star the project; a small star is what motivates the KnowStreaming authors to keep building the community.

@@ -155,3 +158,4 @@ PS: When asking a question, please describe the problem fully in one go and include your environment details.

## Star History

[](https://star-history.com/#didi/KnowStreaming&Date)
@@ -1,4 +1,129 @@

## v3.4.0

**Bug fixes**
- [Bugfix] Fix incorrect wording of the Overview metrics ([#1190](https://github.com/didi/KnowStreaming/issues/1190))
- [Bugfix] Fix an NPE in Connect cluster tasks after a Kafka cluster is deleted ([#1129](https://github.com/didi/KnowStreaming/issues/1129))
- [Bugfix] Fix an NPE when auth-user-registration: false is set for LDAP login ([#1117](https://github.com/didi/KnowStreaming/issues/1117))
- [Bugfix] Fix an NPE in user.getId() during LDAP login ([#1108](https://github.com/didi/KnowStreaming/issues/1108))
- [Bugfix] Fix front-end failures such as adding a new role ([#1107](https://github.com/didi/KnowStreaming/issues/1107))
- [Bugfix] Fix incorrect parsing of ZK four-letter commands
- [Bugfix] Fix incorrect status retrieval in ZK standalone mode
- [Bugfix] Fix cluster onboarding failures caused by the Broker metadata parsing method never being called ([#993](https://github.com/didi/KnowStreaming/issues/993))
- [Bugfix] Fix a ConsumerAssignment type-cast error
- [Bugfix] Fix dynamic updates of a Connect cluster's clusterUrl leaving the configuration ineffective ([#1079](https://github.com/didi/KnowStreaming/issues/1079))
- [Bugfix] Fix consumer groups not supporting a reset to the earliest offset ([#1059](https://github.com/didi/KnowStreaming/issues/1059))
- [Bugfix] Add a back-end permission point for viewing user passwords ([#1095](https://github.com/didi/KnowStreaming/issues/1095))
- [Bugfix] Fix incorrect bookkeeping of Connect-JMX port information ([#1146](https://github.com/didi/KnowStreaming/issues/1146))
- [Bugfix] Fix the system-management sub-application failing to start ([#1167](https://github.com/didi/KnowStreaming/issues/1167))
- [Bugfix] Fix missing permission points in the Security module ([#1069](https://github.com/didi/KnowStreaming/issues/1069)), ([#1154](https://github.com/didi/KnowStreaming/issues/1154))
- [Bugfix] Fix Connect-Worker JMX not taking effect ([#1067](https://github.com/didi/KnowStreaming/issues/1067))
- [Bugfix] Fix the consumer-group list displaying incorrectly in ACL management ([#1037](https://github.com/didi/KnowStreaming/issues/1037))
- [Bugfix] Fix the Connect module not pre-selecting default metrics ([#1022](https://github.com/didi/KnowStreaming/issues/1022))
- [Bugfix] Fix an ES index create/delete infinite loop ([#1021](https://github.com/didi/KnowStreaming/issues/1021))
- [Bugfix] Fix Connect-GroupDescription parsing failures ([#1015](https://github.com/didi/KnowStreaming/issues/1015))
- [Bugfix] Fix missing Partition metric tags in the Prometheus open API ([#1014](https://github.com/didi/KnowStreaming/issues/1014))
- [Bugfix] Fix the Topic message view not showing messages whose offset is 0 ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
- [Bugfix] Fix the reset-offset API being called too many times
- [Bugfix] Connect task submission now saves only the configuration the user changed, and fix incomplete configuration display in JSON mode ([#1158](https://github.com/didi/KnowStreaming/issues/1158))
- [Bugfix] Fix consumer-group offset resets reporting success while the front end neither refreshes nor shows any offset change ([#1090](https://github.com/didi/KnowStreaming/issues/1090))
- [Bugfix] Fix system management being viewable without the system-management view permission ([#1105](https://github.com/didi/KnowStreaming/issues/1105))

**Product improvements**
- [Optimize] Extend the list of selectable Kafka versions when onboarding a cluster ([#1204](https://github.com/didi/KnowStreaming/issues/1204))
- [Optimize] Fetch GroupTopic information in real time ([#1196](https://github.com/didi/KnowStreaming/issues/1196))
- [Optimize] Add AdminClient observability information ([#1111](https://github.com/didi/KnowStreaming/issues/1111))
- [Optimize] Add a Connector running-state metric ([#1110](https://github.com/didi/KnowStreaming/issues/1110))
- [Optimize] Unify the DB metadata update format ([#1127](https://github.com/didi/KnowStreaming/issues/1127)), ([#1125](https://github.com/didi/KnowStreaming/issues/1125)), ([#1006](https://github.com/didi/KnowStreaming/issues/1006))
- [Optimize] Add MDC support to log output, making it easy to JSON-format logs in logback.xml ([#1032](https://github.com/didi/KnowStreaming/issues/1032))
- [Optimize] Improve JMX-related logs ([#1082](https://github.com/didi/KnowStreaming/issues/1082))
- [Optimize] Add an active timeout to Topic-Partitions ([#1076](https://github.com/didi/KnowStreaming/issues/1076))
- [Optimize] Add back-end sorting by Partition and Offset on the Topic-Messages page ([#1075](https://github.com/didi/KnowStreaming/issues/1075))
- [Optimize] Align the JSON of Connect JSON mode with the official API format ([#1080](https://github.com/didi/KnowStreaming/issues/1080)), ([#1153](https://github.com/didi/KnowStreaming/issues/1153)), ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
- [Optimize] Show the latest star count on the login page
- [Optimize] Fetch the maxLag metric of the Group list in real time ([#1074](https://github.com/didi/KnowStreaming/issues/1074))
- [Optimize] Add restart, edit, delete, and similar permission points for Connectors ([#1066](https://github.com/didi/KnowStreaming/issues/1066)), ([#1147](https://github.com/didi/KnowStreaming/issues/1147))
- [Optimize] Improve the tag name of the KS version in pom.xml
- [Optimize] Reduce the delay in the Controller display on the cluster Brokers page ([#1162](https://github.com/didi/KnowStreaming/issues/1162))
- [Optimize] Bump the Jackson version to 2.13.5
- [Optimize] ACL additions, custom permission configuration, and TransactionalId resource improvements ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
- [Optimize] Connect styling improvements
- [Optimize] Real-time data refresh on the consumer-group detail page

**New features**
- [Feature] Add deletion of a Group or a GroupOffset ([#1064](https://github.com/didi/KnowStreaming/issues/1064)), ([#1084](https://github.com/didi/KnowStreaming/issues/1084)), ([#1040](https://github.com/didi/KnowStreaming/issues/1040)), ([#1144](https://github.com/didi/KnowStreaming/issues/1144))
- [Feature] Add data truncation ([#1062](https://github.com/didi/KnowStreaming/issues/1062)), ([#1043](https://github.com/didi/KnowStreaming/issues/1043)), ([#1145](https://github.com/didi/KnowStreaming/issues/1145))
- [Feature] Support specifying a concrete JMX port per server ([#965](https://github.com/didi/KnowStreaming/issues/965))

**Documentation updates**
- [Doc] FAQ: add usage notes for ES 8.x ([#1189](https://github.com/didi/KnowStreaming/issues/1189))
- [Doc] Add notes on startup failures ([#1126](https://github.com/didi/KnowStreaming/issues/1126))
- [Doc] Add ZK no-data troubleshooting notes ([#1004](https://github.com/didi/KnowStreaming/issues/1004))
- [Doc] No-data troubleshooting guide: add the exception log seen when the ES cluster runs out of shards
- [Doc] README: add a link to the page-no-data troubleshooting guide
- [Doc] Add notes on connecting to a specific JMX port ([#965](https://github.com/didi/KnowStreaming/issues/965))
- [Doc] Add usage notes for the zk_properties field ([#1003](https://github.com/didi/KnowStreaming/issues/1003))

---

## v3.3.0

**Bug fixes**
- Fix the Connect JMX-Port configuration not taking effect;
- Fix the Overview page loading forever when no Connector exists;
- Fix incomplete display of Group partition information across pages;
- Fix a wrong parameter being passed when collecting replica metrics;
- Fix the user list throwing an NPE after user information is modified;
- Fix partition selection not taking effect when viewing messages on the Topic detail page;
- Fix ZK client configuration not taking effect;
- Fix the connect module missing the passed-health-check count among its metrics;
- Fix a mapping error in the connect module's metric retrieval methods;
- Fix wrong retrieval of max-dimension metrics in the connect module;
- Fix wrong information in the TopN metrics on the Topic metrics dashboard;
- Fix wrong display of Broker Similar Config;
- Fix an NPE caused by a wrong data type when parsing ZK four-letter commands;
- Fix wrong version gating of the cleanup-policy options when creating a Topic;
- Fix Controller-Host not showing for newly onboarded clusters;
- Fix Connector and MM2 list search not working;
- Fix abnormal Leader display on the Zookeeper page;
- Fix the front-end build failure;

**Product improvements**
- Add default metrics to the ZK Overview page;
- Unify the ES index template init scripts into init_es_template.sh, add the missing connect index template init script, and remove the redundant replica and zookeeper index template init scripts;
- On the metrics dashboard, show (rather than hide) metric cards that have no data after filtering, with a no-data fallback;
- Remove the code that reads and writes replica metrics in ES;
- Improve the Topic health-check logs to make the cause of errors explicit;
- Skip the ZK section in health-check details when there is no ZK module;
- Make the local cache size configurable;
- Include task group information in Task module responses;
- FAQ: add LDAP configuration notes;
- FAQ: add configuration notes for onboarding Kerberos-authenticated Kafka clusters;
- Add a time-dimension index to the ks_km_kafka_change_record table to improve query performance;
- Improve the ZK health-check logs to ease troubleshooting;

**New features**
- Add Topic replication based on DiDi Kafka (DiDi Kafka is required for this capability);
- Topic metrics dashboard: add Topic-replication metrics;
- Add unit tests based on TestContainers;

**Kafka MM2 Beta (newly released in v3.3.0)**
- Create, read, update, and delete MM2 tasks;
- Metrics dashboard for MM2 tasks;
- Health status for MM2 tasks;

---

## v3.2.0

**Bug fixes**
@@ -13,7 +13,7 @@ curl -s --connect-timeout 10 -o /dev/null -X POST -H 'cache-control: no-cache' -
    ],
    "settings" : {
      "index" : {
-       "number_of_shards" : "10"
+       "number_of_shards" : "2"
      }
    },
    "mappings" : {
@@ -115,7 +115,7 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    ],
    "settings" : {
      "index" : {
-       "number_of_shards" : "10"
+       "number_of_shards" : "2"
      }
    },
    "mappings" : {
@@ -302,7 +302,7 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    ],
    "settings" : {
      "index" : {
-       "number_of_shards" : "10"
+       "number_of_shards" : "6"
      }
    },
    "mappings" : {
@@ -377,7 +377,7 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    ],
    "settings" : {
      "index" : {
-       "number_of_shards" : "10"
+       "number_of_shards" : "6"
      }
    },
    "mappings" : {
@@ -436,72 +436,6 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    "aliases" : { }
}'

-curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{
-  "order" : 10,
-  "index_patterns" : [ "ks_kafka_replication_metric*" ],
-  "settings" : {
-    "index" : { "number_of_shards" : "10" }
-  },
-  "mappings" : {
-    "properties" : {
-      "brokerId" : { "type" : "long" },
-      "partitionId" : { "type" : "long" },
-      "routingValue" : {
-        "type" : "text",
-        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
-      },
-      "clusterPhyId" : { "type" : "long" },
-      "topic" : { "type" : "keyword" },
-      "metrics" : {
-        "properties" : {
-          "LogStartOffset" : { "type" : "float" },
-          "Messages" : { "type" : "float" },
-          "LogEndOffset" : { "type" : "float" }
-        }
-      },
-      "key" : {
-        "type" : "text",
-        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
-      },
-      "timestamp" : {
-        "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
-        "index" : true,
-        "type" : "date",
-        "doc_values" : true
-      }
-    }
-  },
-  "aliases" : { }
-}'

curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{
    "order" : 10,
    "index_patterns" : [
@@ -509,7 +443,7 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    ],
    "settings" : {
      "index" : {
-       "number_of_shards" : "10"
+       "number_of_shards" : "6"
      }
    },
    "mappings" : {
@@ -626,7 +560,7 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    ],
    "settings" : {
      "index" : {
-       "number_of_shards" : "10"
+       "number_of_shards" : "2"
      }
    },
    "mappings" : {
@@ -704,6 +638,388 @@ curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: appl
    "aliases" : { }
}'

+curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_connect_cluster_metric -d '{
+  "order" : 10,
+  "index_patterns" : [ "ks_kafka_connect_cluster_metric*" ],
+  "settings" : {
+    "index" : { "number_of_shards" : "2" }
+  },
+  "mappings" : {
+    "properties" : {
+      "connectClusterId" : { "type" : "long" },
+      "routingValue" : {
+        "type" : "text",
+        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
+      },
+      "clusterPhyId" : { "type" : "long" },
+      "metrics" : {
+        "properties" : {
+          "ConnectorCount" : { "type" : "float" },
+          "TaskCount" : { "type" : "float" },
+          "ConnectorStartupAttemptsTotal" : { "type" : "float" },
+          "ConnectorStartupFailurePercentage" : { "type" : "float" },
+          "ConnectorStartupFailureTotal" : { "type" : "float" },
+          "ConnectorStartupSuccessPercentage" : { "type" : "float" },
+          "ConnectorStartupSuccessTotal" : { "type" : "float" },
+          "TaskStartupAttemptsTotal" : { "type" : "float" },
+          "TaskStartupFailurePercentage" : { "type" : "float" },
+          "TaskStartupFailureTotal" : { "type" : "float" },
+          "TaskStartupSuccessPercentage" : { "type" : "float" },
+          "TaskStartupSuccessTotal" : { "type" : "float" }
+        }
+      },
+      "key" : {
+        "type" : "text",
+        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
+      },
+      "timestamp" : {
+        "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
+        "index" : true,
+        "type" : "date",
+        "doc_values" : true
+      }
+    }
+  },
+  "aliases" : { }
+}'
+
+curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_connect_connector_metric -d '{
+  "order" : 10,
+  "index_patterns" : [ "ks_kafka_connect_connector_metric*" ],
+  "settings" : {
+    "index" : { "number_of_shards" : "2" }
+  },
+  "mappings" : {
+    "properties" : {
+      "connectClusterId" : { "type" : "long" },
+      "routingValue" : {
+        "type" : "text",
+        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
+      },
+      "connectorName" : { "type" : "keyword" },
+      "connectorNameAndClusterId" : { "type" : "keyword" },
+      "clusterPhyId" : { "type" : "long" },
+      "metrics" : {
+        "properties" : {
+          "HealthState" : { "type" : "float" },
+          "ConnectorTotalTaskCount" : { "type" : "float" },
+          "HealthCheckPassed" : { "type" : "float" },
+          "HealthCheckTotal" : { "type" : "float" },
+          "ConnectorRunningTaskCount" : { "type" : "float" },
+          "ConnectorPausedTaskCount" : { "type" : "float" },
+          "ConnectorFailedTaskCount" : { "type" : "float" },
+          "ConnectorUnassignedTaskCount" : { "type" : "float" },
+          "BatchSizeAvg" : { "type" : "float" },
+          "BatchSizeMax" : { "type" : "float" },
+          "OffsetCommitAvgTimeMs" : { "type" : "float" },
+          "OffsetCommitMaxTimeMs" : { "type" : "float" },
+          "OffsetCommitFailurePercentage" : { "type" : "float" },
+          "OffsetCommitSuccessPercentage" : { "type" : "float" },
+          "PollBatchAvgTimeMs" : { "type" : "float" },
+          "PollBatchMaxTimeMs" : { "type" : "float" },
+          "SourceRecordActiveCount" : { "type" : "float" },
+          "SourceRecordActiveCountAvg" : { "type" : "float" },
+          "SourceRecordActiveCountMax" : { "type" : "float" },
+          "SourceRecordPollRate" : { "type" : "float" },
+          "SourceRecordPollTotal" : { "type" : "float" },
+          "SourceRecordWriteRate" : { "type" : "float" },
+          "SourceRecordWriteTotal" : { "type" : "float" },
+          "OffsetCommitCompletionRate" : { "type" : "float" },
+          "OffsetCommitCompletionTotal" : { "type" : "float" },
+          "OffsetCommitSkipRate" : { "type" : "float" },
+          "OffsetCommitSkipTotal" : { "type" : "float" },
+          "PartitionCount" : { "type" : "float" },
+          "PutBatchAvgTimeMs" : { "type" : "float" },
+          "PutBatchMaxTimeMs" : { "type" : "float" },
+          "SinkRecordActiveCount" : { "type" : "float" },
+          "SinkRecordActiveCountAvg" : { "type" : "float" },
+          "SinkRecordActiveCountMax" : { "type" : "float" },
+          "SinkRecordLagMax" : { "type" : "float" },
+          "SinkRecordReadRate" : { "type" : "float" },
+          "SinkRecordReadTotal" : { "type" : "float" },
+          "SinkRecordSendRate" : { "type" : "float" },
+          "SinkRecordSendTotal" : { "type" : "float" },
+          "DeadletterqueueProduceFailures" : { "type" : "float" },
+          "DeadletterqueueProduceRequests" : { "type" : "float" },
+          "LastErrorTimestamp" : { "type" : "float" },
+          "TotalErrorsLogged" : { "type" : "float" },
+          "TotalRecordErrors" : { "type" : "float" },
+          "TotalRecordFailures" : { "type" : "float" },
+          "TotalRecordsSkipped" : { "type" : "float" },
+          "TotalRetries" : { "type" : "float" }
+        }
+      },
+      "key" : {
+        "type" : "text",
+        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
+      },
+      "timestamp" : {
+        "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
+        "index" : true,
+        "type" : "date",
+        "doc_values" : true
+      }
+    }
+  },
+  "aliases" : { }
+}'
+
+curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_connect_mirror_maker_metric -d '{
+  "order" : 10,
+  "index_patterns" : [ "ks_kafka_connect_mirror_maker_metric*" ],
+  "settings" : {
+    "index" : { "number_of_shards" : "2" }
+  },
+  "mappings" : {
+    "properties" : {
+      "connectClusterId" : { "type" : "long" },
+      "routingValue" : {
+        "type" : "text",
+        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
+      },
+      "connectorName" : { "type" : "keyword" },
+      "connectorNameAndClusterId" : { "type" : "keyword" },
+      "clusterPhyId" : { "type" : "long" },
+      "metrics" : {
+        "properties" : {
+          "HealthState" : { "type" : "float" },
+          "HealthCheckTotal" : { "type" : "float" },
+          "ByteCount" : { "type" : "float" },
+          "ByteRate" : { "type" : "float" },
+          "RecordAgeMs" : { "type" : "float" },
+          "RecordAgeMsAvg" : { "type" : "float" },
+          "RecordAgeMsMax" : { "type" : "float" },
+          "RecordAgeMsMin" : { "type" : "float" },
+          "RecordCount" : { "type" : "float" },
+          "RecordRate" : { "type" : "float" },
+          "ReplicationLatencyMs" : { "type" : "float" },
+          "ReplicationLatencyMsAvg" : { "type" : "float" },
+          "ReplicationLatencyMsMax" : { "type" : "float" },
+          "ReplicationLatencyMsMin" : { "type" : "float" }
+        }
+      },
+      "key" : {
+        "type" : "text",
+        "fields" : { "keyword" : { "ignore_above" : 256, "type" : "keyword" } }
+      },
+      "timestamp" : {
+        "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
+        "index" : true,
+        "type" : "date",
+        "doc_values" : true
+      }
+    }
+  },
+  "aliases" : { }
+}'
for i in {0..6};
do
    logdate=_$(date -d "${i} day ago" +%Y-%m-%d)
@@ -711,8 +1027,10 @@ do
    curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_cluster_metric${logdate} && \
    curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \
    curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \
-   curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \
    curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_zookeeper_metric${logdate} && \
+   curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_connect_cluster_metric${logdate} && \
+   curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_connect_connector_metric${logdate} && \
+   curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_connect_mirror_maker_metric${logdate} && \
    curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \
    exit 2
done
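Once the script has run, the registered templates and the pre-created daily indices can be spot-checked. A minimal sketch, assuming the same ${esaddr} and ${port} variables used by the script:

```bash
# List the ks_kafka_* index templates the script registered
curl -s "http://${esaddr}:${port}/_cat/templates/ks_kafka*?v"

# List the pre-created daily indices (one per metric family per day, 7 days back)
curl -s "http://${esaddr}:${port}/_cat/indices/ks_kafka*?v"
```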
docs/contribute_guide/assets/分支管理.drawio (new file): 111 lines
@@ -0,0 +1,111 @@
<mxfile host="65bd71144e">
  <diagram id="vxzhwhZdNVAY19FZ4dgb" name="Page-1">
    <mxGraphModel dx="1194" dy="733" grid="0" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1169" pageHeight="827" math="0" shadow="0">
      <root>
        <mxCell id="0"/>
        <mxCell id="1" parent="0"/>
        <mxCell id="4" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=none;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="16">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="200" y="540" as="targetPoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="7" style="edgeStyle=none;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;strokeColor=#33FF33;strokeWidth=2;" edge="1" parent="1" source="2">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="360" y="240" as="targetPoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="5" style="edgeStyle=none;html=1;startArrow=none;strokeColor=#33FF33;strokeWidth=2;" edge="1" parent="1">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="200" y="400" as="targetPoint"/>
            <mxPoint x="360" y="360" as="sourcePoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="3" value="C3" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#FF8000;strokeWidth=2;" vertex="1" parent="1">
          <mxGeometry x="340" y="280" width="40" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="18" style="edgeStyle=none;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;endArrow=none;endFill=0;strokeColor=#FF8000;strokeWidth=2;" edge="1" parent="1" source="8" target="3">
          <mxGeometry relative="1" as="geometry"/>
        </mxCell>
        <mxCell id="8" value="fix_928" style="rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;" vertex="1" parent="1">
          <mxGeometry x="320" y="40" width="80" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="9" value="github_master" style="rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;" vertex="1" parent="1">
          <mxGeometry x="160" y="40" width="80" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="10" value="" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;endArrow=classic;startArrow=none;endFill=1;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="11" target="2">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="200" y="640" as="targetPoint"/>
            <mxPoint x="200" y="80" as="sourcePoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="2" value="C2" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#6666FF;strokeWidth=2;" vertex="1" parent="1">
          <mxGeometry x="180" y="200" width="40" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="12" value="" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;endArrow=classic;endFill=1;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="9" target="11">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="200" y="200" as="targetPoint"/>
            <mxPoint x="200" y="80" as="sourcePoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="11" value="C1" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#6666FF;strokeWidth=2;" vertex="1" parent="1">
          <mxGeometry x="180" y="120" width="40" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="23" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;exitPerimeter=0;endArrow=none;endFill=0;strokeColor=#FF8000;strokeWidth=2;" edge="1" parent="1" source="3">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="360" y="360" as="targetPoint"/>
            <mxPoint x="360" y="400" as="sourcePoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="17" value="" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;startArrow=none;endArrow=none;strokeWidth=2;strokeColor=#6666FF;" edge="1" parent="1" source="2" target="16">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="200" y="640" as="targetPoint"/>
            <mxPoint x="200" y="240" as="sourcePoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="16" value="C4" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.flowchart.on-page_reference;labelPosition=center;align=center;strokeColor=#6666FF;strokeWidth=2;" vertex="1" parent="1">
          <mxGeometry x="180" y="440" width="40" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="22" value="Tag-v3.2.0" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;fillColor=none;strokeColor=none;" vertex="1" parent="1">
          <mxGeometry x="100" y="120" width="80" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="24" value="Tag-v3.2.1" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;fillColor=none;strokeColor=none;" vertex="1" parent="1">
          <mxGeometry x="100" y="440" width="80" height="40" as="geometry"/>
        </mxCell>
        <mxCell id="27" value="Switch to the main branch: git checkout github_master" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
          <mxGeometry x="520" y="90" width="240" height="30" as="geometry"/>
        </mxCell>
        <mxCell id="34" style="edgeStyle=none;html=1;exitX=0;exitY=0;exitDx=0;exitDy=0;entryX=0.855;entryY=0.145;entryDx=0;entryDy=0;entryPerimeter=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="28" target="2">
          <mxGeometry relative="1" as="geometry"/>
        </mxCell>
        <mxCell id="28" value="Pull the latest code on the main branch: git pull" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
          <mxGeometry x="520" y="120" width="160" height="30" as="geometry"/>
        </mxCell>
        <mxCell id="35" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="29">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="270" y="225" as="targetPoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="29" value="Create a new branch off the main branch: git checkout -b fix_928" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
          <mxGeometry x="520" y="210" width="250" height="30" as="geometry"/>
        </mxCell>
        <mxCell id="37" style="edgeStyle=none;html=1;exitX=0;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;entryPerimeter=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="30" target="3">
          <mxGeometry relative="1" as="geometry"/>
        </mxCell>
        <mxCell id="30" value="Commit the code: git commit -m &quot;[Optimize]优化xxx问题(#928)&quot;" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
          <mxGeometry x="520" y="270" width="320" height="30" as="geometry"/>
        </mxCell>
        <mxCell id="31" value="Push to your own remote: git push --set-upstream origin fix_928" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
          <mxGeometry x="520" y="300" width="334" height="30" as="geometry"/>
        </mxCell>
        <mxCell id="38" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;dashed=1;dashPattern=8 8;fontSize=18;endArrow=none;endFill=0;" edge="1" parent="1" source="32">
          <mxGeometry relative="1" as="geometry">
            <mxPoint x="280" y="380" as="targetPoint"/>
          </mxGeometry>
        </mxCell>
        <mxCell id="32" value="Open a Pull Request on the GitHub page; a maintainer merges it into the main repository" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=0;labelPosition=center;verticalLabelPosition=middle;align=center;verticalAlign=middle;" vertex="1" parent="1">
          <mxGeometry x="520" y="360" width="300" height="30" as="geometry"/>
        </mxCell>
      </root>
    </mxGraphModel>
  </diagram>
</mxfile>
docs/contribute_guide/assets/分支管理.png (new binary file, 64 KiB; not shown)
docs/contribute_guide/assets/环境初始化.jpg (new binary file, 180 KiB; not shown)
docs/contribute_guide/assets/申请合并.jpg (new binary file, 80 KiB; not shown)
docs/contribute_guide/assets/问题认领.jpg (new binary file, 631 KiB; not shown)
docs/contribute_guide/贡献名单.md (new file): 100 lines
@@ -0,0 +1,100 @@

# Contributor List

- [Contributor List](#贡献名单)
  - [1. Contributor Roles](#1贡献者角色)
    - [1.1 Maintainer](#11maintainer)
    - [1.2 Committer](#12committer)
    - [1.3 Contributor](#13contributor)
  - [2. Contributor Roster](#2贡献者名单)


## 1. Contributor Roles

KnowStreaming developers fall into three roles: Maintainer, Committer, and Contributor. Each role is defined by the criteria below.

### 1.1 Maintainer

A Maintainer is an individual who has made significant contributions to the evolution and development of the KnowStreaming project, meeting the following criteria:

- Has designed and developed several key modules or projects and is a core developer;
- Shows sustained commitment and passion, actively maintaining the community, the website, issues, PRs, and other project matters;
- Has visible influence in the community and can represent KnowStreaming at important community meetings and events;
- Has the awareness and ability to mentor Committers and Contributors;

### 1.2 Committer

A Committer is an individual with write access to the KnowStreaming repository, meeting the following criteria:

- Contributes issues and PRs continuously over a long period;
- Helps maintain the issue list and takes part in discussions of important features;
- Takes part in code review;

### 1.3 Contributor

A Contributor is an individual who has contributed to the KnowStreaming project. The criterion is:

- Has submitted a PR that was merged;

---

## 2. Contributor Roster

Open-source contributor roster (updated from time to time)

If you are on the roster but have not received a contributor gift, contact: szzdzhp001

| Name | Github | Role | Company |
| --- | --- | --- | --- |
| 张亮 | [@zhangliangboy](https://github.com/zhangliangboy) | Maintainer | 滴滴出行 |
| 谢鹏 | [@PenceXie](https://github.com/PenceXie) | Maintainer | 滴滴出行 |
| 赵情融 | [@zqrferrari](https://github.com/zqrferrari) | Maintainer | 滴滴出行 |
| 石臻臻 | [@shirenchuang](https://github.com/shirenchuang) | Maintainer | 滴滴出行 |
| 曾巧 | [@ZQKC](https://github.com/ZQKC) | Maintainer | 滴滴出行 |
| 孙超 | [@lucasun](https://github.com/lucasun) | Maintainer | 滴滴出行 |
| 洪华驰 | [@brodiehong](https://github.com/brodiehong) | Maintainer | 滴滴出行 |
| 许喆 | [@potaaaaaato](https://github.com/potaaaaaato) | Committer | 滴滴出行 |
| 郭宇航 | [@GraceWalk](https://github.com/GraceWalk) | Committer | 滴滴出行 |
| 李伟 | [@velee](https://github.com/velee) | Committer | 滴滴出行 |
| 张占昌 | [@zzccctv](https://github.com/zzccctv) | Committer | 滴滴出行 |
| 王东方 | [@wangdongfang-aden](https://github.com/wangdongfang-aden) | Committer | 滴滴出行 |
| 王耀波 | [@WYAOBO](https://github.com/WYAOBO) | Committer | 滴滴出行 |
| 赵寅锐 | [@ZHAOYINRUI](https://github.com/ZHAOYINRUI) | Maintainer | 字节跳动 |
| haoqi123 | [@haoqi123](https://github.com/haoqi123) | Contributor | 前程无忧 |
| chaixiaoxue | [@chaixiaoxue](https://github.com/chaixiaoxue) | Contributor | SYNNEX |
| 陆晗 | [@luhea](https://github.com/luhea) | Contributor | 竞技世界 |
| Mengqi777 | [@Mengqi777](https://github.com/Mengqi777) | Contributor | 腾讯 |
| ruanliang-hualun | [@ruanliang-hualun](https://github.com/ruanliang-hualun) | Contributor | 网易 |
| 17hao | [@17hao](https://github.com/17hao) | Contributor | |
| Huyueeer | [@Huyueeer](https://github.com/Huyueeer) | Contributor | INVENTEC |
| lomodays207 | [@lomodays207](https://github.com/lomodays207) | Contributor | 建信金科 |
| Super .Wein(星痕) | [@superspeedone](https://github.com/superspeedone) | Contributor | 韵达 |
| Hongten | [@Hongten](https://github.com/Hongten) | Contributor | Shopee |
| 徐正熙 | [@hyper-xx](https://github.com/hyper-xx) | Contributor | 滴滴出行 |
| RichardZhengkay | [@RichardZhengkay](https://github.com/RichardZhengkay) | Contributor | 趣街 |
| 罐子里的茶 | [@gzldc](https://github.com/gzldc) | Contributor | 道富 |
| 陈忠玉 | [@paula](https://github.com/chenzhongyu11) | Contributor | 平安产险 |
| 杨光 | [@yangvipguang](https://github.com/yangvipguang) | Contributor | |
| 王亚聪 | [@wangyacongi](https://github.com/wangyacongi) | Contributor | |
| Yang Jing | [@yangbajing](https://github.com/yangbajing) | Contributor | |
| 刘新元 Liu XinYuan | [@Liu-XinYuan](https://github.com/Liu-XinYuan) | Contributor | |
| Joker | [@JokerQueue](https://github.com/JokerQueue) | Contributor | 丰巢 |
| Eason Lau | [@Liubey](https://github.com/Liubey) | Contributor | |
| hailanxin | [@hailanxin](https://github.com/hailanxin) | Contributor | |
| Qi Zhang | [@zzzhangqi](https://github.com/zzzhangqi) | Contributor | 好雨科技 |
| fengxsong | [@fengxsong](https://github.com/fengxsong) | Contributor | |
| 谢晓东 | [@Strangevy](https://github.com/Strangevy) | Contributor | 花生日记 |
| ZhaoXinlong | [@ZhaoXinlong](https://github.com/ZhaoXinlong) | Contributor | |
| xuehaipeng | [@xuehaipeng](https://github.com/xuehaipeng) | Contributor | |
| 孔令续 | [@mrazkong](https://github.com/mrazkong) | Contributor | |
| pierre xiong | [@pierre94](https://github.com/pierre94) | Contributor | |
| PengShuaixin | [@PengShuaixin](https://github.com/PengShuaixin) | Contributor | |
| 梁壮 | [@lz](https://github.com/silent-night-no-trace) | Contributor | |
| 张晓寅 | [@ahu0605](https://github.com/ahu0605) | Contributor | 电信数智 |
| 黄海婷 | [@Huanghaiting](https://github.com/Huanghaiting) | Contributor | 云徙科技 |
| 任祥德 | [@RenChauncy](https://github.com/RenChauncy) | Contributor | 探马企服 |
| 胡圣林 | [@slhu997](https://github.com/slhu997) | Contributor | |
| 史泽颖 | [@shizeying](https://github.com/shizeying) | Contributor | |
| 王玉博 | [@Wyb7290](https://github.com/Wyb7290) | Committer | |
| 伍璇 | [@Luckywustone](https://github.com/Luckywustone) | Contributor | |
| 邓苑 | [@CatherineDY](https://github.com/CatherineDY) | Contributor | |
| 封琼凤 | [@fengqiongfeng](https://github.com/fengqiongfeng) | Committer | |
docs/contribute_guide/贡献指南.md (new file): 168 lines
@@ -0,0 +1,168 @@

# Contribution Guide

- [Contribution Guide](#贡献指南)
  - [1. Code of Conduct](#1行为准则)
  - [2. Repository Conventions](#2仓库规范)
    - [2.1 Issue Conventions](#21issue-规范)
    - [2.2 Commit-Log Conventions](#22commit-log-规范)
    - [2.3 Pull-Request Conventions](#23pull-request-规范)
  - [3. Worked Examples](#3操作示例)
    - [3.1 Initialize the Environment](#31初始化环境)
    - [3.2 Claim an Issue](#32认领问题)
    - [3.3 Work on the Issue \& Submit the Fix](#33处理问题--提交解决)
    - [3.4 Request a Merge](#34请求合并)
  - [4. FAQ](#4常见问题)
    - [4.1 How to squash multiple Commit-Logs into one?](#41如何将多个-commit-log-合并为一个)

---

Welcome 👏🏻 👏🏻 👏🏻 to `KnowStreaming`. This document is a guide to contributing to `KnowStreaming`. If you find anything incorrect or missing, please leave your comments/suggestions.

---

## 1. Code of Conduct

Please read and follow our [Code of Conduct](https://github.com/didi/KnowStreaming/blob/master/CODE_OF_CONDUCT.md).

## 2. Repository Conventions

### 2.1 Issue Conventions

Create an issue as prompted at [Create Issue](https://github.com/didi/KnowStreaming/issues/new/choose).

Two points deserve emphasis:
- Provide the environment in which the problem occurred, including the operating system and the KS version in use;
- Provide a way to reproduce the problem;

### 2.2 Commit-Log Conventions

A `Commit-Log` has three parts: `Header`, `Body`, and `Footer`. The `Header` is mandatory and has a fixed format; the `Body` is used when the change needs a detailed explanation.

**1. `Header` conventions**

The `Header` has the form `[Type]Message` and consists of two parts, `Type` and `Message`:

- `Type`: the kind of commit, e.g. Bugfix, Feature, Optimize;
- `Message`: what the commit does, e.g. "fix problem xx";

A real example: [`[Bugfix]Fix Controller-Host not showing for newly onboarded clusters`](https://github.com/didi/KnowStreaming/pull/933/commits)

**2. `Body` conventions**

Usually not needed. If the change solves a fairly complex problem or touches a lot of code, use the `Body` to spell out the problem solved, the approach taken, and similar information.

---

**3. A real example**

```
[Optimize]Optimize initialization of the MySQL & ES test containers

Main changes:
1. The knowstreaming/knowstreaming-manager container;
2. The knowstreaming/knowstreaming-mysql container now uses the mysql:5.7 container;
3. After the mysql:5.7 container starts, add a step that initializes the MySQL tables and data;

Affected changes:
1. Move the MySQL init scripts from km-dist/init/sql to km-persistence/src/main/resource/sql so the project tests can load the init SQL they need;
2. Delete the unused km-dist/init/template directory;
3. Because of the km-dist/init/sql and km-dist/init/template changes, adjust the file list inside ReleaseKnowStreaming.xml;
```

**TODO: Anyone interested could later introduce a Git hook for better Commit-Log management.**
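As a starting point for that TODO, here is a minimal sketch of such a hook. It is an illustration only, not part of the repository; it assumes the `[Type]Message` header convention described above and would be installed as .git/hooks/commit-msg:

```bash
#!/usr/bin/env bash
# commit-msg hook: reject commits whose first line does not match [Type]Message.
# Git passes the path of the file holding the proposed commit message as $1.

header=$(head -n 1 "$1")

# Accept headers such as "[Bugfix]fix ...", "[Feature]add ...", "[Optimize]improve ..."
if ! echo "$header" | grep -qE '^\[(Bugfix|Feature|Optimize|Doc)\].+'; then
    echo "Commit-Log header must look like [Type]Message, e.g. [Bugfix]fix xxx (#123)" >&2
    exit 1
fi
```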
### 2.3 Pull-Request Conventions

For details, see the [PULL-REQUEST template](../../.github/PULL_REQUEST_TEMPLATE.md).

Two points deserve emphasis:

- <font color=red>Every PR must be linked to a valid issue; otherwise the PR will be rejected;</font>
- <font color=red>One branch changes one thing, and one PR changes one thing;</font>

---

## 3. Worked Examples

This section covers the operations and commands involved in contributing code to `KnowStreaming`.

Terminology:
- Main repository: https://github.com/didi/KnowStreaming is the main repository.
- Fork repository: the KnowStreaming repository forked into your own account;

### 3.1 Initialize the Environment

1. Fork the main `KnowStreaming` repository into your own account via the `Fork` button at the top right of https://github.com/didi/KnowStreaming;
2. Clone your fork locally: `git clone git@github.com:xxxxxxx/KnowStreaming.git`; this remote is conventionally named `origin`;
3. Add the main repository locally: `git remote add upstream https://github.com/didi/KnowStreaming`; `upstream` is the local short name for the main repository and can be anything, as long as you use it consistently;
4. Fetch the main repository: `git fetch upstream`;
5. Fetch your fork: `git fetch origin`;
6. Check out the main repository's `master` branch locally under the name `github_master`: `git checkout -b github_master upstream/master`;

Finally, here is roughly what things look like after initialization:
![环境初始化](assets/环境初始化.jpg)

At this point the environment is ready. From now on, the `github_master` branch mirrors the main repository's `master` branch: `git pull` fetches its latest code, and `git checkout -b xxx` creates whatever branch we need. The commands above are collected into a single runnable sequence below.
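A minimal sketch of the whole initialization, assuming your GitHub account is `xxxxxxx` (replace it with your own):

```bash
# One-time setup: clone your fork and wire up the main repository
git clone git@github.com:xxxxxxx/KnowStreaming.git
cd KnowStreaming
git remote add upstream https://github.com/didi/KnowStreaming

# Fetch both remotes
git fetch upstream
git fetch origin

# Track the main repository's master branch locally as github_master
git checkout -b github_master upstream/master
```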
### 3.2 Claim an Issue

Just leave a comment on the issue saying you will work on it, as shown below:

![问题认领](assets/问题认领.jpg)

### 3.3 Work on the Issue & Submit the Fix

This section walks through the branch management involved in working on an issue and submitting the fix, as shown below:

![分支管理](assets/分支管理.png)

1. Switch to the main branch: `git checkout github_master`;
2. Pull the latest code on the main branch: `git pull`;
3. Create a new branch off the main branch: `git checkout -b fix_928`;
4. Commit the code following the commit conventions, e.g. `git commit -m "[Optimize]优化xxx问题"`;
5. Push to your own remote: `git push --set-upstream origin fix_928`;
6. Open a `Pull Request` on the `GitHub` page so a maintainer can merge it into the main repository; see the next section for details. The whole sequence is collected into one runnable sketch below;
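The same steps as one runnable sequence, using the branch name `fix_928` from the diagram (substitute your own issue number):

```bash
git checkout github_master      # 1. switch to the local mirror of the main repo's master
git pull                        # 2. bring it up to date
git checkout -b fix_928         # 3. branch off for the fix

# ... edit code, run tests ...

git commit -m "[Optimize]优化xxx问题(#928)"    # 4. commit using the [Type]Message convention
git push --set-upstream origin fix_928         # 5. push the branch to your fork
# 6. then open the Pull Request on GitHub
```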
### 3.4 Request a Merge

Once the code has been pushed to your `GitHub` fork, you can create a `Pull Request` on the `GitHub` site to ask for it to be merged into the main repository, as shown below:

![申请合并](assets/申请合并.jpg)

[An example of a created Pull Request](https://github.com/didi/KnowStreaming/pull/945)

---

## 4. FAQ

### 4.1 How to squash multiple Commit-Logs into one?

You do not have to squash multiple commits into one; if you want to, `git rebase -i` does the job, as sketched below.
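A minimal sketch of squashing the last three commits into one (the SHAs shown are illustrative); `git rebase -i` opens an editor in which you keep the first commit as `pick` and mark the rest as `squash`:

```bash
# Rewrite the last 3 commits interactively
git rebase -i HEAD~3

# In the editor that opens (illustrative SHAs):
#   pick   a1b2c3 [Bugfix]fix xxx
#   squash d4e5f6 review fixes
#   squash 789abc more review fixes
# Save and quit, then edit the combined commit message.

# The branch history has been rewritten, so force-push your fork branch
git push --force-with-lease origin fix_928
```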
@@ -1,6 +0,0 @@
-
-Open-source contributor certificate roster (updated periodically)
-
-For the contributor roster, see: [贡献者名单](https://doc.knowstreaming.com/product/10-contribution#106-贡献者名单)
@@ -1,6 +0,0 @@
-
-<br>
-<br>
-
-Please click: [贡献流程](https://doc.knowstreaming.com/product/10-contribution#102-贡献流程)

A binary file was deleted (382 KiB; not shown).
@@ -6,72 +6,72 @@

### 3.3.1 Cluster Metrics

| Metric | Unit | Meaning | Kafka version | Enterprise/OSS |
| --- | --- | --- | --- | --- |
| HealthScore | points | Overall health score of the cluster | All versions | OSS |
| HealthCheckPassed | count | Cluster-wide health checks passed | All versions | OSS |
| HealthCheckTotal | count | Total cluster-wide health checks | All versions | OSS |
| HealthScore_Topics | points | Health score of the cluster's Topics | All versions | OSS |
| HealthCheckPassed_Topics | count | Topics health checks passed | All versions | OSS |
| HealthCheckTotal_Topics | count | Total Topics health checks | All versions | OSS |
| HealthScore_Brokers | points | Health score of the cluster's Brokers | All versions | OSS |
| HealthCheckPassed_Brokers | count | Brokers health checks passed | All versions | OSS |
| HealthCheckTotal_Brokers | count | Total Brokers health checks | All versions | OSS |
| HealthScore_Groups | points | Health score of the cluster's Groups | All versions | OSS |
| HealthCheckPassed_Groups | count | Groups health checks passed | All versions | OSS |
| HealthCheckTotal_Groups | count | Total Groups health checks | All versions | OSS |
| HealthScore_Cluster | points | Health score of the cluster itself | All versions | OSS |
| HealthCheckPassed_Cluster | count | Cluster-self health checks passed | All versions | OSS |
| HealthCheckTotal_Cluster | count | Total cluster-self health checks | All versions | OSS |
| TotalRequestQueueSize | count | Total request queue size in the cluster | All versions | OSS |
| TotalResponseQueueSize | count | Total response queue size in the cluster | All versions | OSS |
| EventQueueSize | count | Size of the Controller's EventQueue | 2.0.0 and above | OSS |
| ActiveControllerCount | count | Number of live Controllers in the cluster | All versions | OSS |
| TotalProduceRequests | count | Produce requests per second in the cluster | All versions | OSS |
| TotalLogSize | byte | Total disk space used by the cluster | All versions | OSS |
| ConnectionsCount | count | Number of connections in the cluster | All versions | OSS |
| Zookeepers | count | Number of live zk nodes in the cluster | All versions | OSS |
| ZookeepersAvailable | yes/no | Whether the ZK address is valid | All versions | OSS |
| Brokers | count | Total number of brokers in the cluster | All versions | OSS |
| BrokersAlive | count | Number of live brokers | All versions | OSS |
| BrokersNotAlive | count | Number of brokers not alive | All versions | OSS |
| Replicas | count | Total number of Replicas in the cluster | All versions | OSS |
| Topics | count | Total number of Topics in the cluster | All versions | OSS |
| Partitions | count | Total number of Partitions in the cluster | All versions | OSS |
| PartitionNoLeader | count | Total PartitionNoLeader count in the cluster | All versions | OSS |
| PartitionMinISR_S | count | Partitions below PartitionMinISR | All versions | OSS |
| PartitionMinISR_E | count | Partitions equal to PartitionMinISR | All versions | OSS |
| PartitionURP | count | Under-replicated Partitions in the cluster | All versions | OSS |
| MessagesIn | msgs/s | Messages written per second | All versions | OSS |
| Messages | msgs | Total messages in the cluster | All versions | OSS |
| LeaderMessages | msgs | Total leader messages in the cluster | All versions | OSS |
| BytesIn | byte/s | Bytes written per second | All versions | OSS |
| BytesIn_min_5 | byte/s | Bytes written per second, 5-minute average | All versions | OSS |
| BytesIn_min_15 | byte/s | Bytes written per second, 15-minute average | All versions | OSS |
| BytesOut | byte/s | Bytes flowing out per second | All versions | OSS |
| BytesOut_min_5 | byte/s | Bytes flowing out per second, 5-minute average | All versions | OSS |
| BytesOut_min_15 | byte/s | Bytes flowing out per second, 15-minute average | All versions | OSS |
| Groups | count | Total Groups in the cluster | All versions | OSS |
| GroupActives | count | Total ActiveGroups in the cluster | All versions | OSS |
| GroupEmptys | count | Total EmptyGroups in the cluster | All versions | OSS |
| GroupRebalances | count | Total RebalanceGroups in the cluster | All versions | OSS |
| GroupDeads | count | Total DeadGroups in the cluster | All versions | OSS |
| Alive | yes/no | Whether the cluster is alive, 1: alive; 0: not alive | All versions | OSS |
| AclEnable | yes/no | Whether ACL is enabled, 1: yes; 0: no | All versions | OSS |
| Acls | count | Number of ACLs | All versions | OSS |
| AclUsers | count | Number of ACL-KafkaUsers | All versions | OSS |
| AclTopics | count | Number of ACL-Topics | All versions | OSS |
| AclGroups | count | Number of ACL-Groups | All versions | OSS |
| Jobs | count | Total cluster jobs | All versions | OSS |
| JobsRunning | count | Total running jobs | All versions | OSS |
| JobsWaiting | count | Total waiting jobs | All versions | OSS |
| JobsSuccess | count | Total successful jobs | All versions | OSS |
| JobsFailed | count | Total failed jobs | All versions | OSS |
| LoadReBalanceEnable | yes/no | Whether rebalancing is enabled, 1: yes; 0: no | All versions | Enterprise |
| LoadReBalanceCpu | yes/no | Whether CPU is balanced, 1: yes; 0: no | All versions | Enterprise |
| LoadReBalanceNwIn | yes/no | Whether BytesIn is balanced, 1: yes; 0: no | All versions | Enterprise |
| LoadReBalanceNwOut | yes/no | Whether BytesOut is balanced, 1: yes; 0: no | All versions | Enterprise |
| LoadReBalanceDisk | yes/no | Whether Disk is balanced, 1: yes; 0: no | All versions | Enterprise |

### 3.3.2 Broker Metrics
docs/dev_guide/接入ZK带认证Kafka集群.md(新增文件,+180 行)

![know-streaming-logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png)

---

# 接入 ZK 带认证的 Kafka 集群

- [接入 ZK 带认证的 Kafka 集群](#接入-zk-带认证的-kafka-集群)
  - [1、简要说明](#1简要说明)
  - [2、支持 Digest-MD5 认证](#2支持-digest-md5-认证)
  - [3、支持 Kerberos 认证](#3支持-kerberos-认证)

## 1、简要说明

- 1、当前 KnowStreaming 暂无页面可以直接配置 ZK 的认证信息,但是 KnowStreaming 的后端在 MySQL 中预留了字段用于存储 ZK 的认证信息。用户将认证信息存储至该字段后,即可接入 ZK 带认证的 Kafka 集群。

- 2、该字段是 MySQL 库 ks_km_physical_cluster 表中的 zk_properties 字段,格式如下:

```json
{
    "openSecure": false,             // 是否开启认证,开启时配置为 true
    "sessionTimeoutUnitMs": 15000,   // session 超时时间
    "requestTimeoutUnitMs": 5000,    // request 超时时间
    "otherProps": {                  // 其他配置,认证信息主要配置在该位置
        "zookeeper.sasl.clientconfig": "kafkaClusterZK1"  // 例子
    }
}
```

- 3、实际生效的代码位置

```java
// 代码位置:https://github.com/didi/KnowStreaming/blob/master/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminZKClient.java

kafkaZkClient = KafkaZkClient.apply(
        clusterPhy.getZookeeper(),
        zkConfig.getOpenSecure(),             // 是否开启认证,开启时配置为 true
        zkConfig.getSessionTimeoutUnitMs(),   // session 超时时间
        zkConfig.getRequestTimeoutUnitMs(),   // request 超时时间
        5,
        Time.SYSTEM,
        "KS-ZK-ClusterPhyId-" + clusterPhyId,
        "KS-ZK-SessionExpireListener-clusterPhyId-" + clusterPhyId,
        Option.apply("KS-ZK-ClusterPhyId-" + clusterPhyId),
        Option.apply(this.getZKConfig(clusterPhyId, zkConfig.getOtherProps()))  // 其他配置,认证信息主要配置在该位置
);
```

- 4、SQL 例子

```sql
update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK1" } }' where id=集群1的ID;
```

- 5、zk_properties 字段不能覆盖所有的场景,实际使用过程中可能还需要在此基础之上进行其他调整。比如,`Digest-MD5 认证` 和 `Kerberos 认证` 都还需要修改启动脚本等。后续看能否通过修改 ZK 客户端的源码,使 ZK 认证的相关配置能和 Kafka 认证的配置一样方便。

---

## 2、支持 Digest-MD5 认证

1. 假设你有两个 Kafka 集群,对应两个 ZK 集群;
2. 两个 ZK 集群的认证信息如下所示

```bash
# ZK1集群的认证信息,这里的 kafkaClusterZK1 可以是任意名称,只需要和后续数据库的配置对应上即可。
kafkaClusterZK1 {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="zk1"
    password="zk1-passwd";
};

# ZK2集群的认证信息,这里的 kafkaClusterZK2 可以是任意名称,只需要和后续数据库的配置对应上即可。
kafkaClusterZK2 {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="zk2"
    password="zk2-passwd";
};
```

3. 将这两个 ZK 集群的认证信息存储到 `/xxx/zk_client_jaas.conf` 文件中,文件内容如下所示:

```bash
kafkaClusterZK1 {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="zk1"
    password="zk1-passwd";
};

kafkaClusterZK2 {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="zk2"
    password="zk2-passwd";
};
```

4. 修改 KnowStreaming 的启动脚本

```bash
# 在 `KnowStreaming/bin/startup.sh` 第 47 行的 JAVA_OPT 中追加如下设置

-Djava.security.auth.login.config=/xxx/zk_client_jaas.conf
```

5. 修改 KnowStreaming 的表数据

```sql
# 这里的 kafkaClusterZK1 要和 /xxx/zk_client_jaas.conf 中的对应上
update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK1" } }' where id=集群1的ID;

update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK2" } }' where id=集群2的ID;
```

6. 重启 KnowStreaming
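
重启前,可以先在 KnowStreaming 之外单独验证 JAAS 文件本身是否可用。下面是一个最小的验证思路(仅为示例:`/xxx/zk_client_jaas.conf`、clientconfig 名称和 ZK 地址都是占位值,需按实际情况替换):

```bash
# 用同一份 JAAS 文件、同一个 clientconfig 名称,通过 zkCli 手动连接 ZK1;
# 能正常执行 ls / 即说明该认证信息有效
export CLIENT_JVMFLAGS="-Djava.security.auth.login.config=/xxx/zk_client_jaas.conf -Dzookeeper.sasl.clientconfig=kafkaClusterZK1"
zkCli.sh -server ZK1的地址:2181 ls /
```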
---

## 3、支持 Kerberos 认证

**第一步:查看用户在 ZK 的 ACL**

假设我们使用的用户是 `kafka` 这个用户。

- 1、查看 server.properties 中配置的 zookeeper.connect 地址;
- 2、使用 `zkCli.sh -server zookeeper.connect的地址` 登录到 ZK 客户端;
- 3、在 ZK 客户端中,执行命令 `getAcl /kafka` 查看 `kafka` 用户的权限;

此时,我们可以看到如下信息:

![check_zk_acl](./assets/接入ZK带认证的Kafka集群/check_zk_acl.jpg)

`kafka` 用户需要的权限是 `cdrwa`。如果用户没有 `cdrwa` 权限的话,需要创建用户并授权,授权命令为:`setAcl`。

**第二步:创建 Kerberos 的 keytab 并修改 KnowStreaming 主机**

- 1、在 Kerberos 的域中创建 `kafka/_HOST` 的 `keytab`,并导出。例如:`kafka/dbs-kafka-test-8-53`;
- 2、导出 keytab 后上传到安装 KS 的机器的 `/etc/keytab` 下;
- 3、在 KS 机器上,执行 `kinit -kt zookeeper.keytab kafka/dbs-kafka-test-8-53`,确认能否进行 `Kerberos` 登录(可参考本步骤末尾的示例);
- 4、可以登录后,配置 `/opt/zookeeper.jaas` 文件,例子如下:

```bash
Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=false
    serviceName="zookeeper"
    keyTab="/etc/keytab/zookeeper.keytab"
    principal="kafka/dbs-kafka-test-8-53@XXX.XXX.XXX";
};
```

- 5、需要在 `KDC-Server` 上对 `KnowStreaming` 的机器开通防火墙,在 KS 机器的 `/etc/hosts` 中配置 `kdc-server` 的 `hostname`,并将 `krb5.conf` 导入到 `/etc` 下;
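
如果 Kerberos 登录仍有问题,可以先在 KS 机器上脱离 KnowStreaming 手动验证票据(示例中的 keytab 路径与 principal 均为文中示例值,需按实际情况替换):

```bash
# 手动获取并查看票据,确认 keytab、principal、krb5.conf 本身没有问题
kinit -kt /etc/keytab/zookeeper.keytab kafka/dbs-kafka-test-8-53
klist
```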

**第三步:修改 KnowStreaming 的配置**

- 1、修改数据库,开启 ZK 的认证

```sql
update ks_km_physical_cluster set zk_properties='{ "openSecure": true }' where id=集群1的ID;
```

- 2、在 `KnowStreaming/bin/startup.sh` 第 47 行的 JAVA_OPT 中追加如下设置

```bash
-Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/zookeeper.jaas
```

- 3、重启 KS 集群后,在 start.out 中看到如下信息,则证明 Kerberos 配置成功;

![kerberos_1](./assets/接入ZK带认证的Kafka集群/kerberos_1.jpg)

![kerberos_2](./assets/接入ZK带认证的Kafka集群/kerberos_2.jpg)

**第四步:补充说明**

- 1、多个 Kafka 集群如果使用同一个 Kerberos 域,只需在每个 `ZK` 中给 `kafka` 用户配置 `cdrwa` 权限即可,这样集群初始化时 `zkclient` 都可以完成认证;
- 2、多个 Kerberos 域暂时未适配;
@@ -2,125 +2,275 @@

![know-streaming-logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png)

## 2、解决连接 JMX 失败

- [2、解决连接 JMX 失败](#2解决连接-jmx-失败)
  - [2.1、正常及异常现象](#21正常及异常现象)
  - [2.2、异因一:JMX未开启](#22异因一jmx未开启)
    - [2.2.1、异常现象](#221异常现象)
    - [2.2.2、解决方案](#222解决方案)
  - [2.3、异因二:JMX配置错误](#23异因二jmx配置错误)
    - [2.3.1、异常现象](#231异常现象)
    - [2.3.2、解决方案](#232解决方案)
  - [2.4、异因三:JMX开启SSL](#24异因三jmx开启ssl)
    - [2.4.1、异常现象](#241异常现象)
    - [2.4.2、解决方案](#242解决方案)
  - [2.5、异因四:连接了错误IP](#25异因四连接了错误ip)
    - [2.5.1、异常现象](#251异常现象)
    - [2.5.2、解决方案](#252解决方案)
  - [2.6、异因五:连接了错误端口](#26异因五连接了错误端口)
    - [2.6.1、异常现象](#261异常现象)
    - [2.6.2、解决方案](#262解决方案)

背景:Kafka 通过 JMX 服务暴露运行指标,因此 `KnowStreaming` 会主动连接 Kafka 的 JMX 服务进行指标采集。如果发现页面缺少指标,可能的原因之一就是 Kafka 的 JMX 端口配置有问题,导致指标获取失败,进而页面没有数据。

### 2.1、正常及异常现象

**1、异常现象**

Broker 列表的 JMX PORT 列出现红色感叹号,则表示该 Broker 的 JMX 连接存在异常。

<img src=http://img-ys011.didistatic.com/static/dc2img/do1_MLlLCfAktne4X6MBtBUd width="90%">

**2、正常现象**

Broker 列表的 JMX PORT 列显示为绿色,则表示 JMX 连接正常。

<img src=http://img-ys011.didistatic.com/static/dc2img/do1_ymtDTCiDlzfrmSCez2lx width="90%">

---

### 2.2、异因一:JMX未开启

#### 2.2.1、异常现象

Broker 列表的 JMX Port 值为 -1,表示对应 Broker 的 JMX 未开启。

<img src=http://img-ys011.didistatic.com/static/dc2img/do1_E1PD8tPsMeR2zYLFBFAu width="90%">

#### 2.2.2、解决方案

开启 JMX,流程如下:

1、修改 Kafka 的 bin 目录下的 `kafka-server-start.sh` 文件

```bash
# 在这个下面增加JMX端口的配置
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export JMX_PORT=9999  # 增加这个配置, 端口号并不一定要是9999
fi
```

2、修改 Kafka 的 bin 目录下的 `kafka-run-class.sh` 文件

```bash
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=当前机器的IP"
fi

# JMX port to use
if [ $JMX_PORT ]; then
  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
fi
```

3、重启 Kafka-Broker。

---
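
开启后,可以在部署 KnowStreaming 的机器上先做一次简单的连通性自查。下面只是一个示例,IP 和端口均为假设值,需按实际情况替换:

```bash
# 1)确认从 KS 机器能访问到 Broker 的 JMX 端口
nc -vz 192.168.0.1 9999

# 2)在 Broker 机器上确认进程确实带上了 JMX 参数
ps aux | grep 'jmxremote.port' | grep -v grep
```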
### 2.3、异因二:JMX配置错误

#### 2.3.1、异常现象

错误日志:

```log
# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is:

# 错误二:错误提示的是127.0.0.1这个IP,这说明机器的hostname配置可能有问题。
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is:
```

#### 2.3.2、解决方案

参照如下流程检查并修正 JMX 配置:

1、修改 Kafka 的 bin 目录下的 `kafka-server-start.sh` 文件

```bash
# 在这个下面增加JMX端口的配置
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export JMX_PORT=9999  # 增加这个配置, 端口号并不一定要是9999
fi
```

2、修改 Kafka 的 bin 目录下的 `kafka-run-class.sh` 文件

```bash
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=当前机器的IP"
fi

# JMX port to use
if [ $JMX_PORT ]; then
  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
fi
```

3、重启 Kafka-Broker。

---

### 2.4、异因三:JMX开启SSL

#### 2.4.1、异常现象

```log
# 连接JMX的日志中,出现SSL认证失败的相关日志。TODO:欢迎补充具体日志案例。
```

#### 2.4.2、解决方案

<img src=http://img-ys011.didistatic.com/static/dc2img/do1_kNyCi8H9wtHSRkWurB6S width="50%">

---

### 2.5、异因四:连接了错误IP

#### 2.5.1、异常现象

Broker 配置了内外网,而 JMX 在配置时,可能配置的是内网 IP 或者外网 IP,此时 `KnowStreaming` 需要连接到特定网络的 IP 才可以访问。

比如:Broker 在 ZK 的存储结构如下所示,我们期望连接到 `endpoints` 中标记为 `INTERNAL` 的地址,但是 `KnowStreaming` 却连接了 `EXTERNAL` 的地址。

```json
{
    "listener_security_protocol_map": {
        "EXTERNAL": "SASL_PLAINTEXT",
        "INTERNAL": "SASL_PLAINTEXT"
    },
    "endpoints": [
        "EXTERNAL://192.168.0.1:7092",
        "INTERNAL://192.168.0.2:7093"
    ],
    "jmx_port": 8099,
    "host": "192.168.0.1",
    "timestamp": "1627289710439",
    "port": -1,
    "version": 4
}
```

#### 2.5.2、解决方案

可以手动往 `ks_km_physical_cluster` 表的 `jmx_properties` 字段增加一个 `useWhichEndpoint` 字段,从而控制 `KnowStreaming` 连接到特定的 JMX IP 及 PORT。

`jmx_properties` 格式:

```json
{
    "maxConn": 100,                  // KM对单台Broker的最大JMX连接数
    "username": "xxxxx",             // 用户名,可以不填写
    "password": "xxxx",              // 密码,可以不填写
    "openSSL": true,                 // 是否开启SSL, true表示开启ssl, false表示关闭
    "useWhichEndpoint": "EXTERNAL"   // 指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址
}
```

SQL 例子:

```sql
UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "useWhichEndpoint": "xxx"}' where id={xxx};
```

注意:

+ 目前此功能只支持采用 `ZK` 做分布式协调的 Kafka 集群。
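
修改后,可以核对一下字段是否写入成功。下面是一个简单的自查示例,连接参数、库名与 id 均为占位符:

```bash
# 查询对应集群的 jmx_properties,确认 useWhichEndpoint 已写入
mysql -h {MySQL地址} -u {用户名} -p \
  -e "SELECT id, jmx_properties FROM ks_km_physical_cluster WHERE id={xxx};" {库名}
```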
---

### 2.6、异因五:连接了错误端口

3.3.0 以上版本,或者 master 分支最新代码,才具备该能力。

#### 2.6.1、异常现象

在 AWS 或者容器上的 Kafka-Broker,多个 Broker 可能共用同一个 IP,外部服务想要连接 JMX 端口时,需要经过端口映射。因此 KnowStreaming 如果直接连接 ZK 上获取到的 JMX 端口,会连接失败,所以需要具备连接端口可配置的能力。

TODO:补充具体的日志。

#### 2.6.2、解决方案

可以手动往 `ks_km_physical_cluster` 表的 `jmx_properties` 字段增加一个 `specifiedJmxPortList` 字段,从而控制 `KnowStreaming` 连接到特定的 JMX PORT。

`jmx_properties` 格式:

```json
{
    "jmxPort": 2445,                 // 最低优先级使用的jmx端口
    "maxConn": 100,                  // KM对单台Broker的最大JMX连接数
    "username": "xxxxx",             // 用户名,可以不填写
    "password": "xxxx",              // 密码,可以不填写
    "openSSL": true,                 // 是否开启SSL, true表示开启ssl, false表示关闭
    "useWhichEndpoint": "EXTERNAL",  // 指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址
    "specifiedJmxPortList": [        // 配置最高优先级使用的jmx端口
        {
            "serverId": "1",         // kafka-broker的brokerId。这里是字符串类型,是为了兼容connect的jmx端口的连接
            "jmxPort": 1234          // 该broker所连接的jmx端口
        },
        {
            "serverId": "2",
            "jmxPort": 1234
        }
    ]
}
```

SQL 例子:

```sql
UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "specifiedJmxPortList": [{"serverId": "1", "jmxPort": 1234}] }' where id={xxx};
```

---
183
docs/dev_guide/页面无数据排查手册.md
Normal file
183
docs/dev_guide/页面无数据排查手册.md
Normal file
@@ -0,0 +1,183 @@
|
||||

|
||||
|
||||
# 页面无数据排查手册
|
||||
|
||||
- [页面无数据排查手册](#页面无数据排查手册)
|
||||
- [1、集群接入错误](#1集群接入错误)
|
||||
- [1.1、异常现象](#11异常现象)
|
||||
- [1.2、解决方案](#12解决方案)
|
||||
- [1.3、正常情况](#13正常情况)
|
||||
- [2、JMX连接失败](#2jmx连接失败)
|
||||
- [3、ElasticSearch问题](#3elasticsearch问题)
|
||||
- [3.1、异因一:缺少索引](#31异因一缺少索引)
|
||||
- [3.1.1、异常现象](#311异常现象)
|
||||
- [3.1.2、解决方案](#312解决方案)
|
||||
- [3.2、异因二:索引模板错误](#32异因二索引模板错误)
|
||||
- [3.2.1、异常现象](#321异常现象)
|
||||
- [3.2.2、解决方案](#322解决方案)
|
||||
- [3.3、异因三:集群Shard满](#33异因三集群shard满)
|
||||
- [3.3.1、异常现象](#331异常现象)
|
||||
- [3.3.2、解决方案](#332解决方案)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 1、集群接入错误
|
||||
|
||||
### 1.1、异常现象
|
||||
|
||||
如下图所示,集群非空时,大概率为地址配置错误导致。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_BRiXBvqYFK2dxSF1aqgZ width="80%">
|
||||
|
||||
|
||||
|
||||
### 1.2、解决方案
|
||||
|
||||
接入集群时,依据提示的错误,进行相应的解决。例如:
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_Yn4LhV8aeSEKX1zrrkUi width="50%">
|
||||
|
||||
### 1.3、正常情况
|
||||
|
||||
接入集群时,页面信息都自动正常出现,没有提示错误。
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 2、JMX连接失败
|
||||
|
||||
背景:Kafka 通过 JMX 服务进行运行指标的暴露,因此 `KnowStreaming` 会主动连接 Kafka 的 JMX 服务进行指标采集。如果我们发现页面缺少指标,那么可能原因之一是 Kafka 的 JMX 端口配置的有问题导致指标获取失败,进而页面没有数据。
|
||||
|
||||
|
||||
具体见同目录下的文档:[解决连接JMX失败](./%E8%A7%A3%E5%86%B3%E8%BF%9E%E6%8E%A5JMX%E5%A4%B1%E8%B4%A5.md)
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## 3、ElasticSearch问题
|
||||
|
||||
**背景:**
|
||||
`KnowStreaming` 将从 Kafka 中采集到的指标存储到 ES 中,如果 ES 存在问题,则也可能会导致页面出现无数据的情况。
|
||||
|
||||
**日志:**
|
||||
`KnowStreaming` 读写 ES 相关日志,在 `logs/es/es.log` 中!
|
||||
|
||||
|
||||
**注意:**
|
||||
mac系统在执行curl指令时,可能报zsh错误。可参考以下操作。
|
||||
|
||||
```bash
|
||||
1 进入.zshrc 文件 vim ~/.zshrc
|
||||
2.在.zshrc中加入 setopt no_nomatch
|
||||
3.更新配置 source ~/.zshrc
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3.1、异因一:缺少索引
|
||||
|
||||
#### 3.1.1、异常现象
|
||||
|
||||
报错信息
|
||||
|
||||
```log
|
||||
# 日志位置 logs/es/es.log
|
||||
com.didiglobal.logi.elasticsearch.client.model.exception.ESIndexNotFoundException: method [GET], host[http://127.0.0.1:9200], URI [/ks_kafka_broker_metric_2022-10-21,ks_kafka_broker_metric_2022-10-22/_search], status line [HTTP/1.1 404 Not Found]
|
||||
```
|
||||
|
||||
|
||||
`curl http://{ES的IP地址}:{ES的端口号}/_cat/indices/ks_kafka*` 查看KS索引列表,发现没有索引。
|
||||
|
||||
#### 3.1.2、解决方案
|
||||
|
||||
执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 3.2、异因二:索引模板错误
|
||||
|
||||
#### 3.2.1、异常现象
|
||||
|
||||
多集群列表有数据,集群详情页图标无数据。查询KS索引模板列表,发现不存在。
|
||||
|
||||
```bash
|
||||
curl {ES的IP地址}:{ES的端口号}/_cat/templates/ks_kafka*?v&h=name
|
||||
```
|
||||
|
||||
正常KS模板如下图所示。
|
||||
|
||||
<img src=http://img-ys011.didistatic.com/static/dc2img/do1_l79bPYSci9wr6KFwZDA6 width="90%">
|
||||
|
||||
|
||||
|
||||
#### 3.2.2、解决方案
|
||||
|
||||
删除KS索引模板和索引
|
||||
|
||||
```bash
|
||||
curl -XDELETE {ES的IP地址}:{ES的端口号}/ks_kafka*
|
||||
curl -XDELETE {ES的IP地址}:{ES的端口号}/_template/ks_kafka*
|
||||
```
|
||||
|
||||
执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 3.3、异因三:集群Shard满
|
||||
|
||||
#### 3.3.1、异常现象
|
||||
|
||||
报错信息
|
||||
|
||||
```log
|
||||
# 日志位置 logs/es/es.log
|
||||
|
||||
{"error":{"root_cause":[{"type":"validation_exception","reason":"Validation Failed: 1: this action would add [4] total shards, but this cluster currently has [1000]/[1000] maximum shards open;"}],"type":"validation_exception","reason":"Validation Failed: 1: this action would add [4] total shards, but this cluster currently has [1000]/[1000] maximum shards open;"},"status":400}
|
||||
```
|
||||
|
||||
尝试手动创建索引失败。
|
||||
|
||||
```bash
|
||||
#创建ks_kafka_cluster_metric_test索引的指令
|
||||
curl -s -XPUT http://{ES的IP地址}:{ES的端口号}/ks_kafka_cluster_metric_test
|
||||
```
|
||||
|
||||
|
||||
#### 3.3.2、解决方案
|
||||
|
||||
ES索引的默认分片数量为1000,达到数量以后,索引创建失败。
|
||||
|
||||
+ 扩大ES索引数量上限,执行指令
|
||||
|
||||
```
|
||||
curl -XPUT -H"content-type:application/json" http://{ES的IP地址}:{ES的端口号}/_cluster/settings -d '
|
||||
{
|
||||
"persistent": {
|
||||
"cluster": {
|
||||
"max_shards_per_node":{索引上限,默认为1000, 测试时可以将其调整为10000}
|
||||
}
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
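
调大上限前后,可以用 `_cluster/health` 粗略确认分片用量与集群状态。以下仅为示例,地址为占位符:

```bash
# 查看当前分片总数与集群状态,评估还剩多少分片余量
curl -s http://{ES的IP地址}:{ES的端口号}/_cluster/health?pretty | grep -E '"status"|active_shards'
```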
执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来补全索引。
@@ -8,6 +8,127 @@

暂无

---

### 升级至 `3.4.0` 版本

**配置变更**

```yaml
# 新增的配置
request:                      # 请求相关的配置
  api-call:                   # api调用
    timeout-unit-ms: 8000     # 超时时间,默认8000毫秒
```

**SQL 变更**

```sql
-- 多集群管理权限2023-06-27新增
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2026', 'Connector-新增', '1593', '1', '2', 'Connector-新增', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2028', 'Connector-编辑', '1593', '1', '2', 'Connector-编辑', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2030', 'Connector-删除', '1593', '1', '2', 'Connector-删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2032', 'Connector-重启', '1593', '1', '2', 'Connector-重启', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2034', 'Connector-暂停&恢复', '1593', '1', '2', 'Connector-暂停&恢复', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2026', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2028', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2030', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2032', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2034', '0', 'know-streaming');


-- 多集群管理权限2023-06-29新增
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2036', 'Security-ACL新增', '1593', '1', '2', 'Security-ACL新增', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2038', 'Security-ACL删除', '1593', '1', '2', 'Security-ACL删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2040', 'Security-User新增', '1593', '1', '2', 'Security-User新增', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2042', 'Security-User删除', '1593', '1', '2', 'Security-User删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2044', 'Security-User修改密码', '1593', '1', '2', 'Security-User修改密码', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2036', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2038', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2040', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2042', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2044', '0', 'know-streaming');


-- 多集群管理权限2023-07-06新增
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2046', 'Group-删除', '1593', '1', '2', 'Group-删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2048', 'GroupOffset-Topic纬度删除', '1593', '1', '2', 'GroupOffset-Topic纬度删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2050', 'GroupOffset-Partition纬度删除', '1593', '1', '2', 'GroupOffset-Partition纬度删除', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming');


-- 多集群管理权限2023-07-18新增
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2052', 'Security-User查看密码', '1593', '1', '2', 'Security-User查看密码', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2052', '0', 'know-streaming');
```

---

### 升级至 `3.3.0` 版本

**SQL 变更**

```sql
ALTER TABLE `logi_security_user`
    CHANGE COLUMN `phone` `phone` VARCHAR(20) NOT NULL DEFAULT '' COMMENT 'mobile' ;

ALTER TABLE ks_kc_connector ADD `heartbeat_connector_name` varchar(512) DEFAULT '' COMMENT '心跳检测connector名称';
ALTER TABLE ks_kc_connector ADD `checkpoint_connector_name` varchar(512) DEFAULT '' COMMENT '进度确认connector名称';

INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_TOTAL_RECORD_ERRORS', '{\"value\" : 1}', 'MirrorMaker消息处理错误的次数', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_REPLICATION_LATENCY_MS_MAX', '{\"value\" : 6000}', 'MirrorMaker消息复制最大延迟时间', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_UNASSIGNED_TASK_COUNT', '{\"value\" : 20}', 'MirrorMaker未被分配的任务数量', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_FAILED_TASK_COUNT', '{\"value\" : 10}', 'MirrorMaker失败状态的任务数量', 'admin');


-- 多集群管理权限2023-01-05新增
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2012', 'Topic-新增Topic复制', '1593', '1', '2', 'Topic-新增Topic复制', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2014', 'Topic-详情-取消Topic复制', '1593', '1', '2', 'Topic-详情-取消Topic复制', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2012', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2014', '0', 'know-streaming');


-- 多集群管理权限2023-01-18新增
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2016', 'MM2-新增', '1593', '1', '2', 'MM2-新增', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2018', 'MM2-编辑', '1593', '1', '2', 'MM2-编辑', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2020', 'MM2-删除', '1593', '1', '2', 'MM2-删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2022', 'MM2-重启', '1593', '1', '2', 'MM2-重启', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2024', 'MM2-暂停&恢复', '1593', '1', '2', 'MM2-暂停&恢复', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2016', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2018', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2020', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2022', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2024', '0', 'know-streaming');


DROP TABLE IF EXISTS `ks_ha_active_standby_relation`;
CREATE TABLE `ks_ha_active_standby_relation` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
  `active_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '主集群ID',
  `standby_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '备集群ID',
  `res_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '资源名称',
  `res_type` int(11) NOT NULL DEFAULT '-1' COMMENT '资源类型,0:集群,1:镜像Topic,2:主备Topic',
  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
  `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uniq_cluster_res` (`res_type`,`active_cluster_phy_id`,`standby_cluster_phy_id`,`res_name`),
  UNIQUE KEY `uniq_res_type_standby_cluster_res_name` (`res_type`,`standby_cluster_phy_id`,`res_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='HA主备关系表';


-- 删除idx_cluster_phy_id 索引并新增idx_cluster_update_time索引
ALTER TABLE `ks_km_kafka_change_record` DROP INDEX `idx_cluster_phy_id` ,
ADD INDEX `idx_cluster_update_time` (`cluster_phy_id` ASC, `update_time` ASC);
```

---

### 升级至 `3.2.0` 版本

**配置变更**
@@ -1,13 +1,37 @@

![know-streaming-logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png)

# FAQ

- [FAQ](#faq)
  - [1、支持哪些 Kafka 版本?](#1支持哪些-kafka-版本)
  - [2、2.x 版本和 3.0 版本有什么差异?](#22x-版本和-30-版本有什么差异)
  - [3、页面流量信息等无数据?](#3页面流量信息等无数据)
  - [4、`Jmx`连接失败如何解决?](#4jmx连接失败如何解决)
  - [5、有没有 API 文档?](#5有没有-api-文档)
  - [6、删除 Topic 成功后,为何过段时间又出现了?](#6删除-topic-成功后为何过段时间又出现了)
  - [7、如何在不登录的情况下,调用接口?](#7如何在不登录的情况下调用接口)
  - [8、Specified key was too long; max key length is 767 bytes](#8specified-key-was-too-long-max-key-length-is-767-bytes)
  - [9、出现 ESIndexNotFoundEXception 报错](#9出现-esindexnotfoundexception-报错)
  - [10、km-console 打包构建失败](#10km-console-打包构建失败)
  - [11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用?](#11在-km-console-目录下执行-npm-run-start-时看不到应用构建和热加载过程如何启动单个应用)
  - [12、权限识别失败问题](#12权限识别失败问题)
  - [13、接入开启kerberos认证的kafka集群](#13接入开启kerberos认证的kafka集群)
  - [14、对接Ldap的配置](#14对接ldap的配置)
  - [15、测试时使用Testcontainers的说明](#15测试时使用testcontainers的说明)
  - [16、JMX连接失败怎么办](#16jmx连接失败怎么办)
  - [17、zk监控无数据问题](#17zk监控无数据问题)
  - [18、启动失败,报NoClassDefFoundError如何解决](#18启动失败报noclassdeffounderror如何解决)
  - [19、依赖ElasticSearch 8.0以上版本部署后指标信息无法正常显示如何解决](#19依赖elasticsearch-80以上版本部署后指标信息无法正常显示如何解决)

## 1、支持哪些 Kafka 版本?

- 支持 0.10+ 的 Kafka 版本;
- 支持 ZK 及 Raft 运行模式的 Kafka 版本;

## 2、2.x 版本和 3.0 版本有什么差异?

**全新设计理念**

@@ -23,7 +47,7 @@

## 3、页面流量信息等无数据?

- 1、`Broker JMX`未正确开启

@@ -35,13 +59,13 @@

## 4、`Jmx`连接失败如何解决?

- 参看 [Jmx 连接配置&问题解决](https://doc.knowstreaming.com/product/9-attachment#91jmx-%E8%BF%9E%E6%8E%A5%E5%A4%B1%E8%B4%A5%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3) 说明。

## 5、有没有 API 文档?

`KnowStreaming` 采用 Swagger 进行 API 说明,在启动 KnowStreaming 服务之后,就可以从下面地址看到。

@@ -49,7 +73,7 @@ Swagger-API 地址: [http://IP:PORT/swagger-ui.html#/](http://IP:PORT/swagger-

## 6、删除 Topic 成功后,为何过段时间又出现了?

**原因说明:**

@@ -74,7 +98,7 @@ for (int i= 0; i < 100000; ++i) {

## 7、如何在不登录的情况下,调用接口?

步骤一:接口调用时,在 header 中,增加如下信息:

@@ -109,7 +133,7 @@ SECURITY.TRICK_USERS

但是还有一点需要注意,绕过的用户仅能调用他有权限的接口。比如一个普通用户,就只能调用普通的接口,不能去调用运维人员的接口。

## 8、Specified key was too long; max key length is 767 bytes

**原因:** 不同版本的 InnoDB 引擎,参数 'innodb_large_prefix' 的默认值不同:5.6 默认为 OFF,5.7 默认为 ON。

@@ -121,13 +145,13 @@ SECURITY.TRICK_USERS

- 将字符集改为 latin1(一个字符=一个字节)。
- 开启 'innodb_large_prefix',修改默认行格式 'innodb_file_format' 为 Barracuda,并设置 row_format=dynamic。

## 9、出现 ESIndexNotFoundEXception 报错

**原因:** 没有创建 ES 索引模版

**解决方案:** 执行 init_es_template.sh 脚本,创建 ES 索引模版即可。

## 10、km-console 打包构建失败

首先,**请确保您正在使用最新版本**,版本列表见 [Tags](https://github.com/didi/KnowStreaming/tags)。如果不是最新版本,请升级后再尝试有无问题。

@@ -161,14 +185,14 @@ Node 版本: v12.22.12

错误截图:

## 11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用?

需要到具体的应用中执行 `npm run start`,例如 `cd packages/layout-clusters-fe` 后,执行 `npm run start`。

应用启动后需要到基座应用中查看(需要启动基座应用,即 layout-clusters-fe)。

## 12、权限识别失败问题

1、使用 admin 账号登录 KnowStreaming 时,点击 系统管理-用户管理-角色管理-新增角色,查看页面是否正常。

<img src="http://img-ys011.didistatic.com/static/dc2img/do1_gwGfjN9N92UxzHU8dfzr" width = "400" >

@@ -182,3 +206,116 @@ Node 版本: v12.22.12

+ 原因:由于数据库编码和我们提供的脚本不一致,数据库里的数据发生了乱码,因此出现权限识别失败问题。
+ 解决方案:清空数据库数据,将数据库字符集调整为 utf8,最后重新执行 [dml-logi.sql](https://github.com/didi/KnowStreaming/blob/master/km-dist/init/sql/dml-logi.sql) 脚本导入数据即可。

## 13、接入开启kerberos认证的kafka集群

1. 在部署 KnowStreaming 的机器上安装 krb 客户端;
2. 替换 /etc/krb5.conf 配置文件;
3. 把 kafka 对应的 keytab 复制到该机器目录下;
4. 接入集群时填写认证配置,配置信息根据实际情况填写;

```json
{
    "security.protocol": "SASL_PLAINTEXT",
    "sasl.mechanism": "GSSAPI",
    "sasl.jaas.config": "com.sun.security.auth.module.Krb5LoginModule required useKeyTab=true keyTab=\"/etc/keytab/kafka.keytab\" storeKey=true useTicketCache=false principal=\"kafka/kafka@TEST.COM\";",
    "sasl.kerberos.service.name": "kafka"
}
```

## 14、对接Ldap的配置

```yaml
# 需要在application.yml中增加如下配置。相关配置的信息,按实际情况进行调整
account:
  ldap:
    url: ldap://127.0.0.1:8080/
    basedn: DC=senz,DC=local
    factory: com.sun.jndi.ldap.LdapCtxFactory
    filter: sAMAccountName
    security:
      authentication: simple
      principal: CN=search,DC=senz,DC=local
      credentials: xxxxxxx
    auth-user-registration: false       # 是否注册到mysql,默认false
    auth-user-registration-role: 1677   # 1677是超级管理员角色的id,如果想默认赋予普通角色,可以到ks新建一个。

# 需要在application.yml中修改如下配置
spring:
  logi-security:
    login-extend-bean-name: ksLdapLoginService  # 表示使用ldap的service
```

## 15、测试时使用Testcontainers的说明

1. 需要 docker 运行环境,见 [Testcontainers运行环境说明](https://www.testcontainers.org/supported_docker_environment/)
2. 如果本机没有 docker,可以使用[远程访问docker](https://docs.docker.com/config/daemon/remote-access/),参见 [Testcontainers配置说明](https://www.testcontainers.org/features/configuration/#customizing-docker-host-detection)

## 16、JMX连接失败怎么办

详细见:[解决连接JMX失败](../dev_guide/%E8%A7%A3%E5%86%B3%E8%BF%9E%E6%8E%A5JMX%E5%A4%B1%E8%B4%A5.md)

## 17、zk监控无数据问题

**现象:**
zookeeper 集群正常,但 KS 的 zk 页面所有监控指标无数据,`KnowStreaming` 的 log_error.log 日志提示:

```vim
[MetricCollect-Shard-0-8-thread-1] ERROR class=c.x.k.s.k.c.s.h.c.z.HealthCheckZookeeperService||method=checkWatchCount||param=ZookeeperParam(zkAddressList=[Tuple{v1=192.168.xxx.xx, v2=2181}, Tuple{v1=192.168.xxx.xx, v2=2181}, Tuple{v1=192.168.xxx.xx, v2=2181}], zkConfig=null)||config=HealthAmountRatioConfig(amount=100000, ratio=0.8)||result=Result{message='mntr is not executed because it is not in the whitelist.
', code=8031, data=null}||errMsg=get metrics failed, may be collect failed or zk mntr command not in whitelist.
2023-04-23 14:39:07.234 [MetricCollect-Shard-0-8-thread-1] ERROR class=c.x.k.s.k.c.s.h.checker.AbstractHeal
```

原因就很明确了:需要开放 zk 的四字命令,在 `zoo.cfg` 配置文件中添加

```
4lw.commands.whitelist=mntr,stat,ruok,envi,srvr,cons,conf,wchs,wchp
```

建议至少开放上述几个四字命令。当然,您也可以全部开放:

```
4lw.commands.whitelist=*
```
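
修改 `zoo.cfg` 并重启 ZK 后,可以先手动确认 `mntr` 已放开(示例,地址为占位符):

```bash
# 返回 zk_version 等监控项即说明 mntr 可用
echo mntr | nc {ZK的IP地址} 2181 | head
```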

## 18、启动失败,报NoClassDefFoundError如何解决

**错误现象:**

```log
# 启动失败,报 nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton

2023-08-11 22:54:29.842 [main] ERROR class=org.springframework.boot.SpringApplication||Application run failed
org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'quartzScheduler' defined in class path resource [com/didiglobal/logi/job/LogIJobAutoConfiguration.class]: Bean instantiation via factory method failed; nested exception is org.springframework.beans.BeanInstantiationException: Failed to instantiate [com.didiglobal.logi.job.core.Scheduler]: Factory method 'quartzScheduler' threw exception; nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
    at org.springframework.beans.factory.support.ConstructorResolver.instantiate(ConstructorResolver.java:657)
```

**问题原因:**

1. `KnowStreaming` 依赖的 `Logi-Job` 初始化 `WorkerSingleton$Singleton` 失败。
2. `WorkerSingleton$Singleton` 初始化的过程中,会去获取一些操作系统的信息,如果获取时出现了异常,则会导致 `WorkerSingleton$Singleton` 初始化失败。

**临时建议:**

`Logi-Job` 该问题的修复时间不好控制。此前我们测试验证过,在 `Windows`、`Mac`、`CentOS` 这几个操作系统下基本上都可以正常运行。

所以,如果有条件的话,可以暂时先使用这几个系统部署 `KnowStreaming`。

如果在 `Windows`、`Mac`、`CentOS` 这几个操作系统下也出现了启动失败的问题,可以重试 2-3 次看是否还是启动失败,或者换一台机器试试。

## 19、依赖ElasticSearch 8.0以上版本部署后指标信息无法正常显示如何解决

**错误现象**

```log
Warnings: [299 Elasticsearch-8.9.1-a813d015ef1826148d9d389bd1c0d781c6e349f0 "Legacy index templates are deprecated in favor of composable templates."]
```

**问题原因**

1. ES 8.0 和 ES 7.0 版本存在 Template 模式的差异,建议使用 /_index_template 端点来管理模板;
2. ES java client 在此版本下的行为异常,表现为读取到的数据为空;

**解决方法**

修改 `es_template_create.sh` 脚本中所有的 `/_template` 为 `/_index_template` 后执行即可。
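
批量替换可以用 sed 完成(仅为示例思路,执行前建议先备份脚本):

```bash
# 将脚本中的 /_template 统一替换为 /_index_template,原文件备份为 .bak
sed -i.bak 's#/_template#/_index_template#g' es_template_create.sh
```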
@@ -5,13 +5,13 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<artifactId>km-biz</artifactId>
|
||||
<version>${km.revision}</version>
|
||||
<version>${revision}</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<parent>
|
||||
<artifactId>km</artifactId>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<version>${km.revision}</version>
|
||||
<version>${revision}</version>
|
||||
</parent>
|
||||
|
||||
<properties>
|
||||
@@ -29,6 +29,11 @@
|
||||
<artifactId>km-core</artifactId>
|
||||
<version>${project.parent.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<artifactId>km-rebalance</artifactId>
|
||||
<version>${project.parent.version}</version>
|
||||
</dependency>
|
||||
|
||||
<!-- spring -->
|
||||
<dependency>
|
||||
@@ -62,10 +67,6 @@
|
||||
<groupId>commons-lang</groupId>
|
||||
<artifactId>commons-lang</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>commons-codec</groupId>
|
||||
|
||||
@@ -4,8 +4,12 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysHe
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.MultiClusterDashboardDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyBaseVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyDashboardVO;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 多集群总体状态
|
||||
*/
|
||||
@@ -24,4 +28,6 @@ public interface MultiClusterPhyManager {
|
||||
* @return
|
||||
*/
|
||||
PaginationResult<ClusterPhyDashboardVO> getClusterPhysDashboard(MultiClusterDashboardDTO dto);
|
||||
|
||||
Result<List<ClusterPhyBaseVO>> getClusterPhysBasic();
|
||||
}
|
||||
|
||||
@@ -140,7 +140,8 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
||||
clusterBrokersStateVO.setKafkaControllerAlive(true);
|
||||
}
|
||||
|
||||
clusterBrokersStateVO.setConfigSimilar(brokerConfigService.countBrokerConfigDiffsFromDB(clusterPhyId, Arrays.asList("broker.id", "listeners", "name", "value")) <= 0);
|
||||
clusterBrokersStateVO.setConfigSimilar(brokerConfigService.countBrokerConfigDiffsFromDB(clusterPhyId, KafkaConstant.CONFIG_SIMILAR_IGNORED_CONFIG_KEY_LIST) <= 0
|
||||
);
|
||||
|
||||
return clusterBrokersStateVO;
|
||||
}
|
||||
@@ -201,7 +202,7 @@ public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
|
||||
//补充非zk模式的JMXPort信息
|
||||
if (!clusterPhy.getRunState().equals(ClusterRunStateEnum.RUN_ZK.getRunState())) {
|
||||
JmxConfig jmxConfig = ConvertUtil.str2ObjByJson(clusterPhy.getJmxProperties(), JmxConfig.class);
|
||||
voList.forEach(elem -> elem.setJmxPort(jmxConfig.getJmxPort() == null ? -1 : jmxConfig.getJmxPort()));
|
||||
voList.forEach(elem -> elem.setJmxPort(jmxConfig.getFinallyJmxPort(String.valueOf(elem.getBrokerId()))));
|
||||
}
|
||||
|
||||
return voList;
|
||||
|
||||
@@ -136,13 +136,13 @@ public class ClusterConnectorsManagerImpl implements ClusterConnectorsManager {

    private PaginationResult<ClusterConnectorOverviewVO> pagingConnectorInLocal(List<ClusterConnectorOverviewVO> connectorVOList, ClusterConnectorsOverviewDTO dto) {
        // Fuzzy matching
-       connectorVOList = PaginationUtil.pageByFuzzyFilter(connectorVOList, dto.getSearchKeywords(), Arrays.asList("connectClusterName"));
+       connectorVOList = PaginationUtil.pageByFuzzyFilter(connectorVOList, dto.getSearchKeywords(), Arrays.asList("connectorName"));

        // Sorting
        if (!dto.getLatestMetricNames().isEmpty()) {
-           PaginationMetricsUtil.sortMetrics(connectorVOList, "latestMetrics", dto.getSortMetricNameList(), "connectClusterName", dto.getSortType());
+           PaginationMetricsUtil.sortMetrics(connectorVOList, "latestMetrics", dto.getSortMetricNameList(), "connectorName", dto.getSortType());
        } else {
-           PaginationUtil.pageBySort(connectorVOList, dto.getSortField(), dto.getSortType(), "connectClusterName", dto.getSortType());
+           PaginationUtil.pageBySort(connectorVOList, dto.getSortField(), dto.getSortType(), "connectorName", dto.getSortType());
        }

        // Pagination
@@ -14,10 +14,12 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterPhyTop
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter;
+import com.xiaojukeji.know.streaming.km.common.enums.ha.HaResTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
+import com.xiaojukeji.know.streaming.km.core.service.ha.HaActiveStandbyRelationService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import org.springframework.beans.factory.annotation.Autowired;
@@ -38,6 +40,9 @@ public class ClusterTopicsManagerImpl implements ClusterTopicsManager {
    @Autowired
    private TopicMetricService topicMetricService;

+   @Autowired
+   private HaActiveStandbyRelationService haActiveStandbyRelationService;
+
    @Override
    public PaginationResult<ClusterPhyTopicsOverviewVO> getClusterPhyTopicsOverview(Long clusterPhyId, ClusterTopicsOverviewDTO dto) {
        // Fetch all topics of the cluster
@@ -46,8 +51,11 @@ public class ClusterTopicsManagerImpl implements ClusterTopicsManager {
        // Fetch the metrics of all topics in the cluster
        Map<String, TopicMetrics> metricsMap = topicMetricService.getLatestMetricsFromCache(clusterPhyId);

+       // Fetch the HA info
+       Set<String> haTopicNameSet = haActiveStandbyRelationService.listByClusterAndType(clusterPhyId, HaResTypeEnum.MIRROR_TOPIC).stream().map(elem -> elem.getResName()).collect(Collectors.toSet());
+
        // Convert to VOs
-       List<ClusterPhyTopicsOverviewVO> voList = TopicVOConverter.convert2ClusterPhyTopicsOverviewVOList(topicList, metricsMap);
+       List<ClusterPhyTopicsOverviewVO> voList = TopicVOConverter.convert2ClusterPhyTopicsOverviewVOList(topicList, metricsMap, haTopicNameSet);

        // Apply the pagination
        PaginationResult<ClusterPhyTopicsOverviewVO> voPaginationResult = this.pagingTopicInLocal(voList, dto);
@@ -62,7 +62,8 @@ public class ClusterZookeepersManagerImpl implements ClusterZookeepersManager {
        vo.setTotalObserverCount(0);
        vo.setAliveServerCount(0);
        for (ZookeeperInfo info: infoList) {
-           if (info.getRole().equals(ZKRoleEnum.LEADER.getRole())) {
+           if (info.getRole().equals(ZKRoleEnum.LEADER.getRole()) || info.getRole().equals(ZKRoleEnum.STANDALONE.getRole())) {
+               // leader or standalone
                vo.setLeaderNode(info.getHost());
            }
@@ -9,10 +9,10 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysHe
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
+import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.MultiClusterDashboardDTO;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyBaseVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyDashboardVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
@@ -24,8 +24,11 @@ import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil,
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
-import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ClusterMetricVersionItems;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import com.xiaojukeji.know.streaming.km.rebalance.common.BalanceMetricConstant;
+import com.xiaojukeji.know.streaming.km.rebalance.common.bean.entity.ClusterBalanceItemState;
+import com.xiaojukeji.know.streaming.km.rebalance.core.service.ClusterBalanceService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -43,36 +46,28 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
    private ClusterMetricService clusterMetricService;

    @Autowired
-   private KafkaControllerService kafkaControllerService;
+   private ClusterBalanceService clusterBalanceService;

    @Override
    public ClusterPhysState getClusterPhysState() {
        List<ClusterPhy> clusterPhyList = clusterPhyService.listAllClusters();
+       ClusterPhysState physState = new ClusterPhysState(0, 0, 0, clusterPhyList.size());

-       Map<Long, KafkaController> controllerMap = kafkaControllerService.getKafkaControllersFromDB(
-               clusterPhyList.stream().map(elem -> elem.getId()).collect(Collectors.toList()),
-               false
-       );
-
-       ClusterPhysState physState = new ClusterPhysState(0, 0, clusterPhyList.size());
-       for (ClusterPhy clusterPhy: clusterPhyList) {
-           KafkaController kafkaController = controllerMap.get(clusterPhy.getId());
-
-           if (kafkaController != null && !kafkaController.alive()) {
-               // There is explicit information indicating the controller is down
-               physState.setDownCount(physState.getDownCount() + 1);
-           } else if ((System.currentTimeMillis() - clusterPhy.getCreateTime().getTime() >= 5 * 60 * 1000) && kafkaController == null) {
-               // The cluster was added more than 5 minutes ago and no kafkaController info exists, so mark it as down
+       for (ClusterPhy clusterPhy : clusterPhyList) {
+           ClusterMetrics metrics = clusterMetricService.getLatestMetricsFromCache(clusterPhy.getId());
+           Float state = metrics.getMetric(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_STATE);
+           if (state == null) {
+               physState.setUnknownCount(physState.getUnknownCount() + 1);
+           } else if (state.intValue() == HealthStateEnum.DEAD.getDimension()) {
                physState.setDownCount(physState.getDownCount() + 1);
            } else {
                // Everything else counts as alive
                physState.setLiveCount(physState.getLiveCount() + 1);
            }
        }

        return physState;
    }


    @Override
    public ClusterPhysHealthState getClusterPhysHealthState() {
        List<ClusterPhy> clusterPhyList = clusterPhyService.listAllClusters();
@@ -107,23 +102,6 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
        // Convert to VOs to ease the subsequent pagination and filtering
        List<ClusterPhyDashboardVO> voList = ConvertUtil.list2List(clusterPhyList, ClusterPhyDashboardVO.class);

-       // Fetch the cluster controller info and fill it into the VOs
-       Map<Long, KafkaController> controllerMap = kafkaControllerService.getKafkaControllersFromDB(clusterPhyList.stream().map(elem -> elem.getId()).collect(Collectors.toList()), false);
-       for (ClusterPhyDashboardVO vo: voList) {
-           KafkaController kafkaController = controllerMap.get(vo.getId());
-
-           if (kafkaController != null && !kafkaController.alive()) {
-               // There is explicit information indicating the controller is down
-               vo.setAlive(Constant.DOWN);
-           } else if ((System.currentTimeMillis() - vo.getCreateTime().getTime() >= 5 * 60L * 1000L) && kafkaController == null) {
-               // The cluster was added more than 5 minutes ago and no kafkaController info exists, so mark it as down
-               vo.setAlive(Constant.DOWN);
-           } else {
-               // Everything else counts as alive
-               vo.setAlive(Constant.ALIVE);
-           }
-       }
-
        // Filter and page the data locally
        voList = this.getAndPagingDataInLocal(voList, dto);
@@ -148,6 +126,15 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
        );
    }

+   @Override
+   public Result<List<ClusterPhyBaseVO>> getClusterPhysBasic() {
+       // Fetch the clusters
+       List<ClusterPhy> clusterPhyList = clusterPhyService.listAllClusters();
+
+       // Convert to VO format and return
+       return Result.buildSuc(ConvertUtil.list2List(clusterPhyList, ClusterPhyBaseVO.class));
+   }
+

    /**************************************************** private method ****************************************************/
@@ -174,6 +161,11 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
            ClusterMetrics clusterMetrics = clusterMetricService.getLatestMetricsFromCache(vo.getId());
            clusterMetrics.getMetrics().putIfAbsent(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_STATE, (float) HealthStateEnum.UNKNOWN.getDimension());

+           Result<ClusterMetrics> balanceMetricsResult = this.getClusterLoadReBalanceInfo(vo.getId());
+           if (balanceMetricsResult.hasData()) {
+               clusterMetrics.putMetric(balanceMetricsResult.getData().getMetrics());
+           }
+
            metricsList.add(clusterMetrics);
        }
@@ -195,4 +187,21 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
        dto.setClusterPhyIds(clusterIdList);
        return dto;
    }
+
+   private Result<ClusterMetrics> getClusterLoadReBalanceInfo(Long clusterPhyId) {
+       Result<ClusterBalanceItemState> stateResult = clusterBalanceService.getItemStateFromCacheFirst(clusterPhyId);
+       if (stateResult.failed()) {
+           return Result.buildFromIgnoreData(stateResult);
+       }
+
+       ClusterBalanceItemState state = stateResult.getData();
+
+       ClusterMetrics metric = ClusterMetrics.initWithMetrics(clusterPhyId, BalanceMetricConstant.CLUSTER_METRIC_LOAD_RE_BALANCE_ENABLE, state.getEnable() ? Constant.YES : Constant.NO);
+       metric.putMetric(BalanceMetricConstant.CLUSTER_METRIC_LOAD_RE_BALANCE_CPU, state.getResItemState(Resource.CPU).floatValue());
+       metric.putMetric(BalanceMetricConstant.CLUSTER_METRIC_LOAD_RE_BALANCE_NW_IN, state.getResItemState(Resource.NW_IN).floatValue());
+       metric.putMetric(BalanceMetricConstant.CLUSTER_METRIC_LOAD_RE_BALANCE_NW_OUT, state.getResItemState(Resource.NW_OUT).floatValue());
+       metric.putMetric(BalanceMetricConstant.CLUSTER_METRIC_LOAD_RE_BALANCE_DISK, state.getResItemState(Resource.DISK).floatValue());
+
+       return Result.buildSuc(metric);
+   }
}
@@ -10,6 +10,7 @@ public interface ConnectorManager {
    Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);

    Result<Void> createConnector(ConnectorCreateDTO dto, String operator);
+   Result<Void> createConnector(ConnectorCreateDTO dto, String heartbeatName, String checkpointName, String operator);

    Result<ConnectorStateVO> getConnectorStateVO(Long connectClusterId, String connectorName);
}
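The two createConnector overloads above differ only in whether the MM2 companion connector names are recorded alongside the source connector. A minimal caller-side sketch follows; this is hypothetical code, not part of the changeset, with imports and Spring wiring omitted and the operator name purely illustrative:

// Hypothetical usage sketch: contrasts the plain overload with the
// MM2-aware overload that persists the companion connector names.
public class ConnectorCreateSketch {
    private final ConnectorManager connectorManager;

    public ConnectorCreateSketch(ConnectorManager connectorManager) {
        this.connectorManager = connectorManager;
    }

    public Result<Void> createPlain(ConnectorCreateDTO dto) {
        // Ordinary connector: no heartbeat/checkpoint names are persisted.
        return connectorManager.createConnector(dto, "some-operator");
    }

    public Result<Void> createMm2Source(ConnectorCreateDTO dto) {
        // MM2 source connector: pass the derived heartbeat/checkpoint names so
        // the three connectors can later be managed as one MirrorMaker unit.
        return connectorManager.createConnector(
                dto,
                MirrorMakerUtil.genHeartbeatName(dto.getConnectorName()),
                MirrorMakerUtil.genCheckpointName(dto.getConnectorName()),
                "some-operator");
    }
}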
@@ -1,7 +1,5 @@
package com.xiaojukeji.know.streaming.km.biz.connect.connector.impl;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.connect.connector.ConnectorManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector.ConnectorCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
@@ -12,7 +10,9 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.connector.ConnectorStateVO;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import org.apache.kafka.connect.runtime.AbstractStatus;
@@ -25,14 +25,15 @@ import java.util.stream.Collectors;

@Service
public class ConnectorManagerImpl implements ConnectorManager {
    private static final ILog LOGGER = LogFactory.getLog(ConnectorManagerImpl.class);

    @Autowired
    private PluginService pluginService;

    @Autowired
    private ConnectorService connectorService;

    @Autowired
    private OpConnectorService opConnectorService;

    @Autowired
    private WorkerConnectorService workerConnectorService;
@@ -47,25 +48,50 @@ public class ConnectorManagerImpl implements ConnectorManager {
            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "Invalid connector parameters");
        }

-       return connectorService.updateConnectorConfig(connectClusterId, connectorName, configs, operator);
+       return opConnectorService.updateConnectorConfig(connectClusterId, connectorName, configs, operator);
    }

    @Override
    public Result<Void> createConnector(ConnectorCreateDTO dto, String operator) {
-       Result<KSConnectorInfo> createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getConfigs(), operator);
+       dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName());
+
+       Result<KSConnectorInfo> createResult = opConnectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
        if (createResult.failed()) {
            return Result.buildFromIgnoreData(createResult);
        }

-       Result<KSConnector> ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(dto.getConnectClusterId(), dto.getConnectorName());
+       Result<KSConnector> ksConnectorResult = connectorService.getConnectorFromKafka(dto.getConnectClusterId(), dto.getConnectorName());
        if (ksConnectorResult.failed()) {
            return Result.buildFromRSAndMsg(ResultStatus.SUCCESS, "Created successfully, but failed to fetch the metadata; the page metadata may lag by about 1 minute");
        }

-       connectorService.addNewToDB(ksConnectorResult.getData());
+       opConnectorService.addNewToDB(ksConnectorResult.getData());
        return Result.buildSuc();
    }

+   @Override
+   public Result<Void> createConnector(ConnectorCreateDTO dto, String heartbeatName, String checkpointName, String operator) {
+       dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName());
+
+       Result<KSConnectorInfo> createResult = opConnectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
+       if (createResult.failed()) {
+           return Result.buildFromIgnoreData(createResult);
+       }
+
+       Result<KSConnector> ksConnectorResult = connectorService.getConnectorFromKafka(dto.getConnectClusterId(), dto.getConnectorName());
+       if (ksConnectorResult.failed()) {
+           return Result.buildFromRSAndMsg(ResultStatus.SUCCESS, "Created successfully, but failed to fetch the metadata; the page metadata may lag by about 1 minute");
+       }
+
+       KSConnector connector = ksConnectorResult.getData();
+       connector.setCheckpointConnectorName(checkpointName);
+       connector.setHeartbeatConnectorName(heartbeatName);
+
+       opConnectorService.addNewToDB(connector);
+       return Result.buildSuc();
+   }


    @Override
    public Result<ConnectorStateVO> getConnectorStateVO(Long connectClusterId, String connectorName) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, connectorName);
@@ -0,0 +1,43 @@
package com.xiaojukeji.know.streaming.km.biz.connect.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterMirrorMakersOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.mm2.MirrorMakerCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.mm2.ClusterMirrorMakerOverviewVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.mm2.MirrorMakerBaseStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.mm2.MirrorMakerStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.plugin.ConnectConfigInfosVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.task.KCTaskOverviewVO;

import java.util.List;
import java.util.Map;
import java.util.Properties;

/**
 * @author wyb
 * @date 2022/12/26
 */
public interface MirrorMakerManager {
    Result<Void> createMirrorMaker(MirrorMakerCreateDTO dto, String operator);

    Result<Void> deleteMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator);

    Result<Void> modifyMirrorMakerConfig(MirrorMakerCreateDTO dto, String operator);

    Result<Void> restartMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator);
    Result<Void> stopMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator);
    Result<Void> resumeMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator);

    Result<MirrorMakerStateVO> getMirrorMakerStateVO(Long clusterPhyId);

    PaginationResult<ClusterMirrorMakerOverviewVO> getClusterMirrorMakersOverview(Long clusterPhyId, ClusterMirrorMakersOverviewDTO dto);


    Result<MirrorMakerBaseStateVO> getMirrorMakerState(Long connectId, String connectName);

    Result<Map<String, List<KCTaskOverviewVO>>> getTaskOverview(Long connectClusterId, String connectorName);
    Result<List<Properties>> getMM2Configs(Long connectClusterId, String connectorName);

    Result<List<ConnectConfigInfosVO>> validateConnectors(MirrorMakerCreateDTO dto);
}
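Before the implementation below, a minimal usage sketch of this interface; this is hypothetical code, not part of the changeset, with imports and Spring wiring omitted. It reflects what the implementation does: createMirrorMaker creates the source, checkpoint, and heartbeat connectors as one unit, and the lifecycle methods cascade over all three:

// Hypothetical sketch: drive a full MM2 lifecycle through MirrorMakerManager.
public class MirrorMakerLifecycleSketch {
    private final MirrorMakerManager mirrorMakerManager;

    public MirrorMakerLifecycleSketch(MirrorMakerManager mirrorMakerManager) {
        this.mirrorMakerManager = mirrorMakerManager;
    }

    public void run(MirrorMakerCreateDTO dto, Long connectClusterId, String sourceConnectorName) {
        // Creating an MM2 instance creates up to three connectors in one call.
        Result<Void> rv = mirrorMakerManager.createMirrorMaker(dto, "some-operator");
        if (rv.failed()) {
            return;
        }

        // Stop/resume/restart each operate on the checkpoint and heartbeat
        // connectors first, then on the source connector itself.
        mirrorMakerManager.stopMirrorMaker(connectClusterId, sourceConnectorName, "some-operator");
        mirrorMakerManager.resumeMirrorMaker(connectClusterId, sourceConnectorName, "some-operator");
    }
}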
@@ -0,0 +1,653 @@
package com.xiaojukeji.know.streaming.km.biz.connect.mm2.impl;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.connect.connector.ConnectorManager;
import com.xiaojukeji.know.streaming.km.biz.connect.mm2.MirrorMakerManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterMirrorMakersOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector.ConnectorCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.mm2.MirrorMakerCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.mm2.MetricsMirrorMakersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.config.ConnectConfigInfos;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.mm2.ClusterMirrorMakerOverviewVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.mm2.MirrorMakerBaseStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.mm2.MirrorMakerStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.plugin.ConnectConfigInfosVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.task.KCTaskOverviewVO;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.utils.*;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.MirrorMakerUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.utils.ApiCallThreadPoolService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.apache.kafka.connect.runtime.AbstractStatus.State.RUNNING;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.*;


/**
 * @author wyb
 * @date 2022/12/26
 */
@Service
public class MirrorMakerManagerImpl implements MirrorMakerManager {
    private static final ILog LOGGER = LogFactory.getLog(MirrorMakerManagerImpl.class);

    @Autowired
    private ConnectorService connectorService;

    @Autowired
    private OpConnectorService opConnectorService;

    @Autowired
    private WorkerConnectorService workerConnectorService;

    @Autowired
    private WorkerService workerService;

    @Autowired
    private ConnectorManager connectorManager;

    @Autowired
    private ClusterPhyService clusterPhyService;

    @Autowired
    private MirrorMakerMetricService mirrorMakerMetricService;

    @Autowired
    private ConnectClusterService connectClusterService;

    @Autowired
    private PluginService pluginService;
    @Override
    public Result<Void> createMirrorMaker(MirrorMakerCreateDTO dto, String operator) {
        // Check the basic parameters
        Result<Void> rv = this.checkCreateMirrorMakerParamAndUnifyData(dto);
        if (rv.failed()) {
            return rv;
        }

        // Create the MirrorSourceConnector
        Result<Void> sourceConnectResult = connectorManager.createConnector(
                dto,
                dto.getHeartbeatConnectorConfigs() != null ? MirrorMakerUtil.genHeartbeatName(dto.getConnectorName()) : "",
                dto.getCheckpointConnectorConfigs() != null ? MirrorMakerUtil.genCheckpointName(dto.getConnectorName()) : "",
                operator
        );
        if (sourceConnectResult.failed()) {
            // Creation failed, return directly
            return Result.buildFromIgnoreData(sourceConnectResult);
        }

        // Create the checkpoint connector
        Result<Void> checkpointResult = Result.buildSuc();
        if (dto.getCheckpointConnectorConfigs() != null) {
            checkpointResult = connectorManager.createConnector(
                    new ConnectorCreateDTO(dto.getConnectClusterId(), MirrorMakerUtil.genCheckpointName(dto.getConnectorName()), dto.getCheckpointConnectorConfigs()),
                    operator
            );
        }

        // Create the heartbeat connector
        Result<Void> heartbeatResult = Result.buildSuc();
        if (dto.getHeartbeatConnectorConfigs() != null) {
            heartbeatResult = connectorManager.createConnector(
                    new ConnectorCreateDTO(dto.getConnectClusterId(), MirrorMakerUtil.genHeartbeatName(dto.getConnectorName()), dto.getHeartbeatConnectorConfigs()),
                    operator
            );
        }

        // All succeeded
        if (checkpointResult.successful() && heartbeatResult.successful()) {
            return Result.buildSuc();
        } else if (checkpointResult.failed() && heartbeatResult.failed()) {
            return Result.buildFromRSAndMsg(
                    ResultStatus.KAFKA_CONNECTOR_OPERATE_FAILED,
                    String.format("Failed to create the checkpoint & heartbeat connectors.%nFailure messages:%s%n%n%s", checkpointResult.getMessage(), heartbeatResult.getMessage())
            );
        } else if (checkpointResult.failed()) {
            return Result.buildFromRSAndMsg(
                    ResultStatus.KAFKA_CONNECTOR_OPERATE_FAILED,
                    String.format("Failed to create the checkpoint connector.%nFailure message: %s", checkpointResult.getMessage())
            );
        } else {
            return Result.buildFromRSAndMsg(
                    ResultStatus.KAFKA_CONNECTOR_OPERATE_FAILED,
                    String.format("Failed to create the heartbeat connector.%nFailure message: %s", heartbeatResult.getMessage())
            );
        }
    }
    @Override
    public Result<Void> deleteMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, sourceConnectorName);
        if (connectorPO == null) {
            return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectorNotExist(connectClusterId, sourceConnectorName));
        }

        Result<Void> rv = Result.buildSuc();
        if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
            rv = opConnectorService.deleteConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
            rv = opConnectorService.deleteConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        return opConnectorService.deleteConnector(connectClusterId, sourceConnectorName, operator);
    }

    @Override
    public Result<Void> modifyMirrorMakerConfig(MirrorMakerCreateDTO dto, String operator) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(dto.getConnectClusterId(), dto.getConnectorName());
        if (connectorPO == null) {
            return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectorNotExist(dto.getConnectClusterId(), dto.getConnectorName()));
        }

        Result<Void> rv = Result.buildSuc();
        if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName()) && dto.getCheckpointConnectorConfigs() != null) {
            rv = opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getCheckpointConnectorName(), dto.getCheckpointConnectorConfigs(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName()) && dto.getHeartbeatConnectorConfigs() != null) {
            rv = opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getHeartbeatConnectorName(), dto.getHeartbeatConnectorConfigs(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        return opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
    }

    @Override
    public Result<Void> restartMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, sourceConnectorName);
        if (connectorPO == null) {
            return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectorNotExist(connectClusterId, sourceConnectorName));
        }

        Result<Void> rv = Result.buildSuc();
        if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
            rv = opConnectorService.restartConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
            rv = opConnectorService.restartConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        return opConnectorService.restartConnector(connectClusterId, sourceConnectorName, operator);
    }

    @Override
    public Result<Void> stopMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, sourceConnectorName);
        if (connectorPO == null) {
            return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectorNotExist(connectClusterId, sourceConnectorName));
        }

        Result<Void> rv = Result.buildSuc();
        if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
            rv = opConnectorService.stopConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
            rv = opConnectorService.stopConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        return opConnectorService.stopConnector(connectClusterId, sourceConnectorName, operator);
    }

    @Override
    public Result<Void> resumeMirrorMaker(Long connectClusterId, String sourceConnectorName, String operator) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, sourceConnectorName);
        if (connectorPO == null) {
            return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectorNotExist(connectClusterId, sourceConnectorName));
        }

        Result<Void> rv = Result.buildSuc();
        if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
            rv = opConnectorService.resumeConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
            rv = opConnectorService.resumeConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
        }
        if (rv.failed()) {
            return rv;
        }

        return opConnectorService.resumeConnector(connectClusterId, sourceConnectorName, operator);
    }
    @Override
    public Result<MirrorMakerStateVO> getMirrorMakerStateVO(Long clusterPhyId) {
        List<ConnectorPO> connectorPOList = connectorService.listByKafkaClusterIdFromDB(clusterPhyId);
        List<WorkerConnector> workerConnectorList = workerConnectorService.listByKafkaClusterIdFromDB(clusterPhyId);
        List<ConnectWorker> workerList = workerService.listByKafkaClusterIdFromDB(clusterPhyId);

        return Result.buildSuc(convert2MirrorMakerStateVO(connectorPOList, workerConnectorList, workerList));
    }

    @Override
    public PaginationResult<ClusterMirrorMakerOverviewVO> getClusterMirrorMakersOverview(Long clusterPhyId, ClusterMirrorMakersOverviewDTO dto) {
        List<ConnectorPO> mirrorMakerList = connectorService.listByKafkaClusterIdFromDB(clusterPhyId).stream().filter(elem -> elem.getConnectorClassName().equals(MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
        List<ConnectCluster> connectClusterList = connectClusterService.listByKafkaCluster(clusterPhyId);

        Result<List<MirrorMakerMetrics>> latestMetricsResult = mirrorMakerMetricService.getLatestMetricsFromES(clusterPhyId,
                mirrorMakerList.stream().map(elem -> new Tuple<>(elem.getConnectClusterId(), elem.getConnectorName())).collect(Collectors.toList()),
                dto.getLatestMetricNames());

        if (latestMetricsResult.failed()) {
            LOGGER.error("method=getClusterMirrorMakersOverview||clusterPhyId={}||result={}||errMsg=get latest metric failed", clusterPhyId, latestMetricsResult);
            return PaginationResult.buildFailure(latestMetricsResult, dto);
        }

        List<ClusterMirrorMakerOverviewVO> mirrorMakerOverviewVOList = this.convert2ClusterMirrorMakerOverviewVO(mirrorMakerList, connectClusterList, latestMetricsResult.getData());

        List<ClusterMirrorMakerOverviewVO> mirrorMakerVOList = this.completeClusterInfo(mirrorMakerOverviewVOList);

        PaginationResult<ClusterMirrorMakerOverviewVO> voPaginationResult = this.pagingMirrorMakerInLocal(mirrorMakerVOList, dto);

        if (voPaginationResult.failed()) {
            LOGGER.error("method=getClusterMirrorMakersOverview||clusterPhyId={}||result={}||errMsg=pagination in local failed", clusterPhyId, voPaginationResult);

            return PaginationResult.buildFailure(voPaginationResult, dto);
        }

        // Query the historical metrics
        Result<List<MetricMultiLinesVO>> lineMetricsResult = mirrorMakerMetricService.listMirrorMakerClusterMetricsFromES(
                clusterPhyId,
                this.buildMetricsConnectorsDTO(
                        voPaginationResult.getData().getBizData().stream().map(elem -> new ClusterConnectorDTO(elem.getConnectClusterId(), elem.getConnectorName())).collect(Collectors.toList()),
                        dto.getMetricLines()
                ));

        return PaginationResult.buildSuc(
                this.supplyData2ClusterMirrorMakerOverviewVOList(
                        voPaginationResult.getData().getBizData(),
                        lineMetricsResult.getData()
                ),
                voPaginationResult
        );
    }
    @Override
    public Result<MirrorMakerBaseStateVO> getMirrorMakerState(Long connectClusterId, String connectName) {
        // The MM2 connector record
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, connectName);
        if (connectorPO == null) {
            return Result.buildFrom(ResultStatus.NOT_EXIST);
        }

        List<WorkerConnector> workerConnectorList = workerConnectorService.listFromDB(connectClusterId).stream()
                .filter(workerConnector -> workerConnector.getConnectorName().equals(connectorPO.getConnectorName())
                        || (!StringUtils.isBlank(connectorPO.getCheckpointConnectorName()) && workerConnector.getConnectorName().equals(connectorPO.getCheckpointConnectorName()))
                        || (!StringUtils.isBlank(connectorPO.getHeartbeatConnectorName()) && workerConnector.getConnectorName().equals(connectorPO.getHeartbeatConnectorName())))
                .collect(Collectors.toList());

        MirrorMakerBaseStateVO mirrorMakerBaseStateVO = new MirrorMakerBaseStateVO();
        mirrorMakerBaseStateVO.setTotalTaskCount(workerConnectorList.size());
        mirrorMakerBaseStateVO.setAliveTaskCount(workerConnectorList.stream().filter(elem -> elem.getState().equals(RUNNING.name())).collect(Collectors.toList()).size());
        mirrorMakerBaseStateVO.setWorkerCount(workerConnectorList.stream().collect(Collectors.groupingBy(WorkerConnector::getWorkerId)).size());
        return Result.buildSuc(mirrorMakerBaseStateVO);
    }

    @Override
    public Result<Map<String, List<KCTaskOverviewVO>>> getTaskOverview(Long connectClusterId, String connectorName) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, connectorName);
        if (connectorPO == null) {
            return Result.buildFrom(ResultStatus.NOT_EXIST);
        }

        Map<String, List<KCTaskOverviewVO>> listMap = new HashMap<>();
        List<WorkerConnector> workerConnectorList = workerConnectorService.listFromDB(connectClusterId);
        if (workerConnectorList.isEmpty()) {
            return Result.buildSuc(listMap);
        }
        workerConnectorList.forEach(workerConnector -> {
            if (workerConnector.getConnectorName().equals(connectorPO.getConnectorName())) {
                listMap.putIfAbsent(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE, new ArrayList<>());
                listMap.get(MIRROR_MAKER_SOURCE_CONNECTOR_TYPE).add(ConvertUtil.obj2Obj(workerConnector, KCTaskOverviewVO.class));
            } else if (workerConnector.getConnectorName().equals(connectorPO.getCheckpointConnectorName())) {
                listMap.putIfAbsent(KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE, new ArrayList<>());
                listMap.get(MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE).add(ConvertUtil.obj2Obj(workerConnector, KCTaskOverviewVO.class));
            } else if (workerConnector.getConnectorName().equals(connectorPO.getHeartbeatConnectorName())) {
                listMap.putIfAbsent(KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE, new ArrayList<>());
                listMap.get(MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE).add(ConvertUtil.obj2Obj(workerConnector, KCTaskOverviewVO.class));
            }
        });

        return Result.buildSuc(listMap);
    }
    @Override
    public Result<List<Properties>> getMM2Configs(Long connectClusterId, String connectorName) {
        ConnectorPO connectorPO = connectorService.getConnectorFromDB(connectClusterId, connectorName);
        if (connectorPO == null) {
            return Result.buildFrom(ResultStatus.NOT_EXIST);
        }

        List<Properties> propList = new ArrayList<>();

        // source
        Result<KSConnectorInfo> connectorResult = connectorService.getConnectorInfoFromCluster(connectClusterId, connectorPO.getConnectorName());
        if (connectorResult.failed()) {
            return Result.buildFromIgnoreData(connectorResult);
        }

        Properties props = new Properties();
        props.putAll(connectorResult.getData().getConfig());
        propList.add(props);

        // checkpoint
        if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
            connectorResult = connectorService.getConnectorInfoFromCluster(connectClusterId, connectorPO.getCheckpointConnectorName());
            if (connectorResult.failed()) {
                return Result.buildFromIgnoreData(connectorResult);
            }

            props = new Properties();
            props.putAll(connectorResult.getData().getConfig());
            propList.add(props);
        }

        // heartbeat
        if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
            connectorResult = connectorService.getConnectorInfoFromCluster(connectClusterId, connectorPO.getHeartbeatConnectorName());
            if (connectorResult.failed()) {
                return Result.buildFromIgnoreData(connectorResult);
            }

            props = new Properties();
            props.putAll(connectorResult.getData().getConfig());
            propList.add(props);
        }

        return Result.buildSuc(propList);
    }

    @Override
    public Result<List<ConnectConfigInfosVO>> validateConnectors(MirrorMakerCreateDTO dto) {
        List<ConnectConfigInfosVO> voList = new ArrayList<>();

        Result<ConnectConfigInfos> infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getSuitableConfig());
        if (infoResult.failed()) {
            return Result.buildFromIgnoreData(infoResult);
        }

        voList.add(ConvertUtil.obj2Obj(infoResult.getData(), ConnectConfigInfosVO.class));

        if (dto.getHeartbeatConnectorConfigs() != null) {
            infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getHeartbeatConnectorConfigs());
            if (infoResult.failed()) {
                return Result.buildFromIgnoreData(infoResult);
            }

            voList.add(ConvertUtil.obj2Obj(infoResult.getData(), ConnectConfigInfosVO.class));
        }

        if (dto.getCheckpointConnectorConfigs() != null) {
            infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getCheckpointConnectorConfigs());
            if (infoResult.failed()) {
                return Result.buildFromIgnoreData(infoResult);
            }

            voList.add(ConvertUtil.obj2Obj(infoResult.getData(), ConnectConfigInfosVO.class));
        }

        return Result.buildSuc(voList);
    }


    /**************************************************** private method ****************************************************/

    private MetricsMirrorMakersDTO buildMetricsConnectorsDTO(List<ClusterConnectorDTO> connectorDTOList, MetricDTO metricDTO) {
        MetricsMirrorMakersDTO dto = ConvertUtil.obj2Obj(metricDTO, MetricsMirrorMakersDTO.class);
        dto.setConnectorNameList(connectorDTOList == null ? new ArrayList<>() : connectorDTOList);

        return dto;
    }

    public Result<Void> checkCreateMirrorMakerParamAndUnifyData(MirrorMakerCreateDTO dto) {
        ClusterPhy sourceClusterPhy = clusterPhyService.getClusterByCluster(dto.getSourceKafkaClusterId());
        if (sourceClusterPhy == null) {
            return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(dto.getSourceKafkaClusterId()));
        }

        ConnectCluster connectCluster = connectClusterService.getById(dto.getConnectClusterId());
        if (connectCluster == null) {
            return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getConnectClusterNotExist(dto.getConnectClusterId()));
        }

        ClusterPhy targetClusterPhy = clusterPhyService.getClusterByCluster(connectCluster.getKafkaClusterPhyId());
        if (targetClusterPhy == null) {
            return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(connectCluster.getKafkaClusterPhyId()));
        }

        if (!dto.getSuitableConfig().containsKey(CONNECTOR_CLASS_FILED_NAME)) {
            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "SourceConnector is missing connector.class");
        }

        if (!MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(dto.getSuitableConfig().getProperty(CONNECTOR_CLASS_FILED_NAME))) {
            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "SourceConnector has a wrong connector.class type");
        }

        if (dto.getCheckpointConnectorConfigs() != null) {
            if (!dto.getCheckpointConnectorConfigs().containsKey(CONNECTOR_CLASS_FILED_NAME)) {
                return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "CheckpointConnector is missing connector.class");
            }

            if (!MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE.equals(dto.getCheckpointConnectorConfigs().getProperty(CONNECTOR_CLASS_FILED_NAME))) {
                return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "CheckpointConnector has a wrong connector.class type");
            }
        }

        if (dto.getHeartbeatConnectorConfigs() != null) {
            if (!dto.getHeartbeatConnectorConfigs().containsKey(CONNECTOR_CLASS_FILED_NAME)) {
                return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "HeartbeatConnector is missing connector.class");
            }

            if (!MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE.equals(dto.getHeartbeatConnectorConfigs().getProperty(CONNECTOR_CLASS_FILED_NAME))) {
                return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "HeartbeatConnector has a wrong connector.class type");
            }
        }

        dto.unifyData(
                sourceClusterPhy.getId(), sourceClusterPhy.getBootstrapServers(), ConvertUtil.str2ObjByJson(sourceClusterPhy.getClientProperties(), Properties.class),
                targetClusterPhy.getId(), targetClusterPhy.getBootstrapServers(), ConvertUtil.str2ObjByJson(targetClusterPhy.getClientProperties(), Properties.class)
        );

        return Result.buildSuc();
    }
    private MirrorMakerStateVO convert2MirrorMakerStateVO(List<ConnectorPO> connectorPOList, List<WorkerConnector> workerConnectorList, List<ConnectWorker> workerList) {
        MirrorMakerStateVO mirrorMakerStateVO = new MirrorMakerStateVO();

        List<ConnectorPO> sourceSet = connectorPOList.stream().filter(elem -> elem.getConnectorClassName().equals(MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
        mirrorMakerStateVO.setMirrorMakerCount(sourceSet.size());

        Set<Long> connectClusterIdSet = sourceSet.stream().map(ConnectorPO::getConnectClusterId).collect(Collectors.toSet());
        mirrorMakerStateVO.setWorkerCount(workerList.stream().filter(elem -> connectClusterIdSet.contains(elem.getConnectClusterId())).collect(Collectors.toList()).size());

        List<ConnectorPO> mirrorMakerConnectorList = new ArrayList<>();
        mirrorMakerConnectorList.addAll(sourceSet);
        mirrorMakerConnectorList.addAll(connectorPOList.stream().filter(elem -> elem.getConnectorClassName().equals(MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE)).collect(Collectors.toList()));
        mirrorMakerConnectorList.addAll(connectorPOList.stream().filter(elem -> elem.getConnectorClassName().equals(MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE)).collect(Collectors.toList()));
        mirrorMakerStateVO.setTotalConnectorCount(mirrorMakerConnectorList.size());
        mirrorMakerStateVO.setAliveConnectorCount(mirrorMakerConnectorList.stream().filter(elem -> elem.getState().equals(RUNNING.name())).collect(Collectors.toList()).size());

        Set<String> connectorNameSet = mirrorMakerConnectorList.stream().map(elem -> elem.getConnectorName()).collect(Collectors.toSet());
        List<WorkerConnector> taskList = workerConnectorList.stream().filter(elem -> connectorNameSet.contains(elem.getConnectorName())).collect(Collectors.toList());
        mirrorMakerStateVO.setTotalTaskCount(taskList.size());
        mirrorMakerStateVO.setAliveTaskCount(taskList.stream().filter(elem -> elem.getState().equals(RUNNING.name())).collect(Collectors.toList()).size());

        return mirrorMakerStateVO;
    }

    private List<ClusterMirrorMakerOverviewVO> convert2ClusterMirrorMakerOverviewVO(List<ConnectorPO> mirrorMakerList, List<ConnectCluster> connectClusterList, List<MirrorMakerMetrics> latestMetric) {
        List<ClusterMirrorMakerOverviewVO> clusterMirrorMakerOverviewVOList = new ArrayList<>();
        Map<String, MirrorMakerMetrics> metricsMap = latestMetric.stream().collect(Collectors.toMap(elem -> elem.getConnectClusterId() + "@" + elem.getConnectorName(), Function.identity()));
        Map<Long, ConnectCluster> connectClusterMap = connectClusterList.stream().collect(Collectors.toMap(elem -> elem.getId(), Function.identity()));

        for (ConnectorPO mirrorMaker : mirrorMakerList) {
            ClusterMirrorMakerOverviewVO clusterMirrorMakerOverviewVO = new ClusterMirrorMakerOverviewVO();
            clusterMirrorMakerOverviewVO.setConnectClusterId(mirrorMaker.getConnectClusterId());
            clusterMirrorMakerOverviewVO.setConnectClusterName(connectClusterMap.get(mirrorMaker.getConnectClusterId()).getName());
            clusterMirrorMakerOverviewVO.setConnectorName(mirrorMaker.getConnectorName());
            clusterMirrorMakerOverviewVO.setState(mirrorMaker.getState());
            clusterMirrorMakerOverviewVO.setCheckpointConnector(mirrorMaker.getCheckpointConnectorName());
            clusterMirrorMakerOverviewVO.setTaskCount(mirrorMaker.getTaskCount());
            clusterMirrorMakerOverviewVO.setHeartbeatConnector(mirrorMaker.getHeartbeatConnectorName());
            clusterMirrorMakerOverviewVO.setLatestMetrics(metricsMap.getOrDefault(mirrorMaker.getConnectClusterId() + "@" + mirrorMaker.getConnectorName(), new MirrorMakerMetrics(mirrorMaker.getConnectClusterId(), mirrorMaker.getConnectorName())));
            clusterMirrorMakerOverviewVOList.add(clusterMirrorMakerOverviewVO);
        }
        return clusterMirrorMakerOverviewVOList;
    }
    PaginationResult<ClusterMirrorMakerOverviewVO> pagingMirrorMakerInLocal(List<ClusterMirrorMakerOverviewVO> mirrorMakerOverviewVOList, ClusterMirrorMakersOverviewDTO dto) {
        // Fuzzy matching
        List<ClusterMirrorMakerOverviewVO> mirrorMakerVOList = PaginationUtil.pageByFuzzyFilter(mirrorMakerOverviewVOList, dto.getSearchKeywords(), Arrays.asList("connectorName"));

        // Sorting
        if (!dto.getLatestMetricNames().isEmpty()) {
            PaginationMetricsUtil.sortMetrics(mirrorMakerVOList, "latestMetrics", dto.getSortMetricNameList(), "connectorName", dto.getSortType());
        } else {
            PaginationUtil.pageBySort(mirrorMakerVOList, dto.getSortField(), dto.getSortType(), "connectorName", dto.getSortType());
        }

        // Pagination
        return PaginationUtil.pageBySubData(mirrorMakerVOList, dto);
    }

    public static List<ClusterMirrorMakerOverviewVO> supplyData2ClusterMirrorMakerOverviewVOList(List<ClusterMirrorMakerOverviewVO> voList,
                                                                                                 List<MetricMultiLinesVO> metricLineVOList) {
        Map<String, List<MetricLineVO>> metricLineMap = new HashMap<>();
        if (metricLineVOList != null) {
            for (MetricMultiLinesVO metricMultiLinesVO : metricLineVOList) {
                metricMultiLinesVO.getMetricLines()
                        .forEach(metricLineVO -> {
                            String key = metricLineVO.getName();
                            List<MetricLineVO> metricLineVOS = metricLineMap.getOrDefault(key, new ArrayList<>());
                            metricLineVOS.add(metricLineVO);
                            metricLineMap.put(key, metricLineVOS);
                        });
            }
        }

        voList.forEach(elem -> elem.setMetricLines(metricLineMap.get(elem.getConnectClusterId() + "#" + elem.getConnectorName())));

        return voList;
    }

    private List<ClusterMirrorMakerOverviewVO> completeClusterInfo(List<ClusterMirrorMakerOverviewVO> mirrorMakerVOList) {
        Map<String, KSConnectorInfo> connectorInfoMap = new ConcurrentHashMap<>();

        for (ClusterMirrorMakerOverviewVO mirrorMakerVO : mirrorMakerVOList) {
            ApiCallThreadPoolService.runnableTask(
                    String.format("method=completeClusterInfo||connectClusterId=%d||connectorName=%s||getMirrorMakerInfo", mirrorMakerVO.getConnectClusterId(), mirrorMakerVO.getConnectorName()),
                    3000,
                    () -> {
                        Result<KSConnectorInfo> connectorInfoRet = connectorService.getConnectorInfoFromCluster(mirrorMakerVO.getConnectClusterId(), mirrorMakerVO.getConnectorName());
                        if (connectorInfoRet.hasData()) {
                            connectorInfoMap.put(mirrorMakerVO.getConnectClusterId() + mirrorMakerVO.getConnectorName(), connectorInfoRet.getData());
                        }
                    });
        }

        ApiCallThreadPoolService.waitResult();

        List<ClusterMirrorMakerOverviewVO> newMirrorMakerVOList = new ArrayList<>();
        for (ClusterMirrorMakerOverviewVO mirrorMakerVO : mirrorMakerVOList) {
            KSConnectorInfo connectorInfo = connectorInfoMap.get(mirrorMakerVO.getConnectClusterId() + mirrorMakerVO.getConnectorName());
            if (connectorInfo == null) {
                continue;
            }

            String sourceClusterAlias = connectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_ALIAS_FIELD_NAME);
            String targetClusterAlias = connectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_ALIAS_FIELD_NAME);
            // Default to the cluster alias first
            mirrorMakerVO.setSourceKafkaClusterName(sourceClusterAlias);
            mirrorMakerVO.setDestKafkaClusterName(targetClusterAlias);

            if (!ValidateUtils.isBlank(sourceClusterAlias) && CommonUtils.isNumeric(sourceClusterAlias)) {
                ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(Long.valueOf(sourceClusterAlias));
                if (clusterPhy != null) {
                    mirrorMakerVO.setSourceKafkaClusterId(clusterPhy.getId());
                    mirrorMakerVO.setSourceKafkaClusterName(clusterPhy.getName());
                }
            }

            if (!ValidateUtils.isBlank(targetClusterAlias) && CommonUtils.isNumeric(targetClusterAlias)) {
                ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(Long.valueOf(targetClusterAlias));
                if (clusterPhy != null) {
                    mirrorMakerVO.setDestKafkaClusterId(clusterPhy.getId());
                    mirrorMakerVO.setDestKafkaClusterName(clusterPhy.getName());
                }
            }

            newMirrorMakerVOList.add(mirrorMakerVO);
        }

        return newMirrorMakerVOList;
    }
}
@@ -1,6 +1,7 @@
package com.xiaojukeji.know.streaming.km.biz.group;

import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterGroupSummaryDTO;
+import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetDeleteDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
@@ -25,7 +26,7 @@ public interface GroupManager {
                                                           String searchGroupKeyword,
                                                           PaginationBaseDTO dto);

-   PaginationResult<GroupTopicOverviewVO> pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto);
+   PaginationResult<GroupTopicOverviewVO> pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) throws Exception;

    PaginationResult<GroupOverviewVO> pagingClusterGroupsOverview(Long clusterPhyId, ClusterGroupSummaryDTO dto);

@@ -39,5 +40,9 @@ public interface GroupManager {

    Result<Void> resetGroupOffsets(GroupOffsetResetDTO dto, String operator) throws Exception;

-   List<GroupTopicOverviewVO> getGroupTopicOverviewVOList (Long clusterPhyId, List<GroupMemberPO> groupMemberPOList);
+   Result<Void> deleteGroupOffsets(GroupOffsetDeleteDTO dto, String operator) throws Exception;
+
+   @Deprecated
+   List<GroupTopicOverviewVO> getGroupTopicOverviewVOList(Long clusterPhyId, List<GroupMemberPO> groupMemberPOList);
+   List<GroupTopicOverviewVO> getGroupTopicOverviewVOList(Long clusterPhyId, List<GroupMemberPO> groupMemberPOList, Integer timeoutUnitMs);
}
@@ -4,6 +4,7 @@ import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterGroupSummaryDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetDeleteDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
@@ -17,6 +18,9 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSMemberConsume
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSMemberDescription;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicPartitionParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
@@ -32,6 +36,7 @@ import com.xiaojukeji.know.streaming.km.common.converter.GroupConverter;
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
@@ -40,11 +45,14 @@ import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupMetricService;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.group.OpGroupService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.utils.ApiCallThreadPoolService;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.GroupMetricESDAO;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.TopicPartition;
@@ -52,13 +60,14 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

import static com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum.CONNECT_CLUSTER_PROTOCOL_TYPE;

@Component
public class GroupManagerImpl implements GroupManager {
    private static final ILog log = LogFactory.getLog(GroupManagerImpl.class);
    private static final ILog LOGGER = LogFactory.getLog(GroupManagerImpl.class);

    @Autowired
    private TopicService topicService;
@@ -66,6 +75,9 @@ public class GroupManagerImpl implements GroupManager {
    @Autowired
    private GroupService groupService;

    @Autowired
    private OpGroupService opGroupService;

    @Autowired
    private PartitionService partitionService;

@@ -78,6 +90,9 @@ public class GroupManagerImpl implements GroupManager {
    @Autowired
    private ClusterPhyService clusterPhyService;

    @Autowired
    private KSConfigUtils ksConfigUtils;

    @Override
    public PaginationResult<GroupTopicOverviewVO> pagingGroupMembers(Long clusterPhyId,
                                                                     String topicName,
@@ -85,20 +100,33 @@ public class GroupManagerImpl implements GroupManager {
                                                                     String searchTopicKeyword,
                                                                     String searchGroupKeyword,
                                                                     PaginationBaseDTO dto) {
        long startTimeUnitMs = System.currentTimeMillis();

        PaginationResult<GroupMemberPO> paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, groupName, searchTopicKeyword, searchGroupKeyword, dto);

        if (!paginationResult.hasData()) {
            return PaginationResult.buildSuc(new ArrayList<>(), paginationResult);
        }

        List<GroupTopicOverviewVO> groupTopicVOList = this.getGroupTopicOverviewVOList(clusterPhyId, paginationResult.getData().getBizData());
        List<GroupTopicOverviewVO> groupTopicVOList = this.getGroupTopicOverviewVOList(
                clusterPhyId,
                paginationResult.getData().getBizData(),
                ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) // timeout budget
        );

        return PaginationResult.buildSuc(groupTopicVOList, paginationResult);
    }

    @Override
    public PaginationResult<GroupTopicOverviewVO> pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) {
        Group group = groupService.getGroupFromDB(clusterPhyId, groupName);
    public PaginationResult<GroupTopicOverviewVO> pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) throws Exception {
        long startTimeUnitMs = System.currentTimeMillis();

        ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
        if (clusterPhy == null) {
            return PaginationResult.buildFailure(MsgConstant.getClusterPhyNotExist(clusterPhyId), dto);
        }

        Group group = groupService.getGroupFromKafka(clusterPhy, groupName);

        // Return directly if there are no topic members
        if (group == null || ValidateUtils.isEmptyList(group.getTopicMembers())) {
@@ -113,7 +141,14 @@ public class GroupManagerImpl implements GroupManager {

        List<GroupMemberPO> groupMemberPOList = paginationResult.getData().getBizData().stream().map(elem -> new GroupMemberPO(clusterPhyId, elem.getTopicName(), groupName, group.getState().getState(), elem.getMemberCount())).collect(Collectors.toList());

        return PaginationResult.buildSuc(this.getGroupTopicOverviewVOList(clusterPhyId, groupMemberPOList), paginationResult);
        return PaginationResult.buildSuc(
                this.getGroupTopicOverviewVOList(
                        clusterPhyId,
                        groupMemberPOList,
                        ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) // timeout budget
                ),
                paginationResult
        );
    }

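Note: the repeated ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) call above hands the downstream lookup whatever remains of the request's overall time budget. A minimal sketch of that contract, assuming a configured total budget (totalApiCallTimeoutUnitMs and minLeftTimeUnitMs are illustrative names, not the real fields):

    // Hypothetical sketch, not the actual KSConfigUtils implementation.
    public Integer getApiCallLeftTimeUnitMs(long elapsedUnitMs) {
        long left = totalApiCallTimeoutUnitMs - elapsedUnitMs; // budget minus time already spent
        return (int) Math.max(left, minLeftTimeUnitMs);        // never hand out a non-positive timeout
    }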
    @Override
@@ -121,7 +156,7 @@ public class GroupManagerImpl implements GroupManager {
        List<Group> groupList = groupService.listClusterGroups(clusterPhyId);

        // Convert the type
        List<GroupOverviewVO> voList = groupList.stream().map(elem -> GroupConverter.convert2GroupOverviewVO(elem)).collect(Collectors.toList());
        List<GroupOverviewVO> voList = groupList.stream().map(GroupConverter::convert2GroupOverviewVO).collect(Collectors.toList());

        // Search by groupName
        voList = PaginationUtil.pageByFuzzyFilter(voList, dto.getSearchGroupName(), Arrays.asList("name"));
@@ -168,9 +203,10 @@
        // Convert the storage format
        Map<TopicPartition, KSMemberDescription> tpMemberMap = new HashMap<>();

        //If this is not a connect cluster
        // If this is not a connect cluster
        if (!groupDescription.protocolType().equals(CONNECT_CLUSTER_PROTOCOL_TYPE)) {
            for (KSMemberDescription description : groupDescription.members()) {
                // If the description belongs to a consumer, the assignment is of type KSMemberConsumerAssignment
                KSMemberConsumerAssignment assignment = (KSMemberConsumerAssignment) description.assignment();
                for (TopicPartition tp : assignment.topicPartitions()) {
                    tpMemberMap.put(tp, description);
@@ -245,6 +281,52 @@
        return groupService.resetGroupOffsets(dto.getClusterId(), dto.getGroupName(), offsetMapResult.getData(), operator);
    }

    @Override
    public Result<Void> deleteGroupOffsets(GroupOffsetDeleteDTO dto, String operator) throws Exception {
        ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(dto.getClusterPhyId());
        if (clusterPhy == null) {
            return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(dto.getClusterPhyId()));
        }

        // Delete at the group level
        if (ValidateUtils.isBlank(dto.getGroupName())) {
            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "groupName不允许为空");
        }
        if (DeleteGroupTypeEnum.GROUP.getCode().equals(dto.getDeleteType())) {
            return opGroupService.deleteGroupOffset(
                    new DeleteGroupParam(dto.getClusterPhyId(), dto.getGroupName(), DeleteGroupTypeEnum.GROUP),
                    operator
            );
        }

        // Delete at the topic level
        if (ValidateUtils.isBlank(dto.getTopicName())) {
            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "topicName不允许为空");
        }
        if (DeleteGroupTypeEnum.GROUP_TOPIC.getCode().equals(dto.getDeleteType())) {
            return opGroupService.deleteGroupTopicOffset(
                    new DeleteGroupTopicParam(dto.getClusterPhyId(), dto.getGroupName(), DeleteGroupTypeEnum.GROUP, dto.getTopicName()),
                    operator
            );
        }

        // Delete at the partition level
        if (ValidateUtils.isNullOrLessThanZero(dto.getPartitionId())) {
            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "partitionId不允许为空或小于0");
        }
        if (DeleteGroupTypeEnum.GROUP_TOPIC_PARTITION.getCode().equals(dto.getDeleteType())) {
            return opGroupService.deleteGroupTopicPartitionOffset(
                    new DeleteGroupTopicPartitionParam(dto.getClusterPhyId(), dto.getGroupName(), DeleteGroupTypeEnum.GROUP, dto.getTopicName(), dto.getPartitionId()),
                    operator
            );
        }

        return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "deleteType类型错误");
    }

    @Override
    public List<GroupTopicOverviewVO> getGroupTopicOverviewVOList(Long clusterPhyId, List<GroupMemberPO> groupMemberPOList) {
        // Fetch the metrics
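Note: deleteGroupOffsets validates progressively — groupName for a GROUP delete, plus topicName for GROUP_TOPIC, plus partitionId for GROUP_TOPIC_PARTITION. A hedged usage sketch; the DTO setters below are assumed from its field names:

    // Hypothetical usage: delete the committed offset of a single partition.
    GroupOffsetDeleteDTO dto = new GroupOffsetDeleteDTO();
    dto.setClusterPhyId(1L);                 // assumed setter
    dto.setGroupName("order-consumer");      // required for every delete type
    dto.setTopicName("order-events");        // required from GROUP_TOPIC upwards
    dto.setPartitionId(0);                   // required only for GROUP_TOPIC_PARTITION
    dto.setDeleteType(DeleteGroupTypeEnum.GROUP_TOPIC_PARTITION.getCode());
    Result<Void> result = groupManager.deleteGroupOffsets(dto, "admin");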
@@ -256,11 +338,54 @@
        );
        if (metricsListResult.failed()) {
            // If the query fails, log the error but still return the data already at hand
            log.error("method=completeMetricData||clusterPhyId={}||result={}||errMsg=search es failed", clusterPhyId, metricsListResult);
            LOGGER.error("method=completeMetricData||clusterPhyId={}||result={}||errMsg=search es failed", clusterPhyId, metricsListResult);
        }
        return this.convert2GroupTopicOverviewVOList(groupMemberPOList, metricsListResult.getData());
    }

    @Override
    public List<GroupTopicOverviewVO> getGroupTopicOverviewVOList(Long clusterPhyId, List<GroupMemberPO> poList, Integer timeoutUnitMs) {
        Set<String> requestedGroupSet = new HashSet<>();

        // Fetch the metrics
        Map<String, Map<String, Float>> groupTopicLagMap = new ConcurrentHashMap<>();
        poList.forEach(elem -> {
            if (requestedGroupSet.contains(elem.getGroupName())) {
                // This group has already been handled
                return;
            }

            requestedGroupSet.add(elem.getGroupName());
            ApiCallThreadPoolService.runnableTask(
                    String.format("clusterPhyId=%d||groupName=%s||msg=getGroupTopicLag", clusterPhyId, elem.getGroupName()),
                    timeoutUnitMs,
                    () -> {
                        Result<List<GroupMetrics>> listResult = groupMetricService.collectGroupMetricsFromKafka(clusterPhyId, elem.getGroupName(), GroupMetricVersionItems.GROUP_METRIC_LAG);
                        if (listResult == null || !listResult.hasData()) {
                            return;
                        }

                        Map<String, Float> lagMetricMap = new HashMap<>();
                        listResult.getData().forEach(item -> {
                            Float newLag = item.getMetric(GroupMetricVersionItems.GROUP_METRIC_LAG);
                            if (newLag == null) {
                                return;
                            }

                            Float oldLag = lagMetricMap.getOrDefault(item.getTopic(), newLag);
                            lagMetricMap.put(item.getTopic(), Math.max(oldLag, newLag));
                        });

                        groupTopicLagMap.put(elem.getGroupName(), lagMetricMap);
                    }
            );
        });

        ApiCallThreadPoolService.waitResult();

        return this.convert2GroupTopicOverviewVOList(poList, groupTopicLagMap);
    }


    /**************************************************** private method ****************************************************/

@@ -314,13 +439,22 @@
            metricsList = new ArrayList<>();
        }

        // <GroupName, <TopicName, GroupMetrics>>
        Map<String, Map<String, GroupMetrics>> metricsMap = new HashMap<>();
        // <GroupName, <TopicName, lag>>
        Map<String, Map<String, Float>> metricsMap = new HashMap<>();
        metricsList.stream().forEach(elem -> {
            Float metricValue = elem.getMetrics().get(GroupMetricVersionItems.GROUP_METRIC_LAG);
            if (metricValue == null) {
                return;
            }

            metricsMap.putIfAbsent(elem.getGroup(), new HashMap<>());
            metricsMap.get(elem.getGroup()).put(elem.getTopic(), elem);
            metricsMap.get(elem.getGroup()).put(elem.getTopic(), metricValue);
        });

        return this.convert2GroupTopicOverviewVOList(poList, metricsMap);
    }

    private List<GroupTopicOverviewVO> convert2GroupTopicOverviewVOList(List<GroupMemberPO> poList, Map<String, Map<String, Float>> metricsMap) {
        List<GroupTopicOverviewVO> voList = new ArrayList<>();
        for (GroupMemberPO po: poList) {
            GroupTopicOverviewVO vo = ConvertUtil.obj2Obj(po, GroupTopicOverviewVO.class);
@@ -328,9 +462,9 @@
                continue;
            }

            GroupMetrics metrics = metricsMap.getOrDefault(po.getGroupName(), new HashMap<>()).get(po.getTopicName());
            if (metrics != null) {
                vo.setMaxLag(ConvertUtil.Float2Long(metrics.getMetrics().get(GroupMetricVersionItems.GROUP_METRIC_LAG)));
            Float metricValue = metricsMap.getOrDefault(po.getGroupName(), new HashMap<>()).get(po.getTopicName());
            if (metricValue != null) {
                vo.setMaxLag(ConvertUtil.Float2Long(metricValue));
            }

            voList.add(vo);

@@ -19,4 +19,9 @@ public interface OpTopicManager {
     * Expand partitions
     */
    Result<Void> expandTopic(TopicExpansionDTO dto, String operator);

    /**
     * Truncate a topic
     */
    Result<Void> truncateTopic(Long clusterPhyId, String topicName, String operator);
}

@@ -7,25 +7,28 @@ import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicExpansionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.config.KafkaTopicConfigParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicTruncateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.*;
import com.xiaojukeji.know.streaming.km.common.utils.kafka.KafkaReplicaAssignUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.topic.OpTopicService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import kafka.admin.AdminUtils;
import kafka.admin.BrokerMetadata;
import org.apache.kafka.common.config.TopicConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
@@ -59,6 +62,9 @@ public class OpTopicManagerImpl implements OpTopicManager {
    @Autowired
    private PartitionService partitionService;

    @Autowired
    private TopicConfigService topicConfigService;

    @Override
    public Result<Void> createTopic(TopicCreateDTO dto, String operator) {
        log.info("method=createTopic||param={}||operator={}.", dto, operator);
@@ -156,9 +162,74 @@ public class OpTopicManagerImpl implements OpTopicManager {
        return rv;
    }

    @Override
    public Result<Void> truncateTopic(Long clusterPhyId, String topicName, String operator) {
        // Add the delete cleanup policy
        Result<Tuple<Boolean, String>> rt = this.addDeleteConfigIfNotExist(clusterPhyId, topicName, operator);
        if (rt.failed()) {
            log.error("method=truncateTopic||clusterPhyId={}||topicName={}||operator={}||result={}||msg=get config from kafka failed", clusterPhyId, topicName, operator, rt);
            return Result.buildFromIgnoreData(rt);
        }

        // Truncate the topic
        Result<Void> rv = opTopicService.truncateTopic(new TopicTruncateParam(clusterPhyId, topicName, KafkaConstant.TOPICK_TRUNCATE_DEFAULT_OFFSET), operator);
        if (rv.failed()) {
            log.error("method=truncateTopic||clusterPhyId={}||topicName={}||originConfig={}||operator={}||result={}||msg=truncate topic failed", clusterPhyId, topicName, rt.getData().v2(), operator, rv);
            // If the config was modified, the error message must mention it; otherwise return the error directly
            return rt.getData().v1() ? Result.buildFailure(rv.getCode(), rv.getMessage() + "\t\n" + String.format("Topic的CleanupPolicy已被修改,需要手动恢复为%s", rt.getData().v2())) : rv;
        }

        // Restore the compact config
        rv = this.recoverConfigIfChanged(clusterPhyId, topicName, rt.getData().v1(), rt.getData().v2(), operator);
        if (rv.failed()) {
            log.error("method=truncateTopic||clusterPhyId={}||topicName={}||originConfig={}||operator={}||result={}||msg=truncate topic success but recover config failed", clusterPhyId, topicName, rt.getData().v2(), operator, rv);
            // If the config was modified, the error message must mention it; otherwise return the error directly
            return Result.buildFailure(rv.getCode(), String.format("Topic清空操作已成功,但是恢复CleanupPolicy配置失败,需要手动恢复为%s。", rt.getData().v2()) + "\t\n" + rv.getMessage());
        }

        return Result.buildSuc();
    }

    /**************************************************** private method ****************************************************/

    private Result<Tuple<Boolean, String>> addDeleteConfigIfNotExist(Long clusterPhyId, String topicName, String operator) {
        // Fetch the topic config
        Result<Map<String, String>> configMapResult = topicConfigService.getTopicConfigFromKafka(clusterPhyId, topicName);
        if (configMapResult.failed()) {
            return Result.buildFromIgnoreData(configMapResult);
        }

        String cleanupPolicyValue = configMapResult.getData().getOrDefault(TopicConfig.CLEANUP_POLICY_CONFIG, "");
        List<String> cleanupPolicyValueList = CommonUtils.string2StrList(cleanupPolicyValue);
        if (cleanupPolicyValueList.size() == 1 && cleanupPolicyValueList.contains(TopicConfig.CLEANUP_POLICY_DELETE)) {
            // No change needed
            return Result.buildSuc(new Tuple<>(Boolean.FALSE, cleanupPolicyValue));
        }

        Map<String, String> changedConfigMap = new HashMap<>(1);
        changedConfigMap.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);

        Result<Void> rv = topicConfigService.modifyTopicConfig(new KafkaTopicConfigParam(clusterPhyId, topicName, changedConfigMap), operator);
        if (rv.failed()) {
            // Modification failed
            return Result.buildFromIgnoreData(rv);
        }

        return Result.buildSuc(new Tuple<>(Boolean.TRUE, cleanupPolicyValue));
    }

    private Result<Void> recoverConfigIfChanged(Long clusterPhyId, String topicName, Boolean changed, String originValue, String operator) {
        if (!changed) {
            // Nothing was changed, return directly
            return Result.buildSuc();
        }

        // Restore the config
        Map<String, String> changedConfigMap = new HashMap<>(1);
        changedConfigMap.put(TopicConfig.CLEANUP_POLICY_CONFIG, originValue);

        return topicConfigService.modifyTopicConfig(new KafkaTopicConfigParam(clusterPhyId, topicName, changedConfigMap), operator);
    }

    private Seq<BrokerMetadata> buildBrokerMetadataSeq(Long clusterPhyId, final List<Integer> selectedBrokerIdList) {
        // Select the broker list

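Note: Kafka only honors record deletion on topics whose cleanup.policy includes delete, hence the toggle-truncate-restore flow above. The same idea expressed directly against Kafka's AdminClient, as an illustrative sketch (the adminClient instance and the single-partition "my-topic" are assumptions):

    // Illustrative only; KnowStreaming routes this through its own services.
    ConfigResource res = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
    AlterConfigOp setDelete = new AlterConfigOp(
            new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE),
            AlterConfigOp.OpType.SET);
    adminClient.incrementalAlterConfigs(Collections.singletonMap(res, Collections.singletonList(setDelete))).all().get();
    // Truncate: delete everything before the high watermark (-1 is interpreted as the log end offset).
    adminClient.deleteRecords(Collections.singletonMap(
            new TopicPartition("my-topic", 0), RecordsToDelete.beforeOffset(-1L))).all().get();
    // Afterwards restore the original cleanup.policy, as recoverConfigIfChanged does above.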
@@ -28,6 +28,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.partition.TopicPart
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.PaginationConstant;
import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter;
import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
@@ -38,6 +39,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
@@ -45,8 +47,7 @@ import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import com.xiaojukeji.know.streaming.km.core.utils.ApiCallThreadPoolService;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.TopicConfig;
@@ -60,7 +61,7 @@ import java.util.stream.Collectors;

@Component
public class TopicStateManagerImpl implements TopicStateManager {
    private static final ILog log = LogFactory.getLog(TopicStateManagerImpl.class);
    private static final ILog LOGGER = LogFactory.getLog(TopicStateManagerImpl.class);

    @Autowired
    private TopicService topicService;
@@ -89,6 +90,9 @@ public class TopicStateManagerImpl implements TopicStateManager {
    @Autowired
    private GroupManager groupManager;

    @Autowired
    private KSConfigUtils ksConfigUtils;

    @Override
    public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException {
        Topic topic = topicService.getTopic(clusterPhyId, topicName);
@@ -101,7 +105,7 @@ public class TopicStateManagerImpl implements TopicStateManager {
        TopicBrokerAllVO allVO = new TopicBrokerAllVO();

        allVO.setTotal(topic.getBrokerIdSet().size());
        allVO.setLive((int)brokerMap.values().stream().filter(elem -> elem.alive()).count());
        allVO.setLive((int)brokerMap.values().stream().filter(Broker::alive).count());
        allVO.setDead(allVO.getTotal() - allVO.getLive());

        allVO.setPartitionCount(topic.getPartitionNum());
@@ -153,97 +157,28 @@ public class TopicStateManagerImpl implements TopicStateManager {
            return Result.buildFromIgnoreData(endOffsetsMapResult);
        }

        List<TopicRecordVO> voList = new ArrayList<>();
        // Collect the data
        List<TopicRecordVO> voList = this.getTopicMessages(clusterPhy, topicName, beginOffsetsMapResult.getData(), endOffsetsMapResult.getData(), startTime, dto);

        KafkaConsumer<String, String> kafkaConsumer = null;
        try {
            // Create the kafka-consumer
            kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()));

            List<TopicPartition> partitionList = new ArrayList<>();
            long maxMessage = 0;
            for (Map.Entry<TopicPartition, Long> entry : endOffsetsMapResult.getData().entrySet()) {
                long begin = beginOffsetsMapResult.getData().get(entry.getKey());
                long end = entry.getValue();
                if (begin == end) {
                    continue;
                }
                maxMessage += end - begin;
                partitionList.add(entry.getKey());
            }
            maxMessage = Math.min(maxMessage, dto.getMaxRecords());
            kafkaConsumer.assign(partitionList);

            Map<TopicPartition, OffsetAndTimestamp> partitionOffsetAndTimestampMap = new HashMap<>();
            // Fetch each partition's offset at the given time (when querying messages from a specified start time)
            if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) {
                Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
                partitionList.forEach(topicPartition -> {
                    timestampsToSearch.put(topicPartition, dto.getStartTimestampUnitMs());
                });
                partitionOffsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch);
            }

            for (TopicPartition partition : partitionList) {
                if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getFilterOffsetReset()) {
                    // Reset to the earliest offset
                    kafkaConsumer.seek(partition, beginOffsetsMapResult.getData().get(partition));
                } else if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) {
                    // Reset to the specified time
                    kafkaConsumer.seek(partition, partitionOffsetAndTimestampMap.get(partition).offset());
                } else if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getFilterOffsetReset()) {
                    // Reset to the specified offset

                } else {
                    // Default: reset to the latest
                    kafkaConsumer.seek(partition, Math.max(beginOffsetsMapResult.getData().get(partition), endOffsetsMapResult.getData().get(partition) - dto.getMaxRecords()));
                }
            }

            // KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS is subtracted because each poll takes time; without it, a poll could overrun the requested deadline
            while (System.currentTimeMillis() - startTime <= dto.getPullTimeoutUnitMs() && voList.size() < maxMessage) {
                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                    if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) {
                        continue;
                    }

                    voList.add(TopicVOConverter.convert2TopicRecordVO(topicName, consumerRecord));
                    if (voList.size() >= dto.getMaxRecords()) {
                        break;
                    }
                }

                // Return on timeout
                if (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS > dto.getPullTimeoutUnitMs()
                        || voList.size() > dto.getMaxRecords()) {
                    break;
                }
            }

            // Sort
            if (ObjectUtils.isNotEmpty(voList)) {
                // Default: sort by time descending
                if (StringUtils.isBlank(dto.getSortType())) {
                    dto.setSortType(SortTypeEnum.DESC.getSortType());
                }
                PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType());
            }

            return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size())));
        } catch (Exception e) {
            log.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhyId, topicName, dto, e);

            throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
        } finally {
            if (kafkaConsumer != null) {
                try {
                    kafkaConsumer.close(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
                } catch (Exception e) {
                    // ignore
                }
            }
        // Sort
        if (ValidateUtils.isBlank(dto.getSortType())) {
            // Default: sort by time descending
            dto.setSortType(SortTypeEnum.DESC.getSortType());
        }
        if (ValidateUtils.isBlank(dto.getSortField())) {
            // Default: sort by the timestampUnitMs field
            dto.setSortField(PaginationConstant.TOPIC_RECORDS_TIME_SORTED_FIELD);
        }

        if (PaginationConstant.TOPIC_RECORDS_TIME_SORTED_FIELD.equals(dto.getSortField())) {
            // For the time field, the secondary sort key is offset
            PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType(), PaginationConstant.TOPIC_RECORDS_OFFSET_SORTED_FIELD, dto.getSortType());
        } else {
            // For non-time fields, the secondary sort key is time
            PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType(), PaginationConstant.TOPIC_RECORDS_TIME_SORTED_FIELD, dto.getSortType());
        }

        return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size())));
    }

    @Override
@@ -298,26 +233,37 @@ public class TopicStateManagerImpl implements TopicStateManager {

    @Override
    public Result<List<TopicPartitionVO>> getTopicPartitions(Long clusterPhyId, String topicName, List<String> metricsNames) {
        long startTime = System.currentTimeMillis();

        List<Partition> partitionList = partitionService.listPartitionByTopic(clusterPhyId, topicName);
        if (ValidateUtils.isEmptyList(partitionList)) {
            return Result.buildSuc();
        }

        Result<List<PartitionMetrics>> metricsResult = partitionMetricService.collectPartitionsMetricsFromKafka(clusterPhyId, topicName, metricsNames);
        if (metricsResult.failed()) {
            // Only log the error, do not return it directly
            log.error(
                    "method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from es failed",
                    clusterPhyId, topicName, metricsResult
            );
        }

        // Convert to a map
        Map<Integer, PartitionMetrics> metricsMap = new HashMap<>();
        if (metricsResult.hasData()) {
            for (PartitionMetrics metrics: metricsResult.getData()) {
                metricsMap.put(metrics.getPartitionId(), metrics);
            }
        ApiCallThreadPoolService.runnableTask(
                String.format("clusterPhyId=%d||topicName=%s||method=getTopicPartitions", clusterPhyId, topicName),
                ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTime),
                () -> {
                    Result<List<PartitionMetrics>> metricsResult = partitionMetricService.collectPartitionsMetricsFromKafka(clusterPhyId, topicName, metricsNames);
                    if (metricsResult.failed()) {
                        // Only log the error, do not return it directly
                        LOGGER.error(
                                "method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from kafka failed",
                                clusterPhyId, topicName, metricsResult
                        );
                    }

                    for (PartitionMetrics metrics: metricsResult.getData()) {
                        metricsMap.put(metrics.getPartitionId(), metrics);
                    }
                }
        );
        boolean finished = ApiCallThreadPoolService.waitResultAndReturnFinished(1);

        if (!finished && metricsMap.isEmpty()) {
            // Not finished -> log it
            LOGGER.error("method=getTopicPartitions||clusterPhyId={}||topicName={}||msg=get metrics from kafka failed", clusterPhyId, topicName);
        }

        List<TopicPartitionVO> voList = new ArrayList<>();
@@ -336,7 +282,7 @@ public class TopicStateManagerImpl implements TopicStateManager {

        // Broker statistics
        vo.setBrokerCount(brokerMap.size());
        vo.setLiveBrokerCount((int)brokerMap.values().stream().filter(elem -> elem.alive()).count());
        vo.setLiveBrokerCount((int)brokerMap.values().stream().filter(Broker::alive).count());
        vo.setDeadBrokerCount(vo.getBrokerCount() - vo.getLiveBrokerCount());

        // Partition statistics
@@ -360,13 +306,19 @@ public class TopicStateManagerImpl implements TopicStateManager {

    @Override
    public PaginationResult<GroupTopicOverviewVO> pagingTopicGroupsOverview(Long clusterPhyId, String topicName, String searchGroupName, PaginationBaseDTO dto) {
        long startTimeUnitMs = System.currentTimeMillis();

        PaginationResult<GroupMemberPO> paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, "", "", searchGroupName, dto);

        if (!paginationResult.hasData()) {
            return PaginationResult.buildSuc(new ArrayList<>(), paginationResult);
        }

        List<GroupTopicOverviewVO> groupTopicVOList = groupManager.getGroupTopicOverviewVOList(clusterPhyId, paginationResult.getData().getBizData());
        List<GroupTopicOverviewVO> groupTopicVOList = groupManager.getGroupTopicOverviewVOList(
                clusterPhyId,
                paginationResult.getData().getBizData(),
                ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) // timeout budget
        );

        return PaginationResult.buildSuc(groupTopicVOList, paginationResult);
    }
@@ -386,11 +338,8 @@ public class TopicStateManagerImpl implements TopicStateManager {
            // ignore
            return true;
        }
        if (filterValue != null && consumerRecord.value() != null && !consumerRecord.value().contains(filterValue)) {
            return true;
        }

        return false;
        return (filterValue != null && consumerRecord.value() != null && !consumerRecord.value().contains(filterValue));
    }

    private TopicBrokerSingleVO getTopicBrokerSingle(Long clusterPhyId,
@@ -450,4 +399,90 @@ public class TopicStateManagerImpl implements TopicStateManager {
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Math.max(2, Math.min(5, maxPollRecords)));
        return props;
    }

    private List<TopicRecordVO> getTopicMessages(ClusterPhy clusterPhy,
                                                 String topicName,
                                                 Map<TopicPartition, Long> beginOffsetsMap,
                                                 Map<TopicPartition, Long> endOffsetsMap,
                                                 long startTime,
                                                 TopicRecordDTO dto) throws AdminOperateException {
        List<TopicRecordVO> voList = new ArrayList<>();

        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()))) {
            // Seek to the specified offsets
            long maxMessage = this.assignAndSeekToSpecifiedOffset(kafkaConsumer, beginOffsetsMap, endOffsetsMap, dto);

            // KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS is subtracted because each poll takes time; without it, a poll could overrun the requested deadline
            while (System.currentTimeMillis() - startTime <= dto.getPullTimeoutUnitMs() && voList.size() < maxMessage) {
                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                    if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) {
                        continue;
                    }

                    voList.add(TopicVOConverter.convert2TopicRecordVO(topicName, consumerRecord));
                    if (voList.size() >= dto.getMaxRecords()) {
                        break;
                    }
                }

                // Return on timeout
                if (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS > dto.getPullTimeoutUnitMs()
                        || voList.size() > dto.getMaxRecords()) {
                    break;
                }
            }

            return voList;
        } catch (Exception e) {
            LOGGER.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhy.getId(), topicName, dto, e);

            throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
        }
    }

    private long assignAndSeekToSpecifiedOffset(KafkaConsumer<String, String> kafkaConsumer,
                                                Map<TopicPartition, Long> beginOffsetsMap,
                                                Map<TopicPartition, Long> endOffsetsMap,
                                                TopicRecordDTO dto) {
        List<TopicPartition> partitionList = new ArrayList<>();
        long maxMessage = 0;
        for (Map.Entry<TopicPartition, Long> entry : endOffsetsMap.entrySet()) {
            long begin = beginOffsetsMap.get(entry.getKey());
            long end = entry.getValue();
            if (begin == end) {
                continue;
            }
            maxMessage += end - begin;
            partitionList.add(entry.getKey());
        }
        maxMessage = Math.min(maxMessage, dto.getMaxRecords());
        kafkaConsumer.assign(partitionList);

        Map<TopicPartition, OffsetAndTimestamp> partitionOffsetAndTimestampMap = new HashMap<>();
        // Fetch each partition's offset at the given time (when querying messages from a specified start time)
        if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) {
            Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
            partitionList.forEach(topicPartition -> timestampsToSearch.put(topicPartition, dto.getStartTimestampUnitMs()));
            partitionOffsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch);
        }

        for (TopicPartition partition : partitionList) {
            if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getFilterOffsetReset()) {
                // Reset to the earliest offset
                kafkaConsumer.seek(partition, beginOffsetsMap.get(partition));
            } else if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) {
                // Reset to the specified time
                kafkaConsumer.seek(partition, partitionOffsetAndTimestampMap.get(partition).offset());
            } else if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getFilterOffsetReset()) {
                // Reset to the specified offset

            } else {
                // Default: reset to the latest
                kafkaConsumer.seek(partition, Math.max(beginOffsetsMap.get(partition), endOffsetsMap.get(partition) - dto.getMaxRecords()));
            }
        }

        return maxMessage;
    }
}

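Note: both poll loops above budget time explicitly — a further poll of POLL_ONCE_TIMEOUT_UNIT_MS is attempted only if it can still finish inside pullTimeoutUnitMs. A standalone sketch of the loop shape, with placeholder values standing in for the DTO fields and constants:

    // Minimal sketch of the bounded-poll pattern; values are placeholders.
    long pullTimeoutMs = 3000L;   // overall budget, cf. dto.getPullTimeoutUnitMs()
    long pollOnceMs    = 500L;    // cost of one poll, cf. KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS
    long startMs = System.currentTimeMillis();
    while (System.currentTimeMillis() - startMs + pollOnceMs <= pullTimeoutMs) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(pollOnceMs));
        // ... accumulate records, stop early once maxRecords is reached ...
    }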
@@ -34,6 +34,10 @@ import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafk
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.MirrorMakerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ZookeeperMetricVersionItems.*;

@Service
public class VersionControlManagerImpl implements VersionControlManager {
@@ -48,6 +52,7 @@ public class VersionControlManagerImpl implements VersionControlManager {

    @PostConstruct
    public void init(){
        // topic
        defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_STATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_FETCH_REQ, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_PRODUCE_REQ, true));
@@ -58,6 +63,7 @@ public class VersionControlManagerImpl implements VersionControlManager {
        defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_REJECTED, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true));

        // cluster
        defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_STATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_IN, true));
@@ -73,11 +79,13 @@ public class VersionControlManagerImpl implements VersionControlManager {
        defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_GROUP_REBALANCES, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_JOB_RUNNING, true));

        // group
        defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_OFFSET_CONSUMED, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_LAG, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_STATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_STATE, true));

        // broker
        defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_STATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_MESSAGE_IN, true));
@@ -91,6 +99,68 @@ public class VersionControlManagerImpl implements VersionControlManager {
        defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_PARTITIONS_SKEW, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_IN, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_OUT, true));

        // zookeeper
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_HEALTH_STATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_HEALTH_CHECK_PASSED, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_NODE_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_WATCH_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_PACKETS_RECEIVED, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_PACKETS_SENT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_EPHEMERALS_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_APPROXIMATE_DATA_SIZE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_OPEN_FILE_DESCRIPTOR_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_KAFKA_ZK_DISCONNECTS_PER_SEC, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_KAFKA_ZK_SYNC_CONNECTS_PER_SEC, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_ZOOKEEPER.getCode(), ZOOKEEPER_METRIC_KAFKA_ZK_REQUEST_LATENCY_99TH, true));

        // mm2
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_BYTE_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_BYTE_RATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_RECORD_AGE_MS_MAX, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_RECORD_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_RECORD_RATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MAX, true));

        // Connect Cluster
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_ATTEMPTS_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_STARTUP_ATTEMPTS_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_PERCENTAGE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_COLLECT_COST_TIME, true));


        // Connect Connector
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_HEALTH_STATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_HEALTH_CHECK_PASSED, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_HEALTH_CHECK_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_COLLECT_COST_TIME, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_CONNECTOR_RUNNING_TASK_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_POLL_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_WRITE_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_READ_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_SEND_TOTAL, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_FAILURES, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_REQUESTS, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_TOTAL_ERRORS_LOGGED, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_POLL_RATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_WRITE_RATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_READ_RATE, true));
        defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_SEND_RATE, true));

    }

    @Autowired

@@ -5,13 +5,13 @@
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.xiaojukeji.kafka</groupId>
    <artifactId>km-collector</artifactId>
    <version>${km.revision}</version>
    <version>${revision}</version>
    <packaging>jar</packaging>

    <parent>
        <artifactId>km</artifactId>
        <groupId>com.xiaojukeji.kafka</groupId>
        <version>${km.revision}</version>
        <version>${revision}</version>
    </parent>

    <dependencies>

@@ -44,7 +44,7 @@ public class ConnectConnectorMetricCollector extends AbstractConnectMetricCollec
        Long connectClusterId = connectCluster.getId();

        List<VersionControlItem> items = versionControlService.listVersionControlItem(this.getClusterVersion(connectCluster), collectorType().getCode());
        Result<List<String>> connectorList = connectorService.listConnectorsFromCluster(connectClusterId);
        Result<List<String>> connectorList = connectorService.listConnectorsFromCluster(connectCluster);

        FutureWaitUtil<Void> future = this.getFutureUtilByClusterPhyId(connectClusterId);

@@ -82,6 +82,11 @@ public class ConnectConnectorMetricCollector extends AbstractConnectMetricCollec

        for (VersionControlItem v : items) {
            try {
                // Skip metrics that have already been measured
                if (metrics.getMetrics().get(v.getName()) != null) {
                    continue;
                }

                Result<ConnectorMetrics> ret = connectorMetricService.collectConnectClusterMetricsFromKafka(connectClusterId, connectorName, v.getName(), connectorType);
                if (null == ret || ret.failed() || null == ret.getData()) {
                    continue;

@@ -0,0 +1,117 @@
package com.xiaojukeji.know.streaming.km.collector.metric.connect.mm2;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.metric.connect.AbstractConnectMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionControlItem;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.mm2.MirrorMakerMetricEvent;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerService;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_MIRROR_MAKER;

/**
 * @author wyb
 * @date 2022/12/15
 */
@Component
public class MirrorMakerMetricCollector extends AbstractConnectMetricCollector<MirrorMakerMetrics> {
    protected static final ILog LOGGER = LogFactory.getLog(MirrorMakerMetricCollector.class);

    @Autowired
    private VersionControlService versionControlService;

    @Autowired
    private MirrorMakerService mirrorMakerService;

    @Autowired
    private ConnectorService connectorService;

    @Autowired
    private MirrorMakerMetricService mirrorMakerMetricService;

    @Override
    public VersionItemTypeEnum collectorType() {
        return METRIC_CONNECT_MIRROR_MAKER;
    }

    @Override
    public List<MirrorMakerMetrics> collectConnectMetrics(ConnectCluster connectCluster) {
        Long clusterPhyId = connectCluster.getKafkaClusterPhyId();
        Long connectClusterId = connectCluster.getId();

        List<ConnectorPO> mirrorMakerList = connectorService.listByConnectClusterIdFromDB(connectClusterId).stream().filter(elem -> elem.getConnectorClassName().equals(MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
        Map<String, MirrorMakerTopic> mirrorMakerTopicMap = mirrorMakerService.getMirrorMakerTopicMap(connectClusterId).getData();

        List<VersionControlItem> items = versionControlService.listVersionControlItem(this.getClusterVersion(connectCluster), collectorType().getCode());
        FutureWaitUtil<Void> future = this.getFutureUtilByClusterPhyId(clusterPhyId);

        List<MirrorMakerMetrics> metricsList = new ArrayList<>();

        for (ConnectorPO mirrorMaker : mirrorMakerList) {
            MirrorMakerMetrics metrics = new MirrorMakerMetrics(clusterPhyId, connectClusterId, mirrorMaker.getConnectorName());
            metricsList.add(metrics);

            List<MirrorMakerTopic> mirrorMakerTopicList = mirrorMakerService.getMirrorMakerTopicList(mirrorMaker, mirrorMakerTopicMap);
            future.runnableTask(String.format("class=MirrorMakerMetricCollector||connectClusterId=%d||mirrorMakerName=%s", connectClusterId, mirrorMaker.getConnectorName()),
                    30000,
                    () -> collectMetrics(connectClusterId, mirrorMaker.getConnectorName(), metrics, items, mirrorMakerTopicList));
        }
        future.waitResult(30000);

        this.publishMetric(new MirrorMakerMetricEvent(this, metricsList));

        return metricsList;
    }

    /**************************************************** private method ****************************************************/
    private void collectMetrics(Long connectClusterId, String mirrorMakerName, MirrorMakerMetrics metrics, List<VersionControlItem> items, List<MirrorMakerTopic> mirrorMakerTopicList) {
        long startTime = System.currentTimeMillis();
        metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, Constant.COLLECT_METRICS_ERROR_COST_TIME);

        for (VersionControlItem v : items) {
            try {
                // Skip metrics that have already been measured
                if (metrics.getMetrics().get(v.getName()) != null) {
                    continue;
                }

                Result<MirrorMakerMetrics> ret = mirrorMakerMetricService.collectMirrorMakerMetricsFromKafka(connectClusterId, mirrorMakerName, mirrorMakerTopicList, v.getName());
                if (ret == null || !ret.hasData()) {
                    continue;
                }
                metrics.putMetric(ret.getData().getMetrics());

            } catch (Exception e) {
                LOGGER.error(
                        "method=collectMetrics||connectClusterId={}||mirrorMakerName={}||metric={}||errMsg=exception!",
                        connectClusterId, mirrorMakerName, v.getName(), e
                );

            }
        }
        metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);

    }
}

@@ -1,114 +0,0 @@
package com.xiaojukeji.know.streaming.km.collector.metric.kafka;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ReplicationMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionControlItem;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ReplicaMetricEvent;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.ArrayList;
import java.util.List;

import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_REPLICATION;

/**
 * @author didi
 */
@Component
public class ReplicaMetricCollector extends AbstractKafkaMetricCollector<ReplicationMetrics> {
    protected static final ILog LOGGER = LogFactory.getLog(ReplicaMetricCollector.class);

    @Autowired
    private VersionControlService versionControlService;

    @Autowired
    private ReplicaMetricService replicaMetricService;

    @Autowired
    private PartitionService partitionService;

    @Override
    public List<ReplicationMetrics> collectKafkaMetrics(ClusterPhy clusterPhy) {
        Long clusterPhyId = clusterPhy.getId();
        List<Partition> partitions = partitionService.listPartitionFromCacheFirst(clusterPhyId);
        List<VersionControlItem> items = versionControlService.listVersionControlItem(this.getClusterVersion(clusterPhy), collectorType().getCode());

        FutureWaitUtil<Void> future = this.getFutureUtilByClusterPhyId(clusterPhyId);

        List<ReplicationMetrics> metricsList = new ArrayList<>();
        for (Partition partition : partitions) {
            for (Integer brokerId : partition.getAssignReplicaList()) {
                ReplicationMetrics metrics = new ReplicationMetrics(clusterPhyId, partition.getTopicName(), brokerId, partition.getPartitionId());
                metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, Constant.COLLECT_METRICS_ERROR_COST_TIME);
                metricsList.add(metrics);

                future.runnableTask(
                        String.format("class=ReplicaMetricCollector||clusterPhyId=%d||brokerId=%d||topicName=%s||partitionId=%d",
                                clusterPhyId, brokerId, partition.getTopicName(), partition.getPartitionId()),
                        30000,
                        () -> collectMetrics(clusterPhyId, metrics, items)
                );
            }
        }

        future.waitExecute(30000);

        publishMetric(new ReplicaMetricEvent(this, metricsList));

        return metricsList;
    }

    @Override
    public VersionItemTypeEnum collectorType() {
        return METRIC_REPLICATION;
    }

    /**************************************************** private method ****************************************************/

    private ReplicationMetrics collectMetrics(Long clusterPhyId, ReplicationMetrics metrics, List<VersionControlItem> items) {
        long startTime = System.currentTimeMillis();

        for (VersionControlItem v : items) {
            try {
                if (metrics.getMetrics().containsKey(v.getName())) {
                    continue;
                }

                Result<ReplicationMetrics> ret = replicaMetricService.collectReplicaMetricsFromKafka(
                        clusterPhyId,
                        metrics.getTopic(),
                        metrics.getBrokerId(),
                        metrics.getPartitionId(),
                        v.getName()
                );

                if (null == ret || ret.failed() || null == ret.getData()) {
                    continue;
                }

                metrics.putMetric(ret.getData().getMetrics());
            } catch (Exception e) {
                LOGGER.error(
                        "method=collectMetrics||clusterPhyId={}||topicName={}||partition={}||metricName={}||errMsg=exception!",
                        clusterPhyId, metrics.getTopic(), metrics.getPartitionId(), v.getName(), e
                );
            }
        }

        // record how long this collection pass took
        metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);

        return metrics;
    }
}
@@ -1,29 +0,0 @@
package com.xiaojukeji.know.streaming.km.collector.sink.kafka;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.sink.AbstractMetricESSender;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ReplicaMetricEvent;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ReplicationMetricPO;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;

import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.REPLICATION_INDEX;

@Component
public class ReplicaMetricESSender extends AbstractMetricESSender implements ApplicationListener<ReplicaMetricEvent> {
    private static final ILog LOGGER = LogFactory.getLog(ReplicaMetricESSender.class);

    @PostConstruct
    public void init(){
        LOGGER.info("method=init||msg=init finished");
    }

    @Override
    public void onApplicationEvent(ReplicaMetricEvent event) {
        send2es(REPLICATION_INDEX, ConvertUtil.list2List(event.getReplicationMetrics(), ReplicationMetricPO.class));
    }
}
@@ -0,0 +1,33 @@
package com.xiaojukeji.know.streaming.km.collector.sink.mm2;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.sink.AbstractMetricESSender;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.mm2.MirrorMakerMetricEvent;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.mm2.MirrorMakerMetricPO;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;

import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.CONNECT_MM2_INDEX;

/**
 * @author zengqiao
 * @date 2022/12/20
 */
@Component
public class MirrorMakerMetricESSender extends AbstractMetricESSender implements ApplicationListener<MirrorMakerMetricEvent> {
    protected static final ILog LOGGER = LogFactory.getLog(MirrorMakerMetricESSender.class);

    @PostConstruct
    public void init(){
        LOGGER.info("method=init||msg=init finished");
    }

    @Override
    public void onApplicationEvent(MirrorMakerMetricEvent event) {
        send2es(CONNECT_MM2_INDEX, ConvertUtil.list2List(event.getMetricsList(), MirrorMakerMetricPO.class));
    }
}
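Both senders follow the same pipeline: a collector publishes a metric event, and a `@Component` listener converts the in-memory beans to PO objects and bulk-writes them into the matching ES index. A minimal sketch of the pattern with hypothetical `Example*` names (the real pairs are the `Replica*` and `MirrorMaker*` classes above):

```java
@Component
public class ExampleMetricESSender extends AbstractMetricESSender implements ApplicationListener<ExampleMetricEvent> {
    @Override
    public void onApplicationEvent(ExampleMetricEvent event) {
        // convert the collected beans into persistence objects, then bulk-write them to ES
        send2es(EXAMPLE_INDEX, ConvertUtil.list2List(event.getMetricsList(), ExampleMetricPO.class));
    }
}
```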
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-common</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>

     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
     </parent>

     <properties>
@@ -81,10 +81,6 @@
             <version>3.0.2</version>
         </dependency>

-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
         <dependency>
             <groupId>org.projectlombok</groupId>
             <artifactId>lombok</artifactId>
@@ -1,19 +0,0 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.cluster;

import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationMulFuzzySearchDTO;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;

/**
 * @author zengqiao
 * @date 22/02/24
 */
@Data
public class ClusterGroupsOverviewDTO extends PaginationMulFuzzySearchDTO {
    @ApiModelProperty("查找该Topic")
    private String topicName;

    @ApiModelProperty("查找该Group")
    private String groupName;
}
@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.cluster;

import lombok.Data;

/**
 * @author zengqiao
 * @date 22/12/12
 */
@Data
public class ClusterMirrorMakersOverviewDTO extends ClusterConnectorsOverviewDTO {
}
@@ -19,11 +19,11 @@ import javax.validation.constraints.NotNull;
 public class ClusterConnectorDTO extends BaseDTO {
     @NotNull(message = "connectClusterId不允许为空")
     @ApiModelProperty(value = "Connector集群ID", example = "1")
-    private Long connectClusterId;
+    protected Long connectClusterId;

     @NotBlank(message = "name不允许为空串")
     @ApiModelProperty(value = "Connector名称", example = "know-streaming-connector")
-    private String connectorName;
+    protected String connectorName;

     public ClusterConnectorDTO(Long connectClusterId, String connectorName) {
         this.connectClusterId = connectClusterId;
@@ -1,21 +0,0 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector;

import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;

import javax.validation.constraints.NotNull;
import java.util.Properties;

/**
 * @author zengqiao
 * @date 2022-10-17
 */
@Data
@ApiModel(description = "修改Connector配置")
public class ConnectorConfigModifyDTO extends ClusterConnectorDTO {
    @NotNull(message = "configs不允许为空")
    @ApiModelProperty(value = "配置", example = "")
    private Properties configs;
}
@@ -1,11 +1,12 @@
 package com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector;

+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
+import lombok.NoArgsConstructor;

 import javax.validation.constraints.NotNull;
 import java.util.Properties;

 /**
@@ -13,9 +14,23 @@ import java.util.Properties;
  * @date 2022-10-17
  */
 @Data
+@JsonIgnoreProperties(ignoreUnknown = true)
+@NoArgsConstructor
 @ApiModel(description = "创建Connector")
 public class ConnectorCreateDTO extends ClusterConnectorDTO {
     @NotNull(message = "configs不允许为空")
+    @Deprecated
+    @ApiModelProperty(value = "配置, 优先使用config字段,3.5.0版本将删除该字段", example = "")
+    protected Properties configs;
+
     @ApiModelProperty(value = "配置", example = "")
-    private Properties configs;
+    protected Properties config;
+
+    public ConnectorCreateDTO(Long connectClusterId, String connectorName, Properties config) {
+        super(connectClusterId, connectorName);
+        this.config = config;
+    }
+
+    public Properties getSuitableConfig() {
+        return config != null? config: configs;
+    }
 }
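The hunk above keeps the deprecated `configs` field readable while steering new callers to `config`; `getSuitableConfig()` hides the transition from the rest of the code. A minimal usage sketch with hypothetical values:

```java
// new-style caller: populate config via the constructor
Properties props = new Properties();
props.put("connector.class", "org.apache.kafka.connect.mirror.MirrorSourceConnector");
ConnectorCreateDTO dto = new ConnectorCreateDTO(1L, "mm2-source", props);

// getSuitableConfig() prefers config and falls back to the legacy configs field,
// so requests from old clients that still send "configs" keep working
Properties effective = dto.getSuitableConfig();
```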
@@ -0,0 +1,15 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.connect.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector.ConnectorActionDTO;
import io.swagger.annotations.ApiModel;
import lombok.Data;

/**
 * @author zengqiao
 * @date 2022-12-12
 */
@Data
@ApiModel(description = "操作MM2")
public class MirrorMaker2ActionDTO extends ConnectorActionDTO {
}
@@ -0,0 +1,14 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.connect.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector.ConnectorDeleteDTO;
import io.swagger.annotations.ApiModel;
import lombok.Data;

/**
 * @author zengqiao
 * @date 2022-12-12
 */
@Data
@ApiModel(description = "删除MM2")
public class MirrorMaker2DeleteDTO extends ConnectorDeleteDTO {
}
@@ -0,0 +1,69 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.connect.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector.ConnectorCreateDTO;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.apache.kafka.clients.CommonClientConfigs;

import javax.validation.Valid;
import javax.validation.constraints.NotNull;
import java.util.Properties;

/**
 * @author zengqiao
 * @date 2022-12-12
 */
@Data
@ApiModel(description = "创建MM2")
public class MirrorMakerCreateDTO extends ConnectorCreateDTO {
    @NotNull(message = "sourceKafkaClusterId不允许为空")
    @ApiModelProperty(value = "源Kafka集群ID", example = "")
    private Long sourceKafkaClusterId;

    @Valid
    @ApiModelProperty(value = "heartbeat-connector的信息", example = "")
    private Properties heartbeatConnectorConfigs;

    @Valid
    @ApiModelProperty(value = "checkpoint-connector的信息", example = "")
    private Properties checkpointConnectorConfigs;

    public void unifyData(Long sourceKafkaClusterId, String sourceBootstrapServers, Properties sourceKafkaProps,
                          Long targetKafkaClusterId, String targetBootstrapServers, Properties targetKafkaProps) {
        if (sourceKafkaProps == null) {
            sourceKafkaProps = new Properties();
        }

        if (targetKafkaProps == null) {
            targetKafkaProps = new Properties();
        }

        this.unifyData(this.getSuitableConfig(), sourceKafkaClusterId, sourceBootstrapServers, sourceKafkaProps, targetKafkaClusterId, targetBootstrapServers, targetKafkaProps);

        if (heartbeatConnectorConfigs != null) {
            this.unifyData(this.heartbeatConnectorConfigs, sourceKafkaClusterId, sourceBootstrapServers, sourceKafkaProps, targetKafkaClusterId, targetBootstrapServers, targetKafkaProps);
        }

        if (checkpointConnectorConfigs != null) {
            this.unifyData(this.checkpointConnectorConfigs, sourceKafkaClusterId, sourceBootstrapServers, sourceKafkaProps, targetKafkaClusterId, targetBootstrapServers, targetKafkaProps);
        }
    }

    private void unifyData(Properties dataConfig,
                           Long sourceKafkaClusterId, String sourceBootstrapServers, Properties sourceKafkaProps,
                           Long targetKafkaClusterId, String targetBootstrapServers, Properties targetKafkaProps) {
        dataConfig.put(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_ALIAS_FIELD_NAME, sourceKafkaClusterId);
        dataConfig.put(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_FIELD_NAME + "." + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, sourceBootstrapServers);
        for (Object configKey : sourceKafkaProps.keySet()) {
            dataConfig.put(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_FIELD_NAME + "." + configKey, sourceKafkaProps.getProperty((String) configKey));
        }

        dataConfig.put(KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_ALIAS_FIELD_NAME, targetKafkaClusterId);
        dataConfig.put(KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_FIELD_NAME + "." + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, targetBootstrapServers);
        for (Object configKey : targetKafkaProps.keySet()) {
            dataConfig.put(KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_FIELD_NAME + "." + configKey, targetKafkaProps.getProperty((String) configKey));
        }
    }
}
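`unifyData` stamps identical source/target cluster coordinates into the main MM2 config and, when supplied, into the heartbeat and checkpoint connector configs, prefixing every per-cluster Kafka property with the cluster field name. Assuming the constants resolve to MM2's conventional `source.cluster`/`target.cluster` keys (an assumption — the real values live in `KafkaConnectConstant`), a call like the following would populate roughly these properties:

```java
// hypothetical input; the key names below assume the conventional MM2 constants
dto.unifyData(1L, "src-kafka:9092", null, 2L, "dst-kafka:9092", null);
// resulting entries in dto.getSuitableConfig() would then look like:
//   source.cluster.alias              = 1
//   source.cluster.bootstrap.servers  = src-kafka:9092
//   target.cluster.alias              = 2
//   target.cluster.bootstrap.servers  = dst-kafka:9092
```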
@@ -0,0 +1,40 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.group;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;

import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;

/**
 * Delete consumer group offsets
 * @author zengqiao
 * @date 19/4/8
 */
@Data
@JsonIgnoreProperties(ignoreUnknown = true)
public class GroupOffsetDeleteDTO extends BaseDTO {
    @Min(value = 0, message = "clusterPhyId不允许为null或者小于0")
    @ApiModelProperty(value = "集群ID", example = "6")
    private Long clusterPhyId;

    @NotBlank(message = "groupName不允许为空")
    @ApiModelProperty(value = "消费组名称", example = "g-know-streaming")
    private String groupName;

    @ApiModelProperty(value = "Topic名称,按照Topic纬度进行删除时需要传", example = "know-streaming")
    protected String topicName;

    @ApiModelProperty(value = "分区ID,按照分区纬度进行删除时需要传")
    private Integer partitionId;

    /**
     * @see com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum
     */
    @NotNull(message = "deleteType不允许为空")
    @ApiModelProperty(value = "删除类型", example = "0:group纬度,1:Topic纬度,2:Partition纬度")
    private Integer deleteType;
}
@@ -0,0 +1,38 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.ha.mirror;

import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;

import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;

/**
 * @author zengqiao
 * @date 20/4/23
 */
@Data
@ApiModel(description="Topic镜像信息")
public class MirrorTopicCreateDTO extends BaseDTO {
    @Min(value = 0, message = "sourceClusterPhyId不允许为空,且最小值为0")
    @ApiModelProperty(value = "源集群ID", example = "3")
    private Long sourceClusterPhyId;

    @Min(value = 0, message = "destClusterPhyId不允许为空,且最小值为0")
    @ApiModelProperty(value = "目标集群ID", example = "3")
    private Long destClusterPhyId;

    @NotBlank(message = "topicName不允许为空串")
    @ApiModelProperty(value = "Topic名称", example = "mirrorTopic")
    private String topicName;

    @NotNull(message = "syncData不允许为空")
    @ApiModelProperty(value = "同步数据", example = "true")
    private Boolean syncData;

    @NotNull(message = "syncConfig不允许为空")
    @ApiModelProperty(value = "同步配置", example = "false")
    private Boolean syncConfig;
}
@@ -0,0 +1,29 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.ha.mirror;

import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;

import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;

/**
 * @author zengqiao
 * @date 20/4/23
 */
@Data
@ApiModel(description="Topic镜像信息")
public class MirrorTopicDeleteDTO extends BaseDTO {
    @Min(value = 0, message = "sourceClusterPhyId不允许为空,且最小值为0")
    @ApiModelProperty(value = "源集群ID", example = "3")
    private Long sourceClusterPhyId;

    @Min(value = 0, message = "destClusterPhyId不允许为空,且最小值为0")
    @ApiModelProperty(value = "目标集群ID", example = "3")
    private Long destClusterPhyId;

    @NotBlank(message = "topicName不允许为空串")
    @ApiModelProperty(value = "Topic名称", example = "mirrorTopic")
    private String topicName;
}
@@ -0,0 +1,23 @@
package com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;

/**
 * @author didi
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@ApiModel(description = "MirrorMaker指标查询信息")
public class MetricsMirrorMakersDTO extends MetricDTO {
    @ApiModelProperty("MirrorMaker的SourceConnect列表")
    private List<ClusterConnectorDTO> connectorNameList;
}
@@ -4,6 +4,8 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.broker;
 import com.alibaba.fastjson.TypeReference;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData;
 import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO;
+import com.xiaojukeji.know.streaming.km.common.constant.Constant;
+import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum;
 import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import lombok.AllArgsConstructor;
 import lombok.Data;
@@ -71,10 +73,10 @@ public class Broker implements Serializable {
         metadata.setBrokerId(node.id());
         metadata.setHost(node.host());
         metadata.setPort(node.port());
-        metadata.setJmxPort(-1);
+        metadata.setJmxPort(JmxEnum.UNKNOWN.getPort());
         metadata.setStartTimestamp(startTimestamp);
         metadata.setRack(node.rack());
-        metadata.setStatus(1);
+        metadata.setStatus(Constant.ALIVE);
         return metadata;
     }
@@ -18,5 +18,7 @@ public class ClusterPhysState {

     private Integer downCount;

+    private Integer unknownCount;
+
     private Integer total;
 }
@@ -0,0 +1,29 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.config;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;

import java.io.Serializable;

/**
 * @author zengqiao
 * @date 23/05/19
 */
@Data
@ApiModel(description = "Jmx配置")
public class JmxAuthConfig implements Serializable {
    @ApiModelProperty(value="最大连接", example = "100")
    protected Integer maxConn;

    @ApiModelProperty(value="是否开启SSL,如果开始则username 与 token 必须非空", example = "false")
    protected Boolean openSSL;

    @ApiModelProperty(value="SSL情况下的username", example = "Ks-Km")
    protected String username;

    @ApiModelProperty(value="SSL情况下的token", example = "KsKmCCY19")
    protected String token;
}
@@ -1,10 +1,12 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.config;

+import com.xiaojukeji.know.streaming.km.common.bean.entity.jmx.ServerIdJmxPort;
+import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;

 import java.io.Serializable;
 import java.util.List;

 /**
  * @author zengqiao
@@ -12,24 +14,69 @@ import java.io.Serializable;
  */
 @Data
 @ApiModel(description = "Jmx配置")
-public class JmxConfig implements Serializable {
-    @ApiModelProperty(value="jmx端口", example = "8099")
+public class JmxConfig extends JmxAuthConfig {
+    @ApiModelProperty(value="jmx端口,最低优先使用的端口", example = "8099")
     private Integer jmxPort;

-    @ApiModelProperty(value="最大连接", example = "100")
-    private Integer maxConn;
-
-    @ApiModelProperty(value="是否开启SSL,如果开始则username 与 token 必须非空", example = "false")
-    private Boolean openSSL;
-
-    @ApiModelProperty(value="SSL情况下的username", example = "Ks-Km")
-    private String username;
-
-    @ApiModelProperty(value="SSL情况下的token", example = "KsKmCCY19")
-    private String token;
-
     @ApiModelProperty(value="使用哪个endpoint网络", example = "EXTERNAL")
     private String useWhichEndpoint;

+    @ApiModelProperty(value="指定server的JMX端口, 最高优先使用的端口", example = "")
+    private List<ServerIdJmxPort> specifiedJmxPortList;
+
+    /**
+     * Select the JMX port that will actually be used
+     * @param serverId        server ID
+     * @param metadataJmxPort JMX port KS read from the cluster metadata
+     */
+    public Integer getFinallyJmxPort(String serverId, Integer metadataJmxPort) {
+        if (specifiedJmxPortList == null || specifiedJmxPortList.isEmpty()) {
+            // no per-server override: prefer the metadata port if present, otherwise the configured jmxPort
+            return this.selectJmxPort(jmxPort, metadataJmxPort);
+        }
+
+        // per-server overrides are configured
+        for (ServerIdJmxPort serverIdJmxPort : specifiedJmxPortList) {
+            if (serverId.equals(serverIdJmxPort.getServerId()) && serverIdJmxPort.getJmxPort() != null) {
+                // this server has an explicitly configured port: use it
+                return serverIdJmxPort.getJmxPort();
+            }
+        }
+
+        return this.selectJmxPort(jmxPort, metadataJmxPort);
+    }
+
+    /**
+     * Select the JMX port that will actually be used
+     * @param serverId serverId
+     */
+    public Integer getFinallyJmxPort(String serverId) {
+        return this.getFinallyJmxPort(serverId, null);
+    }
+
+    /**
+     * Select a JMX port
+     * @param feJmxPort       JMX port configured on the front-end page
+     * @param metadataJmxPort JMX port KS read from the cluster metadata
+     */
+    private Integer selectJmxPort(Integer feJmxPort, Integer metadataJmxPort) {
+        if (metadataJmxPort == null) {
+            return feJmxPort != null? feJmxPort: JmxEnum.NOT_OPEN.getPort();
+        }
+
+        if (JmxEnum.NOT_OPEN.getPort().equals(metadataJmxPort)) {
+            // metadata says JMX is not open: return NOT_OPEN directly
+            return JmxEnum.NOT_OPEN.getPort();
+        }
+
+        if (JmxEnum.UNKNOWN.getPort().equals(metadataJmxPort)) {
+            // metadata says the port is unknown: fall back to feJmxPort, otherwise NOT_OPEN
+            return feJmxPort != null? feJmxPort: JmxEnum.NOT_OPEN.getPort();
+        }
+
+        // in every other case, trust the metadata port
+        return metadataJmxPort;
+    }
 }
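The resolution order is: an entry in `specifiedJmxPortList` for the server wins, then the port discovered in cluster metadata, then the front-end `jmxPort`, with the `JmxEnum` NOT_OPEN/UNKNOWN values acting as sentinels. A minimal sketch of the call (the setters come from Lombok's `@Data`; the concrete sentinel port numbers live in `JmxEnum` and are not assumed here):

```java
JmxConfig cfg = new JmxConfig();
cfg.setJmxPort(8099); // front-end default, lowest precedence

// broker "1" advertises 9999 in its metadata, so the metadata port wins
Integer resolved = cfg.getFinallyJmxPort("1", 9999);  // -> 9999

// no metadata port is known for broker "2": fall back to the configured default
Integer fallback = cfg.getFinallyJmxPort("2", null);  // -> 8099
```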
@@ -13,9 +13,6 @@ import java.util.Properties;
  */
 @ApiModel(description = "ZK配置")
 public class ZKConfig implements Serializable {
-    @ApiModelProperty(value="ZK的jmx配置")
-    private JmxConfig jmxConfig;
-
     @ApiModelProperty(value="ZK是否开启secure", example = "false")
     private Boolean openSecure = false;

@@ -28,14 +25,6 @@ public class ZKConfig implements Serializable {
     @ApiModelProperty(value="ZK的Request超时时间")
     private Properties otherProps = new Properties();

-    public JmxConfig getJmxConfig() {
-        return jmxConfig == null? new JmxConfig(): jmxConfig;
-    }
-
-    public void setJmxConfig(JmxConfig jmxConfig) {
-        this.jmxConfig = jmxConfig;
-    }
-
     public Boolean getOpenSecure() {
         return openSecure != null && openSecure;
     }
@@ -53,7 +42,7 @@ public class ZKConfig implements Serializable {
     }

     public Integer getRequestTimeoutUnitMs() {
-        return requestTimeoutUnitMs == null? Constant.DEFAULT_REQUEST_TIMEOUT_UNIT_MS: requestTimeoutUnitMs;
+        return requestTimeoutUnitMs == null? Constant.DEFAULT_SESSION_TIMEOUT_UNIT_MS: requestTimeoutUnitMs;
     }

     public void setRequestTimeoutUnitMs(Integer requestTimeoutUnitMs) {
@@ -1,6 +1,7 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.connect;

 import com.xiaojukeji.know.streaming.km.common.bean.entity.EntityIdInterface;
+import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
 import lombok.Data;

 import java.io.Serializable;
@@ -54,6 +55,22 @@ public class ConnectCluster implements Serializable, Comparable<ConnectCluster>,
      */
     private String clusterUrl;

+    public String getSuitableRequestUrl() {
+        // prefer the URL the user filled in
+        String suitableRequestUrl = this.clusterUrl;
+        if (ValidateUtils.isBlank(suitableRequestUrl)) {
+            // the user left it blank, so use the URL from the cluster metadata
+            suitableRequestUrl = this.memberLeaderUrl;
+        }
+
+        // strip a trailing slash from the URL
+        if (suitableRequestUrl.length() > 0 && suitableRequestUrl.charAt(suitableRequestUrl.length() - 1) == '/') {
+            return suitableRequestUrl.substring(0, suitableRequestUrl.length() - 1);
+        }
+
+        return suitableRequestUrl;
+    }
+
     @Override
     public int compareTo(ConnectCluster connectCluster) {
         return this.id.compareTo(connectCluster.getId());
@@ -7,7 +7,6 @@ import lombok.Data;
 import lombok.NoArgsConstructor;

 import java.io.Serializable;
-import java.net.URI;

 @Data
 @NoArgsConstructor
@@ -45,4 +45,14 @@ public class KSConnector implements Serializable {
      * Status
      */
     private String state;
+
+    /**
+     * Name of the heartbeat connector
+     */
+    private String heartbeatConnectorName;
+
+    /**
+     * Name of the checkpoint connector
+     */
+    private String checkpointConnectorName;
 }
@@ -0,0 +1,33 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.Map;

/**
 * @author wyb
 * @date 2022/12/14
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class MirrorMakerTopic {

    /**
     * Alias of the MM2 cluster
     */
    private String clusterAlias;

    /**
     * Topic name
     */
    private String topicName;

    /**
     * Distribution of partitions across Connect workers: Map<PartitionId, WorkerId>
     */
    private Map<Integer, String> partitionMap;

}
@@ -0,0 +1,23 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.ha;

import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
import com.xiaojukeji.know.streaming.km.common.enums.ha.HaResTypeEnum;
import lombok.Data;

@Data
public class HaActiveStandbyRelation extends BasePO {
    private Long activeClusterPhyId;

    private Long standbyClusterPhyId;

    /**
     * Resource name
     */
    private String resName;

    /**
     * Resource type. 0: cluster, 1: mirror topic, 2: active-standby topic
     * @see HaResTypeEnum
     */
    private Integer resType;
}
@@ -0,0 +1,25 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.jmx;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.io.Serializable;

/**
 * @author didi
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class ServerIdJmxPort implements Serializable {
    /**
     * serverID
     */
    private String serverId;

    /**
     * JMX port
     */
    private Integer jmxPort;
}
@@ -27,6 +27,10 @@ public abstract class BaseMetrics implements Serializable {
     protected Map<String, Float> metrics = new ConcurrentHashMap<>();

     public void putMetric(String key, Float value){
+        if (value == null || key == null) {
+            return;
+        }
+
         metrics.put(key, value);
     }
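The added guard matters because ConcurrentHashMap rejects null keys and values with a NullPointerException, so a single metric that failed to resolve would otherwise abort the whole collection pass. A tiny self-contained demonstration:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NullMetricDemo {
    public static void main(String[] args) {
        Map<String, Float> metrics = new ConcurrentHashMap<>();
        try {
            metrics.put("HealthScore", null); // ConcurrentHashMap forbids null values
        } catch (NullPointerException e) {
            System.out.println("put(key, null) throws NPE, hence the guard in putMetric");
        }
    }
}
```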
@@ -1,7 +1,6 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;

 import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
 import lombok.AllArgsConstructor;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
@@ -12,20 +11,18 @@ import lombok.ToString;
  */
 @Data
 @NoArgsConstructor
 @AllArgsConstructor
 @ToString
 public class ConnectClusterMetrics extends BaseMetrics {
-    private Long connectClusterId;
+    protected Long connectClusterId;

-    public ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId){
+    public ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId ){
         super(clusterPhyId);
         this.connectClusterId = connectClusterId;
     }

-    public static ConnectClusterMetrics initWithMetric(Long connectClusterId, String metric, Float value) {
-        ConnectClusterMetrics brokerMetrics = new ConnectClusterMetrics(connectClusterId, connectClusterId);
-        brokerMetrics.putMetric(metric, value);
-        return brokerMetrics;
+    public ConnectClusterMetrics(Long connectClusterId, String metricName, Float metricValue) {
+        this(null, connectClusterId);
+        this.putMetric(metricName, metricValue);
     }

     @Override
@@ -1,7 +1,5 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;

-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
-import lombok.AllArgsConstructor;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
@@ -11,25 +9,19 @@ import lombok.ToString;
  * @date 2022/11/2
  */
 @Data
-@AllArgsConstructor
 @NoArgsConstructor
 @ToString
-public class ConnectWorkerMetrics extends BaseMetrics {
-
-    private Long connectClusterId;
-
+public class ConnectWorkerMetrics extends ConnectClusterMetrics {
     private String workerId;

-    public static ConnectWorkerMetrics initWithMetric(Long connectClusterId, String workerId, String metric, Float value) {
-        ConnectWorkerMetrics connectWorkerMetrics = new ConnectWorkerMetrics();
-        connectWorkerMetrics.setConnectClusterId(connectClusterId);
-        connectWorkerMetrics.setWorkerId(workerId);
-        connectWorkerMetrics.putMetric(metric, value);
-        return connectWorkerMetrics;
+    public ConnectWorkerMetrics(Long connectClusterId, String workerId, String metricName, Float metricValue) {
+        super(null, connectClusterId);
+        this.workerId = workerId;
+        this.putMetric(metricName, metricValue);
     }

     @Override
     public String unique() {
-        return "KCC@" + clusterPhyId + "@" + connectClusterId + "@" + workerId;
+        return "KCW@" + clusterPhyId + "@" + connectClusterId + "@" + workerId;
     }
 }
@@ -1,6 +1,5 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;

-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
@@ -12,24 +11,21 @@ import lombok.ToString;
 @Data
 @NoArgsConstructor
 @ToString
-public class ConnectorMetrics extends BaseMetrics {
-    private Long connectClusterId;
+public class ConnectorMetrics extends ConnectClusterMetrics {
+    protected String connectorName;

-    private String connectorName;
-
-    private String connectorNameAndClusterId;
+    protected String connectorNameAndClusterId;

     public ConnectorMetrics(Long connectClusterId, String connectorName) {
-        super(null);
-        this.connectClusterId = connectClusterId;
+        super(null, connectClusterId);
         this.connectorName = connectorName;
         this.connectorNameAndClusterId = connectorName + "#" + connectClusterId;
     }

-    public static ConnectorMetrics initWithMetric(Long connectClusterId, String connectorName, String metricName, Float value) {
-        ConnectorMetrics metrics = new ConnectorMetrics(connectClusterId, connectorName);
-        metrics.putMetric(metricName, value);
-        return metrics;
+    public ConnectorMetrics(Long connectClusterId, String connectorName, String metricName, Float metricValue) {
+        this(connectClusterId, connectorName);
+        this.putMetric(metricName, metricValue);
     }

     @Override
@@ -1,7 +1,5 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;

-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
-import lombok.AllArgsConstructor;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
@@ -13,11 +11,7 @@ import lombok.ToString;
 @Data
 @NoArgsConstructor
 @ToString
-public class ConnectorTaskMetrics extends BaseMetrics {
-    private Long connectClusterId;
-
-    private String connectorName;
-
+public class ConnectorTaskMetrics extends ConnectorMetrics {
     private Integer taskId;

     public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer taskId) {
@@ -26,14 +20,13 @@ public class ConnectorTaskMetrics extends BaseMetrics {
         this.taskId = taskId;
     }

-    public static ConnectorTaskMetrics initWithMetric(Long connectClusterId, String connectorName, Integer taskId, String metricName, Float value) {
-        ConnectorTaskMetrics metrics = new ConnectorTaskMetrics(connectClusterId, connectorName, taskId);
-        metrics.putMetric(metricName,value);
-        return metrics;
+    public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer taskId, String metricName, Float metricValue) {
+        this(connectClusterId, connectorName, taskId);
+        this.putMetric(metricName, metricValue);
     }

     @Override
     public String unique() {
-        return "KCOR@" + connectClusterId + "@" + connectorName + "@" + taskId;
+        return "KCORT@" + connectClusterId + "@" + connectorName + "@" + taskId;
     }
 }
@@ -0,0 +1,46 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;

/**
 * @author zengqiao
 * @date 20/6/17
 */
@Data
@NoArgsConstructor
@ToString
public class MirrorMakerMetrics extends BaseMetrics {
    private Long connectClusterId;

    private String connectorName;

    private String connectorNameAndClusterId;

    public MirrorMakerMetrics(Long connectClusterId, String connectorName) {
        super(null);
        this.connectClusterId = connectClusterId;
        this.connectorName = connectorName;
        this.connectorNameAndClusterId = connectorName + "#" + connectClusterId;
    }

    public MirrorMakerMetrics(Long clusterPhyId, Long connectClusterId, String connectorName) {
        super(clusterPhyId);
        this.connectClusterId = connectClusterId;
        this.connectorName = connectorName;
        this.connectorNameAndClusterId = connectorName + "#" + connectClusterId;
    }

    public static MirrorMakerMetrics initWithMetric(Long connectClusterId, String connectorName, String metricName, Float value) {
        MirrorMakerMetrics metrics = new MirrorMakerMetrics(connectClusterId, connectorName);
        metrics.putMetric(metricName, value);
        return metrics;
    }

    @Override
    public String unique() {
        return "KCOR@" + connectClusterId + "@" + connectorName;
    }
}
@@ -0,0 +1,38 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

/**
 * @author wyb
 * @date 2022/12/16
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class MirrorMakerTopicPartitionMetrics extends BaseMetrics {
    private Long connectClusterId;

    private String mirrorMakerName;

    private String clusterAlias;

    private String topicName;

    private Integer partitionId;

    private String workerId;

    @Override
    public String unique() {
        return "KCOR@" + connectClusterId + "@" + mirrorMakerName + "@" + clusterAlias + "@" + workerId + "@" + topicName + "@" + partitionId;
    }

    public static MirrorMakerTopicPartitionMetrics initWithMetric(Long connectClusterId, String mirrorMakerName, String clusterAlias, String topicName, Integer partitionId, String workerId, String metricName, Float value) {
        MirrorMakerTopicPartitionMetrics metrics = new MirrorMakerTopicPartitionMetrics(connectClusterId, mirrorMakerName, clusterAlias, topicName, partitionId, workerId);
        metrics.putMetric(metricName, value);
        return metrics;
    }
}
@@ -1,7 +1,5 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.param.connect;

-import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ConnectClusterParam;
 import lombok.AllArgsConstructor;
 import lombok.Data;
@@ -18,9 +16,12 @@ public class ConnectorParam extends ConnectClusterParam {

     private String connectorName;

-    public ConnectorParam(Long connectClusterId, String connectorName) {
+    private String connectorType;
+
+    public ConnectorParam(Long connectClusterId, String connectorName, String connectorType) {
         super(connectClusterId);
         this.connectorName = connectorName;
+        this.connectorType = connectorType;
     }
 }
@@ -0,0 +1,32 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.connect.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ConnectClusterParam;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;

/**
 * @author wyb
 * @date 2022/12/21
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class MirrorMakerParam extends ConnectClusterParam {

    private String mirrorMakerName;

    private String connectorType;

    List<MirrorMakerTopic> mirrorMakerTopicList;

    public MirrorMakerParam(Long connectClusterId, String connectorType, String mirrorMakerName, List<MirrorMakerTopic> mirrorMakerTopicList) {
        super(connectClusterId);
        this.mirrorMakerName = mirrorMakerName;
        this.connectorType = connectorType;
        this.mirrorMakerTopicList = mirrorMakerTopicList;
    }
}
@@ -0,0 +1,16 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group;

import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@NoArgsConstructor
public class DeleteGroupParam extends GroupParam {
    protected DeleteGroupTypeEnum deleteGroupTypeEnum;

    public DeleteGroupParam(Long clusterPhyId, String groupName, DeleteGroupTypeEnum deleteGroupTypeEnum) {
        super(clusterPhyId, groupName);
        this.deleteGroupTypeEnum = deleteGroupTypeEnum;
    }
}
@@ -0,0 +1,16 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group;

import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@NoArgsConstructor
public class DeleteGroupTopicParam extends DeleteGroupParam {
    protected String topicName;

    public DeleteGroupTopicParam(Long clusterPhyId, String groupName, DeleteGroupTypeEnum deleteGroupTypeEnum, String topicName) {
        super(clusterPhyId, groupName, deleteGroupTypeEnum);
        this.topicName = topicName;
    }
}
@@ -0,0 +1,16 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group;

import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@NoArgsConstructor
public class DeleteGroupTopicPartitionParam extends DeleteGroupTopicParam {
    protected Integer partitionId;

    public DeleteGroupTopicPartitionParam(Long clusterPhyId, String groupName, DeleteGroupTypeEnum deleteGroupTypeEnum, String topicName, Integer partitionId) {
        super(clusterPhyId, groupName, deleteGroupTypeEnum, topicName);
        this.partitionId = partitionId;
    }
}
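The three added param classes form a chain (group → topic → partition), each layer adding only the field its deletion scope needs, and the DTO's `deleteType` maps onto `DeleteGroupTypeEnum`. A sketch of the service-side dispatch; the enum constant names (GROUP/TOPIC/PARTITION) are assumptions here — the real ones live in `DeleteGroupTypeEnum`:

```java
// hypothetical dispatch; constant names are assumed for illustration
DeleteGroupParam param = new DeleteGroupTopicPartitionParam(
        1L, "g-know-streaming", DeleteGroupTypeEnum.PARTITION, "know-streaming", 0);

switch (param.getDeleteGroupTypeEnum()) {
    case GROUP:     /* drop every offset the group owns */        break;
    case TOPIC:     /* drop offsets for one topic only */         break;
    case PARTITION: /* drop the offset of a single partition */   break;
}
```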
@@ -1,13 +1,11 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group;

import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@NoArgsConstructor
@AllArgsConstructor
public class GroupParam extends ClusterPhyParam {
    protected String groupName;
@@ -0,0 +1,26 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.connect.mm2;

import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.MetricParam;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;

/**
 * @author wyb
 * @date 2022/12/15
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class MirrorMakerMetricParam extends MetricParam {
    private Long connectClusterId;

    private String mirrorMakerName;

    private List<MirrorMakerTopic> mirrorMakerTopicList;

    private String metric;
}
@@ -0,0 +1,29 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic;

import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@NoArgsConstructor
@AllArgsConstructor
public class TopicTruncateParam extends ClusterPhyParam {
    protected String topicName;
    protected long offset;

    public TopicTruncateParam(Long clusterPhyId, String topicName, long offset) {
        super(clusterPhyId);
        this.topicName = topicName;
        this.offset = offset;
    }

    @Override
    public String toString() {
        return "TopicParam{" +
                "clusterPhyId=" + clusterPhyId +
                ", topicName='" + topicName + '\'' +
                ", offset='" + offset + '\'' +
                '}';
    }
}
@@ -23,17 +23,17 @@ import lombok.Data;
 public class MonitorCmdData extends BaseFourLetterWordCmdData {
     private String zkVersion;
     private Float zkAvgLatency;
-    private Long zkMaxLatency;
-    private Long zkMinLatency;
-    private Long zkPacketsReceived;
-    private Long zkPacketsSent;
-    private Long zkNumAliveConnections;
-    private Long zkOutstandingRequests;
+    private Float zkMaxLatency;
+    private Float zkMinLatency;
+    private Float zkPacketsReceived;
+    private Float zkPacketsSent;
+    private Float zkNumAliveConnections;
+    private Float zkOutstandingRequests;
     private String zkServerState;
-    private Long zkZnodeCount;
-    private Long zkWatchCount;
-    private Long zkEphemeralsCount;
-    private Long zkApproximateDataSize;
-    private Long zkOpenFileDescriptorCount;
-    private Long zkMaxFileDescriptorCount;
+    private Float zkZnodeCount;
+    private Float zkWatchCount;
+    private Float zkEphemeralsCount;
+    private Float zkApproximateDataSize;
+    private Float zkOpenFileDescriptorCount;
+    private Float zkMaxFileDescriptorCount;
 }
@@ -18,13 +18,13 @@ import lombok.Data;
 public class ServerCmdData extends BaseFourLetterWordCmdData {
     private String zkVersion;
     private Float zkAvgLatency;
-    private Long zkMaxLatency;
-    private Long zkMinLatency;
-    private Long zkPacketsReceived;
-    private Long zkPacketsSent;
-    private Long zkNumAliveConnections;
-    private Long zkOutstandingRequests;
+    private Float zkMaxLatency;
+    private Float zkMinLatency;
+    private Float zkPacketsReceived;
+    private Float zkPacketsSent;
+    private Float zkNumAliveConnections;
+    private Float zkOutstandingRequests;
     private String zkServerState;
-    private Long zkZnodeCount;
+    private Float zkZnodeCount;
     private Long zkZxid;
 }
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletter
 import com.didiglobal.logi.log.ILog;
 import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.MonitorCmdData;
+import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.FourLetterWordUtil;
 import lombok.Data;

@@ -50,53 +51,53 @@ public class MonitorCmdDataParser implements FourLetterWordDataParser<MonitorCmd
         }

         MonitorCmdData monitorCmdData = new MonitorCmdData();
-        dataMap.entrySet().stream().forEach(elem -> {
+        dataMap.entrySet().forEach(elem -> {
             try {
                 switch (elem.getKey()) {
                     case "zk_version":
                         monitorCmdData.setZkVersion(elem.getValue().split("-")[0]);
                         break;
                     case "zk_avg_latency":
-                        monitorCmdData.setZkAvgLatency(Float.valueOf(elem.getValue()));
+                        monitorCmdData.setZkAvgLatency(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_max_latency":
-                        monitorCmdData.setZkMaxLatency(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkMaxLatency(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_min_latency":
-                        monitorCmdData.setZkMinLatency(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkMinLatency(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_packets_received":
-                        monitorCmdData.setZkPacketsReceived(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkPacketsReceived(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_packets_sent":
-                        monitorCmdData.setZkPacketsSent(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkPacketsSent(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_num_alive_connections":
-                        monitorCmdData.setZkNumAliveConnections(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkNumAliveConnections(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_outstanding_requests":
-                        monitorCmdData.setZkOutstandingRequests(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkOutstandingRequests(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_server_state":
                         monitorCmdData.setZkServerState(elem.getValue());
                         break;
                     case "zk_znode_count":
-                        monitorCmdData.setZkZnodeCount(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkZnodeCount(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_watch_count":
-                        monitorCmdData.setZkWatchCount(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkWatchCount(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_ephemerals_count":
-                        monitorCmdData.setZkEphemeralsCount(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkEphemeralsCount(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_approximate_data_size":
-                        monitorCmdData.setZkApproximateDataSize(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkApproximateDataSize(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_open_file_descriptor_count":
-                        monitorCmdData.setZkOpenFileDescriptorCount(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkOpenFileDescriptorCount(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "zk_max_file_descriptor_count":
-                        monitorCmdData.setZkMaxFileDescriptorCount(Long.valueOf(elem.getValue()));
+                        monitorCmdData.setZkMaxFileDescriptorCount(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "Proposal sizes last/min/max":
                     case "zk_fsync_threshold_exceed_count":
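Every bare `Float.valueOf`/`Long.valueOf` becomes `ConvertUtil.string2Float`, presumably so a non-numeric value in the four-letter-word output degrades to a missing field instead of an exception inside the switch. A sketch of what such a helper plausibly looks like (an assumption — the real implementation lives in `ConvertUtil`):

```java
// hypothetical sketch of ConvertUtil.string2Float; the real method may differ
public static Float string2Float(String value) {
    try {
        return value == null ? null : Float.valueOf(value.trim());
    } catch (NumberFormatException e) {
        // tolerate malformed four-letter-word output instead of failing the whole parse
        return null;
    }
}
```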
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletter
 import com.didiglobal.logi.log.ILog;
 import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.fourletterword.ServerCmdData;
+import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.FourLetterWordUtil;
 import lombok.Data;

@@ -45,7 +46,7 @@ public class ServerCmdDataParser implements FourLetterWordDataParser<ServerCmdDa
         }

         ServerCmdData serverCmdData = new ServerCmdData();
-        dataMap.entrySet().stream().forEach(elem -> {
+        dataMap.entrySet().forEach(elem -> {
             try {
                 switch (elem.getKey()) {
                     case "Zookeeper version":
@@ -53,27 +54,27 @@ public class ServerCmdDataParser implements FourLetterWordDataParser<ServerCmdDa
                         break;
                     case "Latency min/avg/max":
                         String[] data = elem.getValue().split("/");
-                        serverCmdData.setZkMinLatency(Long.valueOf(data[0]));
-                        serverCmdData.setZkAvgLatency(Float.valueOf(data[1]));
-                        serverCmdData.setZkMaxLatency(Long.valueOf(data[2]));
+                        serverCmdData.setZkMinLatency(ConvertUtil.string2Float(data[0]));
+                        serverCmdData.setZkAvgLatency(ConvertUtil.string2Float(data[1]));
+                        serverCmdData.setZkMaxLatency(ConvertUtil.string2Float(data[2]));
                         break;
                     case "Received":
-                        serverCmdData.setZkPacketsReceived(Long.valueOf(elem.getValue()));
+                        serverCmdData.setZkPacketsReceived(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "Sent":
-                        serverCmdData.setZkPacketsSent(Long.valueOf(elem.getValue()));
+                        serverCmdData.setZkPacketsSent(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "Connections":
-                        serverCmdData.setZkNumAliveConnections(Long.valueOf(elem.getValue()));
+                        serverCmdData.setZkNumAliveConnections(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "Outstanding":
-                        serverCmdData.setZkOutstandingRequests(Long.valueOf(elem.getValue()));
+                        serverCmdData.setZkOutstandingRequests(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "Mode":
                         serverCmdData.setZkServerState(elem.getValue());
                         break;
                     case "Node count":
-                        serverCmdData.setZkZnodeCount(Long.valueOf(elem.getValue()));
+                        serverCmdData.setZkZnodeCount(ConvertUtil.string2Float(elem.getValue()));
                         break;
                     case "Zxid":
                         serverCmdData.setZkZxid(Long.parseUnsignedLong(elem.getValue().trim().substring(2), 16));
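Note: both parsers now route numeric fields through ConvertUtil.string2Float, which is project utility code not shown in this compare. A minimal sketch of the assumed behavior (hypothetical implementation: trim the input and return null instead of throwing, so one malformed field does not abort the whole parse):

    public final class ConvertUtil {
        private ConvertUtil() {
        }

        // Parse a string into a Float; return null rather than throwing
        // when the input is missing or not a number.
        public static Float string2Float(String value) {
            if (value == null) {
                return null;
            }
            try {
                return Float.valueOf(value.trim());
            } catch (NumberFormatException e) {
                return null;
            }
        }
    }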
@@ -0,0 +1,16 @@
+package com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect;
+
+import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyBaseEvent;
+import lombok.Getter;
+
+/**
+ * Cluster deleted event
+ * @author zengqiao
+ * @date 23/08/15
+ */
+@Getter
+public class ClusterPhyDeletedEvent extends ClusterPhyBaseEvent {
+    public ClusterPhyDeletedEvent(Object source, Long clusterPhyId) {
+        super(source, clusterPhyId);
+    }
+}
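Note: judging by the (source, clusterPhyId) constructor, ClusterPhyBaseEvent is presumably a Spring ApplicationEvent. If so, a cluster deletion would be broadcast roughly like this (hypothetical caller, not code from this compare):

    import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
    import org.springframework.context.ApplicationEventPublisher;

    public class ClusterPhyDeleteExample {
        private final ApplicationEventPublisher publisher;

        public ClusterPhyDeleteExample(ApplicationEventPublisher publisher) {
            this.publisher = publisher;
        }

        public void deleteCluster(Long clusterPhyId) {
            // ... delete the cluster record itself, then notify listeners
            // (e.g. connect-cluster cleanup) that the physical cluster is gone.
            publisher.publishEvent(new ClusterPhyDeletedEvent(this, clusterPhyId));
        }
    }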
@@ -1,20 +0,0 @@
-package com.xiaojukeji.know.streaming.km.common.bean.event.metric;
-
-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ReplicationMetrics;
-import lombok.Getter;
-
-import java.util.List;
-
-/**
- * @author didi
- */
-@Getter
-public class ReplicaMetricEvent extends BaseMetricEvent{
-
-    private final List<ReplicationMetrics> replicationMetrics;
-
-    public ReplicaMetricEvent(Object source, List<ReplicationMetrics> replicationMetrics) {
-        super( source );
-        this.replicationMetrics = replicationMetrics;
-    }
-}
@@ -0,0 +1,21 @@
+package com.xiaojukeji.know.streaming.km.common.bean.event.metric.mm2;
+
+import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
+import com.xiaojukeji.know.streaming.km.common.bean.event.metric.BaseMetricEvent;
+import lombok.Getter;
+
+import java.util.List;
+
+/**
+ * @author zengqiao
+ * @date 2022/12/20
+ */
+@Getter
+public class MirrorMakerMetricEvent extends BaseMetricEvent {
+    private final List<MirrorMakerMetrics> metricsList;
+
+    public MirrorMakerMetricEvent(Object source, List<MirrorMakerMetrics> metricsList) {
+        super(source);
+        this.metricsList = metricsList;
+    }
+}
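Note: if these metric events follow the same Spring event pattern, a sink that persists MirrorMaker metrics could subscribe as below (hypothetical listener; the real consumer is not part of this compare):

    import com.xiaojukeji.know.streaming.km.common.bean.event.metric.mm2.MirrorMakerMetricEvent;
    import org.springframework.context.event.EventListener;
    import org.springframework.stereotype.Component;

    @Component
    public class MirrorMakerMetricEventListener {

        @EventListener
        public void onMirrorMakerMetrics(MirrorMakerMetricEvent event) {
            // For example: convert each MirrorMakerMetrics entry into a
            // MirrorMakerMetricPO (added later in this compare) and batch-write it to ES.
            event.getMetricsList().forEach(metrics -> {
                // persist(metrics);   // hypothetical sink
            });
        }
    }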
@@ -29,7 +29,7 @@ public class ConnectClusterPO extends BasePO {
     private Integer state;

     /**
-     * Cluster address
+     * Cluster address filled in by the user
      */
     private String clusterUrl;

@@ -47,4 +47,14 @@ public class ConnectorPO extends BasePO {
      * State
      */
     private String state;
+
+    /**
+     * Heartbeat-check connector
+     */
+    private String heartbeatConnectorName;
+
+    /**
+     * Checkpoint (progress-confirmation) connector
+     */
+    private String checkpointConnectorName;
 }
@@ -7,6 +7,7 @@ import lombok.Data;
 import lombok.NoArgsConstructor;

 import java.util.Date;
+import java.util.Objects;

 @Data
 @NoArgsConstructor

@@ -37,4 +38,16 @@ public class GroupMemberPO extends BasePO {
         this.memberCount = memberCount;
         this.updateTime = updateTime;
     }
+
+    public boolean equal2GroupMemberPO(GroupMemberPO that) {
+        if (that == null) {
+            return false;
+        }
+
+        return Objects.equals(clusterPhyId, that.clusterPhyId)
+                && Objects.equals(topicName, that.topicName)
+                && Objects.equals(groupName, that.groupName)
+                && Objects.equals(state, that.state)
+                && Objects.equals(memberCount, that.memberCount);
+    }
 }
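Note: equal2GroupMemberPO (and equal2GroupPO below) compare business fields only; updateTime and the BasePO bookkeeping columns are deliberately left out. The likely use is skipping no-op UPDATEs when freshly collected group state matches what is already stored. A sketch under that assumption (the surrounding service and persistence call are hypothetical):

    // Only write rows whose business fields actually changed.
    public void refreshGroupMembers(List<GroupMemberPO> freshList, Map<String, GroupMemberPO> dbByGroupName) {
        for (GroupMemberPO fresh : freshList) {
            GroupMemberPO inDb = dbByGroupName.get(fresh.getGroupName());
            if (fresh.equal2GroupMemberPO(inDb)) {
                continue;   // unchanged: avoid an UPDATE that would only bump updateTime
            }
            // insertOrUpdate(fresh);   // hypothetical persistence call
        }
    }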
@@ -9,6 +9,8 @@ import com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum;
 import lombok.Data;
 import lombok.NoArgsConstructor;

+import java.util.Objects;
+

 @Data
 @NoArgsConstructor

@@ -58,4 +60,18 @@ public class GroupPO extends BasePO {
      */
     private int coordinatorId;

+    public boolean equal2GroupPO(GroupPO groupPO) {
+        if (groupPO == null) {
+            return false;
+        }
+
+        return coordinatorId == groupPO.coordinatorId
+                && Objects.equals(clusterPhyId, groupPO.clusterPhyId)
+                && Objects.equals(type, groupPO.type)
+                && Objects.equals(name, groupPO.name)
+                && Objects.equals(state, groupPO.state)
+                && Objects.equals(memberCount, groupPO.memberCount)
+                && Objects.equals(topicMembers, groupPO.topicMembers)
+                && Objects.equals(partitionAssignor, groupPO.partitionAssignor);
+    }
 }
@@ -0,0 +1,33 @@
+package com.xiaojukeji.know.streaming.km.common.bean.po.ha;
+
+import com.baomidou.mybatisplus.annotation.TableName;
+import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
+import com.xiaojukeji.know.streaming.km.common.constant.Constant;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+@TableName(Constant.MYSQL_HA_TABLE_NAME_PREFIX + "active_standby_relation")
+public class HaActiveStandbyRelationPO extends BasePO {
+    private Long activeClusterPhyId;
+
+    private Long standbyClusterPhyId;
+
+    /**
+     * Resource name
+     */
+    private String resName;
+
+    /**
+     * Resource type. 0: cluster, 1: mirror Topic, 2: active/standby Topic
+     */
+    private Integer resType;
+
+    public HaActiveStandbyRelationPO(Long activeClusterPhyId, Long standbyClusterPhyId, String resName, Integer resType) {
+        this.activeClusterPhyId = activeClusterPhyId;
+        this.standbyClusterPhyId = standbyClusterPhyId;
+        this.resName = resName;
+        this.resType = resType;
+    }
+}
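Note: a quick illustration of how relation rows compose, based only on the constructor and the resType legend above (IDs and names are made up):

    import com.xiaojukeji.know.streaming.km.common.bean.po.ha.HaActiveStandbyRelationPO;

    public class HaRelationExample {
        public static void main(String[] args) {
            // Cluster 1 is active and cluster 2 is its standby (resType 0 = cluster) ...
            HaActiveStandbyRelationPO clusterRelation =
                    new HaActiveStandbyRelationPO(1L, 2L, "demo-cluster-pair", 0);

            // ... and one mirror topic is paired between them (resType 1 = mirror Topic).
            HaActiveStandbyRelationPO mirrorTopicRelation =
                    new HaActiveStandbyRelationPO(1L, 2L, "order-events", 1);
        }
    }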
@@ -0,0 +1,39 @@
+package com.xiaojukeji.know.streaming.km.common.bean.po.metrice.mm2;
+
+import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BaseMetricESPO;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+import static com.xiaojukeji.know.streaming.km.common.utils.CommonUtils.monitorTimestamp2min;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+public class MirrorMakerMetricPO extends BaseMetricESPO {
+    private Long connectClusterId;
+
+    private String connectorName;
+
+    /**
+     * Used for sorting inside ES
+     */
+    private String connectorNameAndClusterId;
+
+    public MirrorMakerMetricPO(Long kafkaClusterPhyId, Long connectClusterId, String connectorName){
+        super(kafkaClusterPhyId);
+        this.connectClusterId = connectClusterId;
+        this.connectorName = connectorName;
+        this.connectorNameAndClusterId = connectorName + "#" + connectClusterId;
+    }
+
+    @Override
+    public String getKey() {
+        return "KCOR@" + clusterPhyId + "@" + connectClusterId + "@" + connectorName + "@" + monitorTimestamp2min(timestamp);
+    }
+
+    @Override
+    public String getRoutingValue() {
+        return String.valueOf(connectClusterId);
+    }
+}
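Note: getKey() presumably serves as the ES document id, yielding one document per connector per minute, assuming monitorTimestamp2min floors the timestamp to the minute as its name suggests; routing by connectClusterId keeps one Connect cluster's metrics on a single shard. With made-up values (and assuming a timestamp setter inherited from BaseMetricESPO):

    MirrorMakerMetricPO po = new MirrorMakerMetricPO(1L, 2L, "mm2-source");
    po.setTimestamp(1692093735000L);          // setter assumed from BaseMetricESPO
    System.out.println(po.getKey());          // KCOR@1@2@mm2-source@1692093720000 (minute-aligned)
    System.out.println(po.getRoutingValue()); // 2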
Some files were not shown because too many files have changed in this diff.