Compare commits

290 Commits

SHA1:

4471b054bc 7049e9429d 67ad5cacb7 b4a739476a a7bf2085db c3802cf48b 54711c4491 fcb52a69c0 1b632f9754 73d7a0ecdc
08943593b3 c949a88f20 a49c11f655 a66aed4a88 0045c953a0 fdce41b451 053d4dcb18 e1b2c442aa 0ed8ba8ca4 7d9ec05062
4f3c1ad9b6 6d45ed586c 3b0c208eff 05022f8db4 3336de457a 10a27bc29c 542e5d3c2d 7372617b14 89735a130b 859cf74bd6
e2744ab399 16bd065098 71c52e6dd7 a7f8c3ced3 f3f0432c65 426ba2d150 2790099efa f6ba8bc95e d6181522c0 04cf071ca6
e4371b5d02 52c52b2a0d 8f40f10575 fe0f6fcd0b 31b1ad8bb4 373680d854 9e3bc80495 52ccaeffd5 18136c12fd dec3f9e75e
ccc0ee4d18 69e9708080 5944ba099a ada2718b5e 1f87bd63e7 c0f3259cf6 e1d5749a40 a8d7eb27d9 1eecdf3829 be8b345889
074da389b3 4df2dc09fe e8d42ba074 c036483680 2818584db6 37585f760d f5477a03a1 50388425b2 725c59eab0 7bf1de29a4
d90c3fc7dd 80785ce072 44ea896de8 d30cb8a0f0 6c7b333b34 6d34a00e77 4e10f8d1c5 a22cd853fc 354e0d6a87 dfabe28645
fce230da48 6835e1e680 d8f89b8f67 ec28eba781 5ef8fff5bc 10b0a3dabb a23907e009 dbeae4ca68 0fb0e94848 95d2a82d35
5bc6eb6774 3ba81e9aaa 329a9b59c1 22c26e24b1 396045177c 820571d993 e311d3767c 24d7b80244 61f99e4d2e d5348bcf49
5d31d66365 29778a0154 165c0a5866 588323961e fd1c0b71c5 54fbdcadf9 69a30d0cf0 b8f9b44f38 cbf17d4eb5 327e025262
6b1e944bba 668ed4d61b 312c0584ed 110d3acb58 ddbc60283b 471bcecfd6 0245791b13 4794396ce8 c7088779d6 672905da12
47172b13be 3668a10af6 a4e294c03f 3fd6f4003f 3eaf5cd530 c344fd8ca4 09639ca294 a81b6dca83 b74aefb08f fffc0c3add
757f90aa7a 022f9eb551 6e7b82cfcb b5fb24b360 b77345222c 793e81406e cef1ec95d2 7e1b3c552b 69736a63b6 fb4a9f9056
387d89d3af 65d9ca9d39 8c842af4ba 4faf9262c9 be7724c67d 48d26347f7 bdb01ec8b5 9047815799 05bd94a2cc c9f7da84d0
bcc124e86a 48d2733403 31fc6e4e56 fcdeef0146 1cd524c0cc 0f746917a7 a2228d0169 e8a679d34b 1912a42091 ca81f96635
eb3b8c4b31 6740d6d60b c46c35b248 0b2dcec4bc f8e2a4aff4 7256db8c4e b14d5d9bee 12e15c3e4b 51911bf272 6dc8061401
b8fa4f8797 cc0bea7f45 4e9124b244 f0eabef7b0 23e5557958 b1d02afa85 2edc380f47 cea8295c09 244bfc993a 3a272a4493
a3300db770 b0394ce261 3123089790 f13cf66676 0c8c4d87fb 066088fdeb cf641e41c7 5b48322e1b 9d3f680d58 bed28d57e6
2538525103 6ed798db8c 8e9d966829 be16640f92 0e1376dd2e 0494575aa7 bed57534e0 1862d631d1 c977ce5690 84df377516
4d9a284f6e da7ad8b44a 4164046323 72e743dfd1 7eb7edaf0a 49368aaf76 b8c07a966f c6bcc0e3aa 7719339f23 8ad64722ed
611f8b8865 38bdc173e8 52244325d9 3fd3d99b8c d4ee5e91a2 c2ad2d7238 892e195f0e c5b1bed7dc 0e388d7aa7 c3a0dbbe48
8b95b3ffc7 42b78461cd 9190a41ca5 28a7251319 20565866ef 246f10aee5 960017280d 7218aaf52e 62050cc7b6 f88a14ac0a
9286761c30 07c3273247 eb8fe77582 b68ba0bff6 696657c09e 12bea9b60a 9334e9552f a43b04a98b f359ff995d 9185d2646b
33e61c762c e342e646ff ed163a80e0 b390df08b5 f0b3b9f7f4 a67d732507 ca0ebe0d75 94d113cbe0 25c3aeaa5f 736d5a00b7
f1627b214c d9265ec7ea 663e871bed 5c5eaddef7 edaec4f1ae 6d19acaa6c d29a619fbf b17808dd91 c5321a3667 8836691510
6568f6525d 473fc27b49 74aeb55acb 8efcf0529f 06071c2f9c 5eb4eca487 33f6153e12 df3283f526 b5901a2819 6d5f1402fe
65e3782b2e 135981dd30 fe5cf2d922 e15425cc2e c3cb0a4e33 cc32976bdd bc08318716 ee1ab30c2c 7fa1a66f7e 946bf37406
8706f6931a f551674860 d90fe0ef07 f1f33c79f4 d52eaafdbb e7a3e50ed1 2e09a87baa b92ae7e47e 6fbf67f9a9 01008acfcd
README.md (59 changed lines)

@@ -1,20 +1,21 @@

  ---
  
  
  
  **A one-stop `Apache Kafka` cluster metric monitoring and operations management platform**
  
+ `LogiKM has drawn wide attention since going open source. To keep the project better aligned with the future direction of Apache Kafka, the team plans to rebrand it as Know Streaming around May 2022, at which point the project name and logo will be updated as well. Thank you for your continued support, and stay tuned!`
  
- Reading this README will tell you about DiDi Logi-KafkaManager's target users, product positioning, and more; the demo address below lets you quickly try the full workflow of Kafka cluster metric monitoring and operations control.
+ Reading this README will tell you about DiDi Logi-KafkaManager's target users, product positioning, and more; the demo address below lets you quickly try the full workflow of Kafka cluster metric monitoring and operations control.<br>If DiDi Logi-KafkaManager is already running in your production environment and you would like better official support and guidance, you can join the official exchange platform through [`OCE certification`](http://obsuite.didiyun.com/open/openAuth).
  
  ## 1 Product overview
  DiDi Logi-KafkaManager grew out of years of in-house Kafka operations experience at DiDi. It is a shared, multi-tenant Kafka cloud platform built for Kafka users and Kafka operators, focused on core scenarios such as Kafka operations control, monitoring and alerting, and resource governance, and proven on large-scale clusters with massive data volumes. It reached 90% internal user satisfaction and has led to commercial partnerships with several well-known companies.
  
  ### 1.1 Quick demo address
- - Demo address: http://117.51.146.109:8080, account/password: admin/admin
+ - Demo address: http://117.51.150.133:8080, account/password: admin/admin
  
  ### 1.2 Experience maps
  Unlike similar products, which offer only a single user perspective (usually the administrator's), DiDi Logi-KafkaManager provides role-based, multi-scenario experience maps: a **user experience map**, an **operations experience map**, and an **operator (business) experience map**.
@@ -54,38 +55,56 @@

  ## 2 Documentation
  
  ### 2.1 Product docs
- - [DiDi Logi-KafkaManager installation guide](docs/install_guide/install_guide_cn.md)
+ - [DiDi LogiKM installation guide](docs/install_guide/install_guide_cn.md)
- - [DiDi Logi-KafkaManager: adding a cluster](docs/user_guide/add_cluster/add_cluster.md)
+ - [DiDi LogiKM: adding a cluster](docs/user_guide/add_cluster/add_cluster.md)
- - [DiDi Logi-KafkaManager user guide](docs/user_guide/user_guide_cn.md)
+ - [DiDi LogiKM user guide](docs/user_guide/user_guide_cn.md)
- - [DiDi Logi-KafkaManager FAQ](docs/user_guide/faq.md)
+ - [DiDi LogiKM FAQ](docs/user_guide/faq.md)
  
  ### 2.2 Community articles
  - [Product page on the DiDi Cloud website](https://www.didiyun.com/production/logi-KafkaManager.html)
  - [Seven years in the making: the DiDi Logi log service suite](https://mp.weixin.qq.com/s/-KQp-Qo3WKEOc9wIR2iFnw)
- - [DiDi Logi-KafkaManager, a one-stop Kafka monitoring and management platform](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ)
+ - [DiDi LogiKM, a one-stop Kafka monitoring and management platform](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ)
- - [The road to open-sourcing DiDi Logi-KafkaManager](https://xie.infoq.cn/article/0223091a99e697412073c0d64)
+ - [The road to open-sourcing DiDi LogiKM](https://xie.infoq.cn/article/0223091a99e697412073c0d64)
- - [DiDi Logi-KafkaManager video tutorial series](https://mp.weixin.qq.com/s/9X7gH0tptHPtfjPPSdGO8g)
+ - [DiDi LogiKM video tutorial series](https://space.bilibili.com/442531657/channel/seriesdetail?sid=571649)
- - [Kafka in practice (15): a study of DiDi's open-source Kafka management platform Logi-KafkaManager, by 叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244)
+ - [The most complete Kafka knowledge map](https://www.szzdzhp.com/kafka/)
+ - [DiDi LogiKM getting-started article series, by 石臻臻](https://www.szzdzhp.com/categories/LogIKM/)
+ - [Kafka in practice (15): a study of DiDi's open-source Kafka management platform LogiKM, by 叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244)
  
  ## 3 DiDi Logi open-source user community
  
-  
+ 
- WeChat group: follow the official account Obsuite and reply "Logi加群"
- 
+ To discuss Kafka, ES, and other middleware and big-data topics with the community, join the WeChat group.
- DingTalk group ID: 32821440
+ WeChat group: add mike_zhangliang or PenceXie on WeChat with the note "Logi加群", or follow the official account 云原生可观测性 and reply "Logi加群"
  
- ## 4 OCE certification
+ ## 4 知识星球 (Knowledge Planet)
- OCE is a certification mechanism and exchange platform tailored for production users of DiDi Logi-KafkaManager. OCE companies get better technical support, such as dedicated tech salons, one-on-one exchanges with the team, and a dedicated Q&A group. If Logi-KafkaManager is running in your production environment, [come and join](http://obsuite.didiyun.com/open/openAuth).
+ <img width="447" alt="image" src="https://user-images.githubusercontent.com/71620349/147314042-843a371a-48c0-4d9a-a65e-ca40236f3300.png">
+ 
+ We are building the largest and most authoritative **Kafka Chinese community** in China.
+ Here you can meet Kafka experts from major internet companies and nearly 2000+ Kafka enthusiasts, share knowledge, and keep up with the latest industry news. We look forward to having you: https://z.didi.cn/5gSF9
+ Every question answered! Rewards for participating!
+ PS: please describe your problem fully in one go and include environment details (version in use, steps taken, error/warning messages, and so on) so that the experts can answer quickly.
  
  ## 5 Project members
  
  ### 5.1 Core internal members
- `iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`、`zhaoyinrui`、`marzkonglingxu`、`joysunchao`
+ `iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`xiepeng`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`、`zhaoyinrui`、`marzkonglingxu`、`joysunchao`、`石臻臻`
  
  ### 5.2 External contributors

@@ -95,4 +114,4 @@

  ## 6 License
- `kafka-manager` is distributed and used under the `Apache-2.0` license; see the [license file](./LICENSE) for more information.
+ `LogiKM` is distributed and used under the `Apache-2.0` license; see the [license file](./LICENSE) for more information.
@@ -7,6 +7,83 @@

  ---
  
+ ## v2.6.0
+ 
+ Released: 2022-01-24
+ 
+ ### New capabilities
+ - Added a simple back-off utility class
+ 
+ ### Experience improvements
+ - Added documentation for the periodic tasks
+ - Added documentation for cluster installation and deployment
+ - Upgraded the Swagger, Spring Framework, Spring Boot, and ECharts versions
+ - Improved log output of the Task module
+ - Improved handling of cron-expression parse failures, which previously exited without any log message
+ - LDAP user onboarding now records department and email information
+ - Added a back-off mechanism and better error logging for JMX connection failures
+ - Made the thread pools and client pools configurable
+ - Removed the unused jmx_prometheus_javaagent-0.14.0.jar
+ - Improved the naming of migration tasks
+ - Fixed Region capacity information not being refreshed immediately after a Region is created
+ - Introduced Lombok
+ - Updated the video tutorials
+ - The LogiKM address in kcm_script.sh can now be passed in by the program
+ - Added switches to skip login for third-party and gateway APIs
+ - The extends-module settings no longer have to be configured in application.yml
+ 
+ ### Bug fixes
+ - Fixed an SQL syntax exception when batch-writing an empty metrics array to the DB
+ - Fixed the version not changing when gateway configuration is added or modified
+ - Fixed a tooltip being obscured on the cluster list page
+ - Fixed a failure to parse broker metadata from newer broker versions
+ - Fixed the Dockerfile build complaining about a missing application.yml
+ - Fixed a NullPointerException when updating a logical cluster
+ 
+ ## v2.4.1+
+ 
+ Released: 2021-05-21
+ 
+ ### New capabilities
+ - Added APIs to grant permissions and quotas directly (v2.4.1)
+ - Added the ability for API calls to bypass login (v2.4.1)
+ 
+ ### Experience improvements
+ - Upgraded Tomcat to 8.5.66 (v2.4.2)
+ - Improved the op APIs: split the util API into separate topic and leader APIs (v2.4.1)
+ - Shortened the keys used for the Gateway configuration (v2.4.1)
+ 
+ ### Bug fixes
+ - Fixed the wrong version being shown in the UI (v2.4.2)
+ 
+ ## v2.4.0
+ 
+ Released: 2021-05-18
+ 
+ ### New capabilities
+ - Added switches for automatic approval of App and Topic requests
+ - Added rack information to broker metadata
+ - Upgraded the MySQL driver to support MySQL 8+
+ - Added an operation-record query page
+ 
+ ### Experience improvements
+ - Improved the FAQ description of alert groups
+ - Clarified the shared vs. dedicated cluster concepts in the user guide
+ - User management page: the frontend now prevents a user from deleting themselves
+ 
+ ### Bug fixes
+ - Fixed the Topic-creation API in the op-util class
+ - Fixed the periodic Topic-to-DB sync task: the Topic list is now read directly from the DB instead of the cache
+ - Fixed app off-boarding approval failures by filtering out entries with permission 0 (no permission)
+ - Fixed a login and permission bypass vulnerability
+ - Fixed the developer role seeing the "add cluster" and "pause monitoring" buttons
+ 
  ## v2.3.0
  
  Released: 2021-02-08
72
build.sh
@@ -1,72 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
workspace=$(cd $(dirname $0) && pwd -P)
|
|
||||||
cd $workspace
|
|
||||||
|
|
||||||
## constant
|
|
||||||
OUTPUT_DIR=./output
|
|
||||||
KM_VERSION=2.4.0
|
|
||||||
APP_NAME=kafka-manager
|
|
||||||
APP_DIR=${APP_NAME}-${KM_VERSION}
|
|
||||||
|
|
||||||
MYSQL_TABLE_SQL_FILE=./docs/install_guide/create_mysql_table.sql
|
|
||||||
CONFIG_FILE=./kafka-manager-web/src/main/resources/application.yml
|
|
||||||
|
|
||||||
## function
|
|
||||||
function build() {
|
|
||||||
# 编译命令
|
|
||||||
mvn -U clean package -Dmaven.test.skip=true
|
|
||||||
|
|
||||||
local sc=$?
|
|
||||||
if [ $sc -ne 0 ];then
|
|
||||||
## 编译失败, 退出码为 非0
|
|
||||||
echo "$APP_NAME build error"
|
|
||||||
exit $sc
|
|
||||||
else
|
|
||||||
echo "$APP_NAME build ok"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
function make_output() {
|
|
||||||
# 新建output目录
|
|
||||||
rm -rf ${OUTPUT_DIR} &>/dev/null
|
|
||||||
mkdir -p ${OUTPUT_DIR}/${APP_DIR} &>/dev/null
|
|
||||||
|
|
||||||
# 填充output目录, output内的内容
|
|
||||||
(
|
|
||||||
cp -rf ${MYSQL_TABLE_SQL_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 sql 初始化脚本 至output目录
|
|
||||||
cp -rf ${CONFIG_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 application.yml 至output目录
|
|
||||||
|
|
||||||
# 拷贝程序包到output路径
|
|
||||||
cp kafka-manager-web/target/kafka-manager-web-${KM_VERSION}-SNAPSHOT.jar ${OUTPUT_DIR}/${APP_DIR}/${APP_NAME}.jar
|
|
||||||
echo -e "make output ok."
|
|
||||||
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
|
|
||||||
}
|
|
||||||
|
|
||||||
function make_package() {
|
|
||||||
# 压缩output目录
|
|
||||||
(
|
|
||||||
cd ${OUTPUT_DIR} && tar cvzf ${APP_DIR}.tar.gz ${APP_DIR}
|
|
||||||
echo -e "make package ok."
|
|
||||||
) || { echo -e "make package error"; exit 2; } # 压缩output目录失败后, 退出码为 非0
|
|
||||||
}
|
|
||||||
|
|
||||||
##########################################
|
|
||||||
## main
|
|
||||||
## 其中,
|
|
||||||
## 1.进行编译
|
|
||||||
## 2.生成部署包output
|
|
||||||
## 3.生成tar.gz压缩包
|
|
||||||
##########################################
|
|
||||||
|
|
||||||
# 1.进行编译
|
|
||||||
build
|
|
||||||
|
|
||||||
# 2.生成部署包output
|
|
||||||
make_output
|
|
||||||
|
|
||||||
# 3.生成tar.gz压缩包
|
|
||||||
make_package
|
|
||||||
|
|
||||||
# 编译成功
|
|
||||||
echo -e "build done"
|
|
||||||
exit 0
|
|
||||||
Dockerfile

@@ -1,43 +1,29 @@

  FROM openjdk:16-jdk-alpine3.13
  
- LABEL author="yangvipguang"
+ LABEL author="fengxsong"
+ RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && apk add --no-cache tini
- ENV VERSION 2.3.1
- 
- RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
- RUN apk add --no-cache --virtual .build-deps \
-     font-adobe-100dpi \
-     ttf-dejavu \
-     fontconfig \
-     curl \
-     apr \
-     apr-util \
-     apr-dev \
-     tomcat-native \
-     && apk del .build-deps
- 
- RUN apk add --no-cache tini
  
+ ENV VERSION 2.4.2
+ WORKDIR /opt/
  
  ENV AGENT_HOME /opt/agent/
  
- WORKDIR /tmp
- 
- COPY $JAR_PATH/kafka-manager.jar app.jar
- # COPY application.yml application.yml  ## mounted via helm by default, to avoid leaking sensitive configuration
- 
  COPY docker-depends/config.yaml $AGENT_HOME
  COPY docker-depends/jmx_prometheus_javaagent-0.15.0.jar $AGENT_HOME
  
  ENV JAVA_AGENT="-javaagent:$AGENT_HOME/jmx_prometheus_javaagent-0.15.0.jar=9999:$AGENT_HOME/config.yaml"
  ENV JAVA_HEAP_OPTS="-Xms1024M -Xmx1024M -Xmn100M "
  ENV JAVA_OPTS="-verbose:gc \
      -XX:MaxMetaspaceSize=256M -XX:+DisableExplicitGC -XX:+UseStringDeduplication \
      -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:-UseContainerSupport"
  
+ RUN wget https://github.com/didi/Logi-KafkaManager/releases/download/v${VERSION}/kafka-manager-${VERSION}.tar.gz && \
+     tar xvf kafka-manager-${VERSION}.tar.gz && \
+     mv kafka-manager-${VERSION}/kafka-manager.jar /opt/app.jar && \
+     mv kafka-manager-${VERSION}/application.yml /opt/application.yml && \
+     rm -rf kafka-manager-${VERSION}*
  
  EXPOSE 8080 9999
  
  ENTRYPOINT ["tini", "--"]
  
- CMD ["sh","-c","java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS app.jar --spring.config.location=application.yml"]
+ CMD [ "sh", "-c", "java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS app.jar --spring.config.location=application.yml"]
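The updated Dockerfile now fetches the release tarball at image build time and keeps the bundled application.yml at /opt/application.yml, while Helm deployments are expected to mount their own copy. A minimal local sketch of building and running the image follows; the image tag, container name, build context, and host config path are illustrative assumptions, not values taken from this diff.

```bash
# Build the image (tag name is an assumption).
docker build -t logi-kafka-manager:2.4.2 .

# Run it, overriding the bundled config with a local application.yml and
# exposing the web UI (8080) and the JMX Prometheus exporter (9999).
docker run -d --name km \
  -p 8080:8080 -p 9999:9999 \
  -v "$(pwd)/application.yml:/opt/application.yml" \
  logi-kafka-manager:2.4.2

# The javaagent configured via JAVA_AGENT serves metrics on port 9999.
curl -s http://localhost:9999/metrics | head
```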
6
container/helm/Chart.lock
Normal file

@@ -0,0 +1,6 @@
+ dependencies:
+ - name: mysql
+   repository: https://charts.bitnami.com/bitnami
+   version: 8.6.3
+ digest: sha256:d250c463c1d78ba30a24a338a06a551503c7a736621d974fe4999d2db7f6143e
+ generated: "2021-06-24T11:34:54.625217+08:00"
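Chart.lock above is the file Helm writes after resolving the MySQL dependency declared in Chart.yaml (shown next). A hedged sketch of regenerating it, assuming Helm 3 and network access to the Bitnami chart repository:

```bash
cd container/helm

# Resolve the mysql dependency from https://charts.bitnami.com/bitnami and
# refresh Chart.lock plus the charts/ directory (e.g. charts/mysql-8.6.3.tgz).
helm dependency update .

# Inspect what was pinned.
cat Chart.lock
```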
container/helm/Chart.yaml

@@ -1,6 +1,6 @@

  apiVersion: v2
  name: didi-km
- description: A Helm chart for Kubernetes
+ description: Logi-KafkaManager
  
  # A chart can be either an 'application' or a 'library' chart.
  #

@@ -21,4 +21,9 @@ version: 0.1.0

  # incremented each time you make changes to the application. Versions are not expected to
  # follow Semantic Versioning. They should reflect the version the application is using.
  # It is recommended to use it with quotes.
- appVersion: "1.16.0"
+ appVersion: "2.4.2"
+ dependencies:
+   - condition: mysql.enabled
+     name: mysql
+     repository: https://charts.bitnami.com/bitnami
+     version: 8.x.x
BIN  container/helm/charts/mysql-8.6.3.tgz (new file)
@@ -1,7 +1,17 @@
|
|||||||
|
{{- define "datasource.mysql" -}}
|
||||||
|
{{- if .Values.mysql.enabled }}
|
||||||
|
{{- printf "%s-mysql" (include "didi-km.fullname" .) -}}
|
||||||
|
{{- else -}}
|
||||||
|
{{- printf "%s" .Values.externalDatabase.host -}}
|
||||||
|
{{- end -}}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: ConfigMap
|
kind: ConfigMap
|
||||||
metadata:
|
metadata:
|
||||||
name: km-cm
|
name: {{ include "didi-km.fullname" . }}-configs
|
||||||
|
labels:
|
||||||
|
{{- include "didi-km.labels" . | nindent 4 }}
|
||||||
data:
|
data:
|
||||||
application.yml: |
|
application.yml: |
|
||||||
server:
|
server:
|
||||||
@@ -17,9 +27,9 @@ data:
|
|||||||
name: kafkamanager
|
name: kafkamanager
|
||||||
datasource:
|
datasource:
|
||||||
kafka-manager:
|
kafka-manager:
|
||||||
jdbc-url: jdbc:mysql://xxxxx:3306/kafka-manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8&useSSL=false
|
jdbc-url: jdbc:mysql://{{ include "datasource.mysql" . }}:3306/{{ .Values.mysql.auth.database }}?characterEncoding=UTF-8&serverTimezone=GMT%2B8&useSSL=false
|
||||||
username: admin
|
username: {{ .Values.mysql.auth.username }}
|
||||||
password: admin
|
password: {{ .Values.mysql.auth.password }}
|
||||||
driver-class-name: com.mysql.jdbc.Driver
|
driver-class-name: com.mysql.jdbc.Driver
|
||||||
main:
|
main:
|
||||||
allow-bean-definition-overriding: true
|
allow-bean-definition-overriding: true
|
||||||
@@ -45,7 +55,7 @@ data:
|
|||||||
didi:
|
didi:
|
||||||
app-topic-metrics-enabled: false
|
app-topic-metrics-enabled: false
|
||||||
topic-request-time-metrics-enabled: false
|
topic-request-time-metrics-enabled: false
|
||||||
topic-throttled-metrics: false
|
topic-throttled-metrics-enabled: false
|
||||||
save-days: 7
|
save-days: 7
|
||||||
|
|
||||||
# 任务相关的开关
|
# 任务相关的开关
|
||||||
@@ -54,7 +64,19 @@ data:
|
|||||||
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
|
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
|
||||||
|
|
||||||
account:
|
account:
|
||||||
|
# ldap settings
|
||||||
ldap:
|
ldap:
|
||||||
|
enabled: false
|
||||||
|
url: ldap://127.0.0.1:389/
|
||||||
|
basedn: dc=tsign,dc=cn
|
||||||
|
factory: com.sun.jndi.ldap.LdapCtxFactory
|
||||||
|
filter: sAMAccountName
|
||||||
|
security:
|
||||||
|
authentication: simple
|
||||||
|
principal: cn=admin,dc=tsign,dc=cn
|
||||||
|
credentials: admin
|
||||||
|
auth-user-registration: false
|
||||||
|
auth-user-registration-role: normal
|
||||||
|
|
||||||
kcm:
|
kcm:
|
||||||
enabled: false
|
enabled: false
|
||||||
|
|||||||
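The ConfigMap template in the block above now derives the JDBC URL, username, and password from chart values: the in-chart MySQL service when mysql.enabled is true, otherwise externalDatabase.host. A quick local rendering check is sketched below; the release name km and the template path templates/configmap.yaml are assumptions, as neither is shown in this diff.

```bash
cd container/helm

# Render only the ConfigMap and confirm jdbc-url, username and password
# picked up the values from values.yaml or --set overrides.
helm template km . --show-only templates/configmap.yaml

# Same check, but pointed at an external database instead of the subchart.
helm template km . \
  --set mysql.enabled=false \
  --set externalDatabase.host=mysql.example.internal \
  --show-only templates/configmap.yaml
```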
deployment.yaml (Helm template)

@@ -42,6 +42,10 @@ spec:

            protocol: TCP
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
+         volumeMounts:
+           - name: configs
+             mountPath: /tmp/application.yml
+             subPath: application.yml
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}

@@ -54,3 +58,7 @@ spec:

      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
+     volumes:
+       - name: configs
+         configMap:
+           name: {{ include "didi-km.fullname" . }}-configs
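With the ConfigMap now mounted at /tmp/application.yml, a deployed pod can be spot-checked as sketched below. The deployment name km follows from fullnameOverride in values.yaml; everything else (namespace, tooling inside the container) is an assumption.

```bash
# Confirm the ConfigMap-backed file is mounted where the container expects it.
kubectl exec deploy/km -- cat /tmp/application.yml | head

# Verify the rendered datasource settings made it into the running config.
kubectl exec deploy/km -- grep 'jdbc-url' /tmp/application.yml
```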
values.yaml (Helm chart)

@@ -5,13 +5,14 @@

  replicaCount: 1
  
  image:
-   repository: docker.io/yangvipguang/km
+   repository: docker.io/fengxsong/logi-kafka-manager
    pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
-   tag: "v18"
+   tag: "v2.4.2"
  
  imagePullSecrets: []
  nameOverride: ""
+ # fullnameOverride must set same as release name
  fullnameOverride: "km"
  
  serviceAccount:

@@ -59,10 +60,10 @@ resources:

    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    limits:
-     cpu: 50m
+     cpu: 500m
      memory: 2048Mi
    requests:
-     cpu: 10m
+     cpu: 100m
      memory: 200Mi
  
  autoscaling:

@@ -77,3 +78,16 @@ nodeSelector: {}

  tolerations: []
  
  affinity: {}
  
+ # more configurations are set with configmap in file template/configmap.yaml
+ externalDatabase:
+   host: ""
+ mysql:
+   # if enabled is set to false, then you should manually specified externalDatabase.host
+   enabled: true
+   architecture: standalone
+   auth:
+     rootPassword: "s3cretR00t"
+     database: "logi_kafka_manager"
+     username: "logi_kafka_manager"
+     password: "n0tp@55w0rd"
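These values wire the chart either to the bundled Bitnami MySQL subchart or to an existing database. Two hedged install sketches follow; the release name km (chosen to match fullnameOverride), the namespace, and the external host are assumptions.

```bash
cd container/helm

# Default: deploy LogiKM together with the bundled standalone MySQL.
helm install km . --namespace logikm --create-namespace

# Alternative: reuse an existing MySQL server instead of the subchart.
helm install km . --namespace logikm --create-namespace \
  --set mysql.enabled=false \
  --set externalDatabase.host=mysql.example.internal
```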
16
distribution/bin/shutdown.sh
Normal file

@@ -0,0 +1,16 @@
+ #!/bin/bash
+ 
+ cd `dirname $0`/../target
+ target_dir=`pwd`
+ 
+ pid=`ps ax | grep -i 'kafka-manager' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'`
+ if [ -z "$pid" ] ; then
+     echo "No kafka-manager running."
+     exit -1;
+ fi
+ 
+ echo "The kafka-manager (${pid}) is running..."
+ 
+ kill ${pid}
+ 
+ echo "Send shutdown request to kafka-manager (${pid}) OK"
81
distribution/bin/startup.sh
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
error_exit ()
|
||||||
|
{
|
||||||
|
echo "ERROR: $1 !!"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
|
||||||
|
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
|
||||||
|
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME
|
||||||
|
|
||||||
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
|
if $darwin; then
|
||||||
|
|
||||||
|
if [ -x '/usr/libexec/java_home' ] ; then
|
||||||
|
export JAVA_HOME=`/usr/libexec/java_home`
|
||||||
|
|
||||||
|
elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
|
||||||
|
export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
JAVA_PATH=`dirname $(readlink -f $(which javac))`
|
||||||
|
if [ "x$JAVA_PATH" != "x" ]; then
|
||||||
|
export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
|
error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
export WEB_SERVER="kafka-manager"
|
||||||
|
export JAVA_HOME
|
||||||
|
export JAVA="$JAVA_HOME/bin/java"
|
||||||
|
export BASE_DIR=`cd $(dirname $0)/..; pwd`
|
||||||
|
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/
|
||||||
|
|
||||||
|
|
||||||
|
#===========================================================================================
|
||||||
|
# JVM Configuration
|
||||||
|
#===========================================================================================
|
||||||
|
|
||||||
|
JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
|
||||||
|
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"
|
||||||
|
|
||||||
|
## jdk版本高的情况 有些 参数废弃了
|
||||||
|
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
|
||||||
|
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
|
||||||
|
JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400"
|
||||||
|
else
|
||||||
|
JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
|
||||||
|
JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
|
JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/${WEB_SERVER}.jar"
|
||||||
|
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
|
||||||
|
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml"
|
||||||
|
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if [ ! -d "${BASE_DIR}/logs" ]; then
|
||||||
|
mkdir ${BASE_DIR}/logs
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "$JAVA ${JAVA_OPT}"
|
||||||
|
|
||||||
|
# check the start.out log output file
|
||||||
|
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
|
||||||
|
touch "${BASE_DIR}/logs/start.out"
|
||||||
|
fi
|
||||||
|
# start
|
||||||
|
echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 &
|
||||||
|
|
||||||
|
|
||||||
|
nohup $JAVA ${JAVA_OPT} >> ${BASE_DIR}/logs/start.out 2>&1 &
|
||||||
|
|
||||||
|
echo "${WEB_SERVER} is starting,you can check the ${BASE_DIR}/logs/start.out"
|
||||||
29
distribution/conf/application.yml
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
|
||||||
|
## kafka-manager的配置文件,该文件中的配置会覆盖默认配置
|
||||||
|
## 下面的配置信息基本就是jar中的 application.yml默认配置了;
|
||||||
|
## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql
|
||||||
|
|
||||||
|
|
||||||
|
server:
|
||||||
|
port: 8080
|
||||||
|
tomcat:
|
||||||
|
accept-count: 1000
|
||||||
|
max-connections: 10000
|
||||||
|
max-threads: 800
|
||||||
|
min-spare-threads: 100
|
||||||
|
|
||||||
|
spring:
|
||||||
|
application:
|
||||||
|
name: kafkamanager
|
||||||
|
version: 2.6.0
|
||||||
|
profiles:
|
||||||
|
active: dev
|
||||||
|
datasource:
|
||||||
|
kafka-manager:
|
||||||
|
jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
|
||||||
|
username: root
|
||||||
|
password: 123456
|
||||||
|
driver-class-name: com.mysql.cj.jdbc.Driver
|
||||||
|
main:
|
||||||
|
allow-bean-definition-overriding: true
|
||||||
|
|
||||||
136
distribution/conf/application.yml.example
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
|
||||||
|
## kafka-manager的配置文件,该文件中的配置会覆盖默认配置
|
||||||
|
## 下面的配置信息基本就是jar中的 application.yml默认配置了;
|
||||||
|
## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql
|
||||||
|
|
||||||
|
|
||||||
|
server:
|
||||||
|
port: 8080
|
||||||
|
tomcat:
|
||||||
|
accept-count: 1000
|
||||||
|
max-connections: 10000
|
||||||
|
max-threads: 800
|
||||||
|
min-spare-threads: 100
|
||||||
|
|
||||||
|
spring:
|
||||||
|
application:
|
||||||
|
name: kafkamanager
|
||||||
|
version: 2.6.0
|
||||||
|
profiles:
|
||||||
|
active: dev
|
||||||
|
datasource:
|
||||||
|
kafka-manager:
|
||||||
|
jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
|
||||||
|
username: root
|
||||||
|
password: 123456
|
||||||
|
driver-class-name: com.mysql.cj.jdbc.Driver
|
||||||
|
main:
|
||||||
|
allow-bean-definition-overriding: true
|
||||||
|
|
||||||
|
servlet:
|
||||||
|
multipart:
|
||||||
|
max-file-size: 100MB
|
||||||
|
max-request-size: 100MB
|
||||||
|
|
||||||
|
logging:
|
||||||
|
config: classpath:logback-spring.xml
|
||||||
|
|
||||||
|
custom:
|
||||||
|
idc: cn
|
||||||
|
store-metrics-task:
|
||||||
|
community:
|
||||||
|
topic-metrics-enabled: true
|
||||||
|
didi: # 滴滴Kafka特有的指标
|
||||||
|
app-topic-metrics-enabled: false
|
||||||
|
topic-request-time-metrics-enabled: false
|
||||||
|
topic-throttled-metrics-enabled: false
|
||||||
|
|
||||||
|
# 任务相关的配置
|
||||||
|
task:
|
||||||
|
op:
|
||||||
|
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
|
||||||
|
order-auto-exec: # 工单自动化审批线程的开关
|
||||||
|
topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启
|
||||||
|
app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启
|
||||||
|
metrics:
|
||||||
|
collect: # 收集指标
|
||||||
|
broker-metrics-enabled: true # 收集Broker指标
|
||||||
|
sink: # 上报指标
|
||||||
|
cluster-metrics: # 上报cluster指标
|
||||||
|
sink-db-enabled: true # 上报到db
|
||||||
|
broker-metrics: # 上报broker指标
|
||||||
|
sink-db-enabled: true # 上报到db
|
||||||
|
delete: # 删除指标
|
||||||
|
delete-limit-size: 1000 # 单次删除的批大小
|
||||||
|
cluster-metrics-save-days: 14 # 集群指标保存天数
|
||||||
|
broker-metrics-save-days: 14 # Broker指标保存天数
|
||||||
|
topic-metrics-save-days: 7 # Topic指标保存天数
|
||||||
|
topic-request-time-metrics-save-days: 7 # Topic请求耗时指标保存天数
|
||||||
|
topic-throttled-metrics-save-days: 7 # Topic限流指标保存天数
|
||||||
|
app-topic-metrics-save-days: 7 # App+Topic指标保存天数
|
||||||
|
|
||||||
|
thread-pool:
|
||||||
|
collect-metrics:
|
||||||
|
thread-num: 256 # 收集指标线程池大小
|
||||||
|
queue-size: 5000 # 收集指标线程池的queue大小
|
||||||
|
api-call:
|
||||||
|
thread-num: 16 # api服务线程池大小
|
||||||
|
queue-size: 5000 # api服务线程池的queue大小
|
||||||
|
|
||||||
|
client-pool:
|
||||||
|
kafka-consumer:
|
||||||
|
min-idle-client-num: 24 # 最小空闲客户端数
|
||||||
|
max-idle-client-num: 24 # 最大空闲客户端数
|
||||||
|
max-total-client-num: 24 # 最大客户端数
|
||||||
|
borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒
|
||||||
|
|
||||||
|
account:
|
||||||
|
jump-login:
|
||||||
|
gateway-api: false # 网关接口
|
||||||
|
third-part-api: false # 第三方接口
|
||||||
|
ldap:
|
||||||
|
enabled: false
|
||||||
|
url: ldap://127.0.0.1:389/
|
||||||
|
basedn: dc=tsign,dc=cn
|
||||||
|
factory: com.sun.jndi.ldap.LdapCtxFactory
|
||||||
|
filter: sAMAccountName
|
||||||
|
security:
|
||||||
|
authentication: simple
|
||||||
|
principal: cn=admin,dc=tsign,dc=cn
|
||||||
|
credentials: admin
|
||||||
|
auth-user-registration: true
|
||||||
|
auth-user-registration-role: normal
|
||||||
|
|
||||||
|
kcm: # 集群安装部署,仅安装broker
|
||||||
|
enabled: false # 是否开启
|
||||||
|
s3: # s3 存储服务
|
||||||
|
endpoint: s3.didiyunapi.com
|
||||||
|
access-key: 1234567890
|
||||||
|
secret-key: 0987654321
|
||||||
|
bucket: logi-kafka
|
||||||
|
n9e: # 夜莺
|
||||||
|
base-url: http://127.0.0.1:8004 # 夜莺job服务地址
|
||||||
|
user-token: 12345678 # 用户的token
|
||||||
|
timeout: 300 # 当台操作的超时时间
|
||||||
|
account: root # 操作时使用的账号
|
||||||
|
script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改
|
||||||
|
logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态
|
||||||
|
|
||||||
|
monitor:
|
||||||
|
enabled: false
|
||||||
|
n9e:
|
||||||
|
nid: 2
|
||||||
|
user-token: 1234567890
|
||||||
|
mon:
|
||||||
|
base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000
|
||||||
|
sink:
|
||||||
|
base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000
|
||||||
|
rdb:
|
||||||
|
base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000
|
||||||
|
|
||||||
|
notify:
|
||||||
|
kafka:
|
||||||
|
cluster-id: 95
|
||||||
|
topic-name: didi-kafka-notify
|
||||||
|
order:
|
||||||
|
detail-url: http://127.0.0.1
|
||||||
create_mysql_table.sql

@@ -13,6 +13,9 @@ CREATE TABLE `account` (

    `username` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '用户名',
    `password` varchar(128) NOT NULL DEFAULT '' COMMENT '密码',
    `role` tinyint(8) NOT NULL DEFAULT '0' COMMENT '角色类型, 0:普通用户 1:研发 2:运维',
+   `department` varchar(256) DEFAULT '' COMMENT '部门名',
+   `display_name` varchar(256) DEFAULT '' COMMENT '用户姓名',
+   `mail` varchar(256) DEFAULT '' COMMENT '邮箱',
    `status` int(16) NOT NULL DEFAULT '0' COMMENT '0标识使用中,-1标识已废弃',
    `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
    `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',

@@ -210,11 +213,11 @@ CREATE TABLE `gateway_config` (

    PRIMARY KEY (`id`),
    UNIQUE KEY `uniq_type_name` (`type`,`name`)
  ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='gateway配置';
- INSERT INTO gateway_config(type, name, value, `version`) values('SERVICE_DISCOVERY_QUEUE_SIZE', 'SERVICE_DISCOVERY_QUEUE_SIZE', 100000000, 1);
+ INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_QUEUE_SIZE', 'SD_QUEUE_SIZE', 100000000, 1, '任意集群队列大小');
- INSERT INTO gateway_config(type, name, value, `version`) values('SERVICE_DISCOVERY_APPID_RATE', 'SERVICE_DISCOVERY_APPID_RATE', 100000000, 1);
+ INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_APP_RATE', 'SD_APP_RATE', 100000000, 1, '任意一个App限速');
- INSERT INTO gateway_config(type, name, value, `version`) values('SERVICE_DISCOVERY_IP_RATE', 'SERVICE_DISCOVERY_IP_RATE', 100000000, 1);
+ INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_IP_RATE', 'SD_IP_RATE', 100000000, 1, '任意一个IP限速');
- INSERT INTO gateway_config(type, name, value, `version`) values('SERVICE_DISCOVERY_SP_RATE', 'app_01234567', 100000000, 1);
+ INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_SP_RATE', 'app_01234567', 100000000, 1, '指定App限速');
- INSERT INTO gateway_config(type, name, value, `version`) values('SERVICE_DISCOVERY_SP_RATE', '192.168.0.1', 100000000, 1);
+ INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_SP_RATE', '192.168.0.1', 100000000, 1, '指定IP限速');
  
  --
  -- Table structure for table `heartbeat`
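The seed data above renames the gateway service-discovery keys (SERVICE_DISCOVERY_* becomes SD_*) and populates the new description column. A quick sanity check against a freshly initialized database is sketched below; the host, credentials, and the database name logi_kafka_manager are assumptions matching the sample configuration elsewhere in this change.

```bash
# List the gateway_config seed rows and confirm the SD_* keys and descriptions.
mysql -h 127.0.0.1 -uroot -p logi_kafka_manager \
  -e "SELECT type, name, value, version, description FROM gateway_config;"
```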
215
distribution/conf/logback-spring.xml
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<configuration scan="true" scanPeriod="10 seconds">
|
||||||
|
<contextName>logback</contextName>
|
||||||
|
<property name="log.path" value="./logs" />
|
||||||
|
|
||||||
|
<!-- 彩色日志 -->
|
||||||
|
<!-- 彩色日志依赖的渲染类 -->
|
||||||
|
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
|
||||||
|
<conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
|
||||||
|
<conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
|
||||||
|
<!-- 彩色日志格式 -->
|
||||||
|
<property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
|
||||||
|
|
||||||
|
<!--输出到控制台-->
|
||||||
|
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
|
||||||
|
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||||
|
<level>info</level>
|
||||||
|
</filter>
|
||||||
|
<encoder>
|
||||||
|
<Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
|
||||||
|
<charset>UTF-8</charset>
|
||||||
|
</encoder>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
|
||||||
|
<!--输出到文件-->
|
||||||
|
|
||||||
|
<!-- 时间滚动输出 level为 DEBUG 日志 -->
|
||||||
|
<appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<file>${log.path}/log_debug.log</file>
|
||||||
|
<!--日志文件输出格式-->
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset> <!-- 设置字符集 -->
|
||||||
|
</encoder>
|
||||||
|
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<!-- 日志归档 -->
|
||||||
|
<fileNamePattern>${log.path}/log_debug_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<!--日志文件保留天数-->
|
||||||
|
<maxHistory>7</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
<!-- 此日志文件只记录debug级别的 -->
|
||||||
|
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||||
|
<level>debug</level>
|
||||||
|
<onMatch>ACCEPT</onMatch>
|
||||||
|
<onMismatch>DENY</onMismatch>
|
||||||
|
</filter>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- 时间滚动输出 level为 INFO 日志 -->
|
||||||
|
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<!-- 正在记录的日志文件的路径及文件名 -->
|
||||||
|
<file>${log.path}/log_info.log</file>
|
||||||
|
<!--日志文件输出格式-->
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset>
|
||||||
|
</encoder>
|
||||||
|
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<!-- 每天日志归档路径以及格式 -->
|
||||||
|
<fileNamePattern>${log.path}/log_info_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<!--日志文件保留天数-->
|
||||||
|
<maxHistory>7</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
<!-- 此日志文件只记录info级别的 -->
|
||||||
|
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||||
|
<level>info</level>
|
||||||
|
<onMatch>ACCEPT</onMatch>
|
||||||
|
<onMismatch>DENY</onMismatch>
|
||||||
|
</filter>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- 时间滚动输出 level为 WARN 日志 -->
|
||||||
|
<appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<!-- 正在记录的日志文件的路径及文件名 -->
|
||||||
|
<file>${log.path}/log_warn.log</file>
|
||||||
|
<!--日志文件输出格式-->
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset> <!-- 此处设置字符集 -->
|
||||||
|
</encoder>
|
||||||
|
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/log_warn_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<!--日志文件保留天数-->
|
||||||
|
<maxHistory>7</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
<!-- 此日志文件只记录warn级别的 -->
|
||||||
|
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||||
|
<level>warn</level>
|
||||||
|
<onMatch>ACCEPT</onMatch>
|
||||||
|
<onMismatch>DENY</onMismatch>
|
||||||
|
</filter>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
|
||||||
|
<!-- 时间滚动输出 level为 ERROR 日志 -->
|
||||||
|
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<!-- 正在记录的日志文件的路径及文件名 -->
|
||||||
|
<file>${log.path}/log_error.log</file>
|
||||||
|
<!--日志文件输出格式-->
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset> <!-- 此处设置字符集 -->
|
||||||
|
</encoder>
|
||||||
|
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/log_error_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<!--日志文件保留天数-->
|
||||||
|
<maxHistory>7</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
<!-- 此日志文件只记录ERROR级别的 -->
|
||||||
|
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||||
|
<level>ERROR</level>
|
||||||
|
<onMatch>ACCEPT</onMatch>
|
||||||
|
<onMismatch>DENY</onMismatch>
|
||||||
|
</filter>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- Metrics信息收集日志 -->
|
||||||
|
<appender name="COLLECTOR_METRICS_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<file>${log.path}/metrics/collector_metrics.log</file>
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset>
|
||||||
|
</encoder>
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/metrics/collector_metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<maxHistory>3</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- Metrics信息收集日志 -->
|
||||||
|
<appender name="API_METRICS_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<file>${log.path}/metrics/api_metrics.log</file>
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset>
|
||||||
|
</encoder>
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/metrics/api_metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<maxHistory>3</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- Metrics信息收集日志 -->
|
||||||
|
<appender name="SCHEDULED_TASK_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<file>${log.path}/metrics/scheduled_tasks.log</file>
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
|
||||||
|
<charset>UTF-8</charset>
|
||||||
|
</encoder>
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/metrics/scheduled_tasks_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
|
||||||
|
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
|
||||||
|
<maxFileSize>100MB</maxFileSize>
|
||||||
|
</timeBasedFileNamingAndTriggeringPolicy>
|
||||||
|
<maxHistory>5</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<logger name="COLLECTOR_METRICS_LOGGER" level="DEBUG" additivity="false">
|
||||||
|
<appender-ref ref="COLLECTOR_METRICS_LOGGER"/>
|
||||||
|
</logger>
|
||||||
|
<logger name="API_METRICS_LOGGER" level="DEBUG" additivity="false">
|
||||||
|
<appender-ref ref="API_METRICS_LOGGER"/>
|
||||||
|
</logger>
|
||||||
|
<logger name="SCHEDULED_TASK_LOGGER" level="DEBUG" additivity="false">
|
||||||
|
<appender-ref ref="SCHEDULED_TASK_LOGGER"/>
|
||||||
|
</logger>
|
||||||
|
|
||||||
|
<logger name="org.apache.ibatis" level="INFO" additivity="false" />
|
||||||
|
<logger name="org.mybatis.spring" level="INFO" additivity="false" />
|
||||||
|
<logger name="com.github.miemiedev.mybatis.paginator" level="INFO" additivity="false" />
|
||||||
|
|
||||||
|
<root level="info">
|
||||||
|
<appender-ref ref="CONSOLE" />
|
||||||
|
<appender-ref ref="DEBUG_FILE" />
|
||||||
|
<appender-ref ref="INFO_FILE" />
|
||||||
|
<appender-ref ref="WARN_FILE" />
|
||||||
|
<appender-ref ref="ERROR_FILE" />
|
||||||
|
<!--<appender-ref ref="METRICS_LOG" />-->
|
||||||
|
</root>
|
||||||
|
|
||||||
|
<!--生产环境:输出到文件-->
|
||||||
|
<!--<springProfile name="pro">-->
|
||||||
|
<!--<root level="info">-->
|
||||||
|
<!--<appender-ref ref="CONSOLE" />-->
|
||||||
|
<!--<appender-ref ref="DEBUG_FILE" />-->
|
||||||
|
<!--<appender-ref ref="INFO_FILE" />-->
|
||||||
|
<!--<appender-ref ref="ERROR_FILE" />-->
|
||||||
|
<!--<appender-ref ref="WARN_FILE" />-->
|
||||||
|
<!--</root>-->
|
||||||
|
<!--</springProfile>-->
|
||||||
|
</configuration>
|
||||||
64
distribution/pom.xml
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<artifactId>kafka-manager</artifactId>
|
||||||
|
<groupId>com.xiaojukeji.kafka</groupId>
|
||||||
|
<version>${kafka-manager.revision}</version>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<artifactId>distribution</artifactId>
|
||||||
|
<name>distribution</name>
|
||||||
|
<packaging>pom</packaging>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>${project.groupId}</groupId>
|
||||||
|
<artifactId>kafka-manager-web</artifactId>
|
||||||
|
<version>${kafka-manager.revision}</version>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<profiles>
|
||||||
|
|
||||||
|
<profile>
|
||||||
|
<id>release-kafka-manager</id>
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>${project.groupId}</groupId>
|
||||||
|
<artifactId>kafka-manager-web</artifactId>
|
||||||
|
<version>${kafka-manager.revision}</version>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-assembly-plugin</artifactId>
|
||||||
|
<configuration>
|
||||||
|
<descriptors>
|
||||||
|
<descriptor>release-km.xml</descriptor>
|
||||||
|
</descriptors>
|
||||||
|
<tarLongFileMode>posix</tarLongFileMode>
|
||||||
|
</configuration>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>make-assembly</id>
|
||||||
|
<phase>install</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>single</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
<finalName>kafka-manager</finalName>
|
||||||
|
</build>
|
||||||
|
</profile>
|
||||||
|
</profiles>
|
||||||
|
</project>
|
||||||
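The new distribution module replaces the deleted build.sh: the maven-assembly-plugin, activated by the release-kafka-manager profile and bound to the install phase, packages bin/, conf/, the fat jar, and the readme/upgrade notes according to release-km.xml (shown further below). A hedged build sketch, run from the repository root; the exact output file name is an assumption:

```bash
# Build everything and produce the distribution archives; they typically
# land under distribution/target/ as kafka-manager-<version>.tar.gz / .zip.
mvn -U clean install -Prelease-kafka-manager -Dmaven.test.skip=true

# Inspect the packaged layout.
tar tzf distribution/target/kafka-manager-*.tar.gz | head
```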
22
distribution/readme.md
Normal file

@@ -0,0 +1,22 @@
+ ## Notes
+ 
+ ### 1. Create the MySQL tables
+ > conf/create_mysql_table.sql
+ 
+ ### 2. Edit the configuration file
+ > conf/application.yml.example
+ > Copy application.yml.example to a file named application.yml,
+ > keep it in the same directory (conf/), and change it to your own settings.
+ > Settings here take precedence over the defaults bundled inside the jar.
+ >
+ 
+ ### 3. Start / stop kafka-manager
+ > sh bin/startup.sh  (start)
+ >
+ > sh shutdown.sh  (stop)
+ >
+ 
+ ### 4. Upgrading the jar
+ > If you are upgrading, check `upgrade_config.md` for the history of configuration changes.
+ >
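Putting the four steps above together, a first-time setup of the unpacked distribution might look like the sketch below; host names, credentials, and the database name are assumptions that must match your edited conf/application.yml.

```bash
# 1. Initialize the schema (database name must match conf/application.yml).
mysql -h 127.0.0.1 -uroot -p -e "CREATE DATABASE logi_kafka_manager;"
mysql -h 127.0.0.1 -uroot -p logi_kafka_manager < conf/create_mysql_table.sql

# 2. Create the local config from the template and edit the datasource section.
cp conf/application.yml.example conf/application.yml

# 3. Start, watch the startup log, and stop when needed.
sh bin/startup.sh
tail -f logs/start.out
sh bin/shutdown.sh
```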
51
distribution/release-km.xml
Executable file
@@ -0,0 +1,51 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
|
||||||
|
<assembly>
|
||||||
|
<id>${project.version}</id>
|
||||||
|
<includeBaseDirectory>true</includeBaseDirectory>
|
||||||
|
<formats>
|
||||||
|
<format>dir</format>
|
||||||
|
<format>tar.gz</format>
|
||||||
|
<format>zip</format>
|
||||||
|
</formats>
|
||||||
|
<fileSets>
|
||||||
|
<fileSet>
|
||||||
|
<includes>
|
||||||
|
<include>conf/**</include>
|
||||||
|
</includes>
|
||||||
|
</fileSet>
|
||||||
|
|
||||||
|
<fileSet>
|
||||||
|
<includes>
|
||||||
|
<include>bin/*</include>
|
||||||
|
</includes>
|
||||||
|
<fileMode>0755</fileMode>
|
||||||
|
</fileSet>
|
||||||
|
</fileSets>
|
||||||
|
<files>
|
||||||
|
|
||||||
|
|
||||||
|
<file>
|
||||||
|
<source>readme.md</source>
|
||||||
|
<destName>readme.md</destName>
|
||||||
|
</file>
|
||||||
|
<file>
|
||||||
|
<source>upgrade_config.md</source>
|
||||||
|
<destName>upgrade_config.md</destName>
|
||||||
|
</file>
|
||||||
|
<file>
|
||||||
|
<!--打好的jar包名称和放置目录-->
|
||||||
|
<source>../kafka-manager-web/target/kafka-manager.jar</source>
|
||||||
|
<outputDirectory>target/</outputDirectory>
|
||||||
|
</file>
|
||||||
|
</files>
|
||||||
|
|
||||||
|
<moduleSets>
|
||||||
|
<moduleSet>
|
||||||
|
<useAllReactorProjects>true</useAllReactorProjects>
|
||||||
|
<includes>
|
||||||
|
<include>com.xiaojukeji.kafka:kafka-manager-web</include>
|
||||||
|
</includes>
|
||||||
|
</moduleSet>
|
||||||
|
</moduleSets>
|
||||||
|
</assembly>
|
||||||
upgrade_config.md

@@ -1,13 +1,17 @@

- ---
+ ## Configuration changes across version upgrades
+ > This file starts with V2.2.0; if a release changed any configuration it is recorded below, and if a release is not listed, nothing changed.
+ > When upgrading across several versions, run the SQL scripts of every intermediate change in order.
- 
  
  **A one-stop `Apache Kafka` cluster metric monitoring and operations management platform**
  
  ---
  
- # Upgrading to `2.2.0`
+ ### 1. Upgrading to `V2.2.0`
+ #### 1. MySQL changes
  Version `2.2.0` adds one column to each of the `cluster` and `logical_cluster` tables, so run the SQL below to add them.

@@ -25,3 +29,24 @@ UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0;

  ALTER TABLE `logical_cluster` ADD INDEX `uniq_identification` (`identification` ASC);
  ```
  
+ ### Upgrading to `2.3.0`
+ 
+ #### 1. MySQL changes
+ Version `2.3.0` adds a description column to the `gateway_config` table; run the SQL below to add it.
+ 
+ ```sql
+ ALTER TABLE `gateway_config`
+   ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`;
+ ```
+ 
+ ### Upgrading to `2.6.0`
+ 
+ #### 1. MySQL changes
+ Version `2.6.0` adds three columns (display name, department, and email) to the `account` table; run the SQL below to add them.
+ 
+ ```sql
+ ALTER TABLE `account`
+   ADD COLUMN `display_name` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '用户名' AFTER `role`,
+   ADD COLUMN `department` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '部门名' AFTER `display_name`,
+   ADD COLUMN `mail` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '邮箱' AFTER `department`;
+ ```
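Applying these migrations is a matter of running each block against the LogiKM database, oldest first. A hedged sketch follows; the connection details and the database name logi_kafka_manager are assumptions taken from the sample configuration.

```bash
# Apply the 2.3.0 and 2.6.0 schema changes in order (run each block only once;
# the statements are not idempotent).
mysql -h 127.0.0.1 -uroot -p logi_kafka_manager <<'SQL'
ALTER TABLE `gateway_config`
  ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`;
ALTER TABLE `account`
  ADD COLUMN `display_name` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '用户名' AFTER `role`,
  ADD COLUMN `department` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '部门名' AFTER `display_name`,
  ADD COLUMN `mail` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '邮箱' AFTER `department`;
SQL
```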
Image changes (binary): one image modified (20 KiB before, 785 KiB after) and one image added (2.5 MiB).
BIN  docs/dev_guide/assets/kcm/kcm_principle.png (new file, 69 KiB)
89
docs/dev_guide/drawio/KCM实现原理.drawio
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
<mxfile host="65bd71144e">
|
||||||
|
<diagram id="bhaMuW99Q1BzDTtcfRXp" name="Page-1">
|
||||||
|
<mxGraphModel dx="1138" dy="830" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1169" pageHeight="827" math="0" shadow="0">
|
||||||
|
<root>
|
||||||
|
<mxCell id="0"/>
|
||||||
|
<mxCell id="1" parent="0"/>
|
||||||
|
<mxCell id="11" value="待部署Kafka-Broker的机器" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;labelPosition=center;verticalLabelPosition=bottom;align=center;verticalAlign=top;dashed=1;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="380" y="240" width="320" height="240" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="24" value="" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;labelPosition=center;verticalLabelPosition=bottom;align=center;verticalAlign=top;dashed=1;fillColor=#eeeeee;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="410" y="310" width="260" height="160" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="6" style="edgeStyle=none;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="2" target="3">
|
||||||
|
<mxGeometry relative="1" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="7" value="调用夜莺接口,<br>创建集群安装部署任务" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="6">
|
||||||
|
<mxGeometry x="-0.0875" y="1" relative="1" as="geometry">
|
||||||
|
<mxPoint x="9" y="1" as="offset"/>
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="9" style="edgeStyle=none;html=1;" edge="1" parent="1" source="2" target="4">
|
||||||
|
<mxGeometry relative="1" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="10" value="通过版本管理,将Kafka的安装包,<br>server配置上传到s3中" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="9">
|
||||||
|
<mxGeometry x="0.0125" y="2" relative="1" as="geometry">
|
||||||
|
<mxPoint as="offset"/>
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="2" value="LogiKM" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="40" y="100" width="120" height="40" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="12" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="3" target="5">
|
||||||
|
<mxGeometry relative="1" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="13" value="1、下发任务脚本(kcm_script.sh);<br>2、下发任务操作命令;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="12">
|
||||||
|
<mxGeometry x="-0.0731" y="2" relative="1" as="geometry">
|
||||||
|
<mxPoint x="-2" y="-16" as="offset"/>
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="3" value="夜莺——任务中心" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#cdeb8b;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="480" y="100" width="120" height="40" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="4" value="S3" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#ffe6cc;strokeColor=#d79b00;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="40" y="310" width="120" height="40" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="5" value="夜莺——Agent(<font color="#ff3333">代理执行kcm_script.sh脚本</font>)" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="400" y="260" width="280" height="40" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="22" style="edgeStyle=orthogonalEdgeStyle;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;fontColor=#FF3333;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="14" target="4">
|
||||||
|
<mxGeometry relative="1" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="25" value="下载安装包" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontColor=#000000;" vertex="1" connectable="0" parent="22">
|
||||||
|
<mxGeometry x="0.2226" y="-2" relative="1" as="geometry">
|
||||||
|
<mxPoint x="27" y="2" as="offset"/>
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="14" value="执行kcm_script.sh脚本:下载安装包" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#eeeeee;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="425" y="320" width="235" height="20" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="18" value="执行kcm_script.sh脚本:安装" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#eeeeee;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="425" y="350" width="235" height="20" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="19" value="执行kcm_script.sh脚本:检查安装结果" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#eeeeee;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="425" y="380" width="235" height="20" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="23" style="edgeStyle=orthogonalEdgeStyle;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;fontColor=#FF3333;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="20" target="2">
|
||||||
|
<mxGeometry relative="1" as="geometry">
|
||||||
|
<Array as="points">
|
||||||
|
<mxPoint x="770" y="420"/>
|
||||||
|
<mxPoint x="770" y="40"/>
|
||||||
|
<mxPoint x="100" y="40"/>
|
||||||
|
</Array>
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="26" value="检查副本同步状态" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontColor=#000000;" vertex="1" connectable="0" parent="23">
|
||||||
|
<mxGeometry x="-0.3344" relative="1" as="geometry">
|
||||||
|
<mxPoint as="offset"/>
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="20" value="执行kcm_script.sh脚本:检查副本同步" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#eeeeee;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="425" y="410" width="235" height="20" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="21" value="执行kcm_script.sh脚本:结束" style="rounded=0;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=1;fillColor=#eeeeee;strokeColor=#36393d;" vertex="1" parent="1">
|
||||||
|
<mxGeometry x="425" y="440" width="235" height="20" as="geometry"/>
|
||||||
|
</mxCell>
|
||||||
|
</root>
|
||||||
|
</mxGraphModel>
|
||||||
|
</diagram>
|
||||||
|
</mxfile>
|
||||||
@@ -14,6 +14,8 @@
- 1、Topic定时同步任务
- 2、专家服务——Topic分区热点
- 3、专家服务——Topic分区不足
- 4、专家服务——Topic资源治理
- 5、账单配置


## 1、Topic定时同步任务
@@ -119,4 +121,49 @@ TOPIC_INSUFFICIENT_PARTITION_CONFIG
        50
    ]
}
```

## 4、专家服务——Topic资源治理

首先,我们认为在一定的时间长度内,分区offset没有任何变化(即没有数据写入)的Topic,为过期的Topic。

过期Topic治理相关的动态配置(页面在运维管控->平台管理->配置管理):

配置Key:
```
EXPIRED_TOPIC_CONFIG
```

配置Value:
```json
{
    "minExpiredDay": 30,         # 过期时间大于此值才显示
    "filterRegex": ".*XXX\\s+",  # 忽略符合此正则规则的Topic
    "ignoreClusterIdList": [     # 忽略的集群
        50
    ]
}
```
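
下面用一段示意性的Java代码说明上述“分区offset在一段时间内无变化即视为过期Topic”的判断思路。其中的类名、方法名与数据来源均为示例假设,并非项目源码中的真实实现:

```java
// 示意代码:仅用于说明“过期Topic”的判断思路,类名与数据来源均为假设
import java.util.Map;

public class ExpiredTopicJudge {

    /**
     * 若 minExpiredDay 天前记录的各分区 offset 与当前 offset 完全一致,
     * 则认为该 Topic 在此期间没有数据写入,即为过期 Topic。
     */
    public static boolean isExpired(Map<Integer, Long> offsetsBefore, // minExpiredDay 天前各分区的 endOffset
                                    Map<Integer, Long> offsetsNow,    // 当前各分区的 endOffset
                                    String topicName,
                                    String filterRegex) {
        // 命中 filterRegex 的 Topic 直接忽略,不参与过期判断
        if (filterRegex != null && !filterRegex.isEmpty() && topicName.matches(filterRegex)) {
            return false;
        }
        // 任意一个分区的 offset 发生变化,说明有数据写入,不是过期 Topic
        for (Map.Entry<Integer, Long> entry : offsetsNow.entrySet()) {
            Long before = offsetsBefore.get(entry.getKey());
            if (before == null || !before.equals(entry.getValue())) {
                return false;
            }
        }
        return true;
    }
}
```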

## 5、账单配置

Logi-KafkaManager除了作为Kafka运维管控平台之外,实际上还会有一些资源定价相关的功能。

当前定价方式:以当月Topic在maxAvgDay天内的峰值流量的均值作为Topic的使用额度,当月费用 = 使用额度 * 单价 * 溢价(预留buffer)。

详细的计算逻辑见:com.xiaojukeji.kafka.manager.task.dispatch.biz.CalKafkaTopicBill 和 com.xiaojukeji.kafka.manager.task.dispatch.biz.CalTopicStatistics;

计算Topic费用相关的配置如下所示:

配置Key:
```
KAFKA_TOPIC_BILL_CONFIG
```

配置Value:

```json
{
    "maxAvgDay": 10,     # 使用额度的计算规则
    "quotaRatio": 1.5,   # 溢价率
    "priseUnitMB": 100   # 单价,即单MB/s流量多少钱
}
```
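
为便于理解上面的定价公式,下面给出一段示意性的Java代码(类名与方法名均为示例假设,真实逻辑以 CalKafkaTopicBill 为准):

```java
// 示意代码:按上文描述的“使用额度 * 单价 * 溢价”计算当月费用
public class TopicBillExample {

    /**
     * @param usageQuotaMBps 使用额度:当月 maxAvgDay 天峰值流量的均值(MB/s)
     * @param priceUnitMB    单价:每 MB/s 流量的费用(对应配置中的 priseUnitMB)
     * @param quotaRatio     溢价率(预留 buffer,对应配置中的 quotaRatio)
     * @return 当月费用
     */
    public static double calMonthlyBill(double usageQuotaMBps, double priceUnitMB, double quotaRatio) {
        return usageQuotaMBps * priceUnitMB * quotaRatio;
    }

    public static void main(String[] args) {
        // 例如:使用额度 2 MB/s,单价 100,溢价率 1.5,则当月费用为 300
        System.out.println(calMonthlyBill(2.0, 100, 1.5));
    }
}
```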
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
|
|
||||||
---
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
# 升级至`2.3.0`版本
|
|
||||||
|
|
||||||
`2.3.0`版本在`gateway_config`表增加了一个描述说明的字段,因此需要执行下面的sql进行字段的增加。
|
|
||||||
|
|
||||||
```sql
|
|
||||||
ALTER TABLE `gateway_config`
|
|
||||||
ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`;
|
|
||||||
```
|
|
||||||
39
docs/dev_guide/周期任务说明文档.md
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
| 定时任务名称或方法名 | 所在类 | 详细说明 | cron | cron说明 | 线程数量 |
|
||||||
|
| -------------------------------------- | -------------------------------------- | ------------------------------------------ | --------------- | --------------------------------------- | -------- |
|
||||||
|
| calKafkaBill | CalKafkaTopicBill | 计算Kafka使用账单 | 0 0 1 * * ? | 每天凌晨1点执行一次 | 1 |
|
||||||
|
| calRegionCapacity | CalRegionCapacity | 计算Region容量 | 0 0 0/12 * * ? | 每隔12小时执行一次,在0分钟0秒时触发 | 1 |
|
||||||
|
| calTopicStatistics | CalTopicStatistics | 定时计算Topic统计数据 | 0 0 0/4 * * ? | 每隔4小时执行一次,在0分钟0秒时触发 | 5 |
|
||||||
|
| flushBrokerTable | FlushBrokerTable | 定时刷新BrokerTable数据 | 0 0 0/1 * * ? | 每隔1小时执行一次,在0分钟0秒时触发 | 1 |
|
||||||
|
| flushExpiredTopic | FlushExpiredTopic | 定期更新过期Topic | 0 0 0/5 * * ? | 每隔5小时执行一次,在0分钟0秒时触发 | 1 |
|
||||||
|
| syncClusterTaskState | SyncClusterTaskState | 同步更新集群任务状态 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 |
|
||||||
|
| newCollectAndPublishCGData | CollectAndPublishCGData | 收集并发布消费者指标数据 | 30 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的30秒时触发 | 10 |
|
||||||
|
| collectAndPublishCommunityTopicMetrics | CollectAndPublishCommunityTopicMetrics | Topic社区指标收集 | 31 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的30秒时触发 | 5 |
|
||||||
|
| collectAndPublishTopicThrottledMetrics | CollectAndPublishTopicThrottledMetrics | 收集和发布Topic限流信息 | 11 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的11秒时触发 | 5 |
|
||||||
|
| deleteMetrics | DeleteMetrics | 定期删除Metrics信息 | 0 0/2 * * * ? | 每隔2分钟执行一次,在每分钟的0秒时触发 | 1 |
|
||||||
|
| storeDiDiAppTopicMetrics | StoreDiDiAppTopicMetrics | JMX中获取appId维度的流量信息存DB | 41 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的41秒时触发 | 5 |
|
||||||
|
| storeDiDiTopicRequestTimeMetrics | StoreDiDiTopicRequestTimeMetrics | JMX中获取的TopicRequestTimeMetrics信息存DB | 51 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的51秒时触发 | 5 |
|
||||||
|
| autoHandleTopicOrder | AutoHandleTopicOrder | 定时自动处理Topic相关工单 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 |
|
||||||
|
| automatedHandleOrder | AutomatedHandleOrder | 工单自动化审批 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 |
|
||||||
|
| flushReassignment | FlushReassignment | 定时处理分区迁移任务 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 |
|
||||||
|
| syncTopic2DB | SyncTopic2DB | 定期将未落盘的Topic刷新到DB中 | 0 0/2 * * * ? | 每隔2分钟执行一次,在每分钟的0秒时触发 | 1 |
|
||||||
|
| sinkCommunityTopicMetrics2Monitor | SinkCommunityTopicMetrics2Monitor | 定时上报Topic监控指标 | 1 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的1秒时触发 | 5 |
|
||||||
|
| flush方法 | LogicalClusterMetadataManager | 定时刷新逻辑集群元数据到缓存中 | 0/30 * * * * ? | 每隔30秒执行一次 | 1 |
|
||||||
|
| flush方法 | AccountServiceImpl | 定时刷新account信息到缓存中 | 0/5 * * * * ? | 每隔5秒执行一次 | 1 |
|
||||||
|
| ipFlush方法 | HeartBeat | 定时获取管控平台所在机器IP等信息到DB | 0/10 * * * * ? | 每隔10秒执行一次 | 1 |
|
||||||
|
| flushTopicMetrics方法 | FlushTopicMetrics | 定时刷新topic指标到缓存中 | 5 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的5秒时触发 | 1 |
|
||||||
|
| schedule方法 | FlushBKConsumerGroupMetadata | 定时刷新broker上消费组信息到缓存中 | 15 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的15秒时触发 | 1 |
|
||||||
|
| flush方法 | FlushClusterMetadata | 定时刷新物理集群元信息到缓存中 | 0/30 * * * * ? | 每隔30秒执行一次 | 1 |
|
||||||
|
| flush方法 | FlushTopicProperties | 定时刷新物理集群配置到缓存中 | 25 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的25秒时触发 | 1 |
|
||||||
|
| schedule方法 | FlushZKConsumerGroupMetadata | 定时刷新zk上的消费组信息到缓存中 | 35 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的35秒时触发 | 1 |
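
上表中的cron表达式为Spring的6位(秒级)格式。下面是一段示意性的Java代码(类名与方法体均为示例假设,并非上表中某个任务的真实源码),用于说明cron表达式与触发时机的对应关系:

```java
// 示意代码:演示 "0 0/1 * * * ?"(每隔1分钟,在每分钟的0秒触发)这类cron表达式的用法
// 注意:需要在某个 @Configuration 类上开启 @EnableScheduling,定时任务才会生效
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class DemoScheduledTask {

    // 每隔1分钟执行一次,在每分钟的0秒时触发
    @Scheduled(cron = "0 0/1 * * * ?")
    public void flush() {
        // 此处放置具体的周期逻辑,例如刷新缓存、同步状态等
    }
}
```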
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
89
docs/dev_guide/如何使用集群安装部署功能.md
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# 如何使用集群安装部署功能?
|
||||||
|
|
||||||
|
[TOC]
|
||||||
|
|
||||||
|
## 1、实现原理
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
- LogiKM上传安装包到S3服务;
|
||||||
|
- LogiKM调用夜莺-Job服务接口,创建执行[kcm_script.sh](https://github.com/didi/LogiKM/blob/master/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh)脚本的任务,kcm_script.sh脚本是安装部署Kafka集群的脚本;
|
||||||
|
- 夜莺将任务脚本下发到具体的机器上,通过夜莺Agent执行该脚本;
|
||||||
|
- kcm_script.sh脚本会进行Kafka-Broker的安装部署;
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2、使用方式
|
||||||
|
|
||||||
|
### 2.1、第一步:修改配置
|
||||||
|
|
||||||
|
**配置application.yml文件**
|
||||||
|
```yaml
|
||||||
|
#
|
||||||
|
kcm:
|
||||||
|
enabled: false # 是否开启,将其修改为true
|
||||||
|
s3: # s3 存储服务
|
||||||
|
endpoint: s3.didiyunapi.com
|
||||||
|
access-key: 1234567890
|
||||||
|
secret-key: 0987654321
|
||||||
|
bucket: logi-kafka
|
||||||
|
n9e: # 夜莺
|
||||||
|
base-url: http://127.0.0.1:8004 # 夜莺job服务地址
|
||||||
|
user-token: 12345678 # 用户的token
|
||||||
|
timeout: 300 # 单台操作的超时时间
|
||||||
|
account: root # 操作时使用的账号
|
||||||
|
script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改
|
||||||
|
logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态,这里只需要填写 http://IP:PORT 就可以了
|
||||||
|
|
||||||
|
|
||||||
|
account:
|
||||||
|
jump-login:
|
||||||
|
gateway-api: false # 网关接口
|
||||||
|
third-part-api: false # 第三方接口,将其修改为true,即允许未登录情况下调用开放的第三方接口
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.2、第二步:检查服务
|
||||||
|
|
||||||
|
**检查s3服务**
|
||||||
|
- 测试 "运维管控-》集群运维-》版本管理" 页面的上传,查看等功能是否都正常。如果存在不正常,则需要查看s3的配置是否正确;
|
||||||
|
- 如果都没有问题,则上传Kafka的以.tgz结尾的安装包以及server.properties文件;
|
||||||
|
|
||||||
|
**检查夜莺Job服务**
|
||||||
|
- 创建一个job任务,机器选择需要安装Kafka集群的机器,然后执行的命令是echo "Hello LogiKM",看能否被成功执行。如果不行,则需要检查夜莺的安装;
|
||||||
|
- 如果没有问题则表示夜莺和所需部署的机器之间的交互是没有问题的;
|
||||||
|
|
||||||
|
### 2.3、第三步:接入集群
|
||||||
|
|
||||||
|
在LogiKM的 “运维管控-》集群列表” 中接入需要安装部署的集群,**PS:此时是允许接入一个没有任何Broker的空的Kafka集群**,其中bootstrapServers配置为搭建完成后的Kafka集群地址就可以了,而ZK地址必须和集群的server.properties中的ZK地址保持一致;
|
||||||
|
|
||||||
|
### 2.4、第四步:部署集群
|
||||||
|
|
||||||
|
- 打开LogiKM的 “运维管控-》集群运维-》集群任务” 页面,点击 “新建集群任务” 按钮;
|
||||||
|
- 选择集群、任务类型、包版本、server配置及填写主机列表,然后点击确认,即可在夜莺的Job服务中心中创建一个任务出来。**PS:如果创建失败,可以查看日志确认创建失败的原因**;
|
||||||
|
- 随后可以点击详情及状态对任务进行操作;
|
||||||
|
|
||||||
|
### 2.5、可能问题
|
||||||
|
|
||||||
|
#### 2.5.1、问题一:任务执行超时、失败等
|
||||||
|
|
||||||
|
进入夜莺Job服务中心,查看对应的任务的相关日志;
|
||||||
|
|
||||||
|
- 提示安装包下载失败,则需要查看对应的s3服务是否可以直接wget下载安装包,如果不可以则需要对kcm_script.sh脚本进行修改;
|
||||||
|
- 提示调用LogiKM失败,则可以使用postman手动测试一下kcm_script.sh脚本调用LogiKM的那个接口是否有问题,如果存在问题则进行相应的修改;PS:具体接口见kcm_script.sh脚本
|
||||||
|
|
||||||
|
|
||||||
|
## 3、备注说明
|
||||||
|
|
||||||
|
- 集群安装部署,仅安装部署Kafka-Broker,不安装Kafka的ZK服务;
|
||||||
|
- 安装部署中,有任何定制化的需求,例如修改安装的目录等,可以通过修改kcm_script.sh脚本实现;
|
||||||
|
- kcm_script.sh脚本位置:[kcm_script.sh](https://github.com/didi/LogiKM/blob/master/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh);
|
||||||
53
docs/dev_guide/如何增加上报监控系统指标.md
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# 如何增加上报监控系统指标?
|
||||||
|
|
||||||
|
## 0、前言
|
||||||
|
|
||||||
|
LogiKM是 **一站式`Apache Kafka`集群指标监控与运维管控平台** ,当前会将消费Lag,Topic流量等指标上报到监控系统中,从而方便用户在监控系统中对这些指标配置监控告警规则,进而达到监控自身客户端是否正常的目的。
|
||||||
|
|
||||||
|
那么,如果我们想增加一个新的监控指标,应该如何做呢,比如我们想监控Broker的流量,监控Broker的存活信息,监控集群Controller个数等等。
|
||||||
|
|
||||||
|
在具体介绍之前,需要先了解:Kafka监控相关的信息,基本都存储于Broker、Jmx以及ZK中。当前LogiKM也已经具备从这三个地方获取数据的基本能力,因此基于LogiKM再获取其他指标,总体上还是非常方便的。
|
||||||
|
|
||||||
|
这里我们就以已经获取到的Topic流量信息为例,看LogiKM如何实现Topic指标的获取并上报的。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1、确定指标位置
|
||||||
|
|
||||||
|
基于对Kafka的了解,我们知道Topic流量信息这个指标是存储于Jmx中的,因此我们需要从Jmx中获取。大家如果对于自己所需要获取的指标存储在何处不太清楚,可以加入我们维护的Kafka中文社区(README中有二维码)进行沟通交流。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2、指标获取
|
||||||
|
|
||||||
|
Topic流量指标的获取详细见图中说明。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3、指标上报
|
||||||
|
|
||||||
|
上一步我们已经采集到Topic流量指标了,下一步就是将该指标上报到监控系统,这块只需要按照监控系统要求的格式,将数据上报即可。
|
||||||
|
|
||||||
|
LogiKM中有一个monitor模块,具体的如下图所示:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
## 4、补充说明
|
||||||
|
|
||||||
|
监控系统对接的相关内容见:
|
||||||
|
|
||||||
|
[监控系统集成](./monitor_system_integrate_with_self.md)
|
||||||
|
|
||||||
|
[监控系统集成例子——集成夜莺](./monitor_system_integrate_with_n9e.md)
|
||||||
@@ -51,13 +51,16 @@ custom:
|
|||||||
didi:
|
didi:
|
||||||
app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
|
app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
|
||||||
topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
|
topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
|
||||||
topic-throttled-metrics: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
|
topic-throttled-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
|
||||||
save-days: 7 #指标在DB中保持的天数,-1表示永久保存,7表示保存近7天的数据
|
save-days: 7 #指标在DB中保持的天数,-1表示永久保存,7表示保存近7天的数据
|
||||||
|
|
||||||
# 任务相关的开关
|
# 任务相关的开关
|
||||||
task:
|
task:
|
||||||
op:
|
op:
|
||||||
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
|
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
|
||||||
|
order-auto-exec: # 工单自动化审批线程的开关
|
||||||
|
topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启
|
||||||
|
app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启
|
||||||
|
|
||||||
account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化
|
account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化
|
||||||
ldap:
|
ldap:
|
||||||
|
|||||||
@@ -31,17 +31,23 @@
|
|||||||
|
|
||||||
**2、源代码进行打包**
|
**2、源代码进行打包**
|
||||||
|
|
||||||
下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`sh build.sh`命令即可,执行完成之后会在`output/kafka-manager-xxx`目录下面生成一个jar包。
|
下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`mvn -Prelease-kafka-manager -Dmaven.test.skip=true clean install -U `命令即可,
|
||||||
|
执行完成之后会在`distribution/target`目录下面生成一个`kafka-manager-*.tar.gz`。
|
||||||
|
和一个`kafka-manager-*.zip` 文件,任意一个压缩包都可以;
|
||||||
|
当然此时同级目录有一个已经解压好的文件夹;
|
||||||
|
|
||||||
对于`windows`环境的用户,估计执行不了`sh build.sh`命令,因此可以直接执行`mvn install`,然后在`kafka-manager-web/target`目录下生成一个kafka-manager-web-xxx.jar的包。
|
|
||||||
|
|
||||||
获取到jar包之后,我们继续下面的步骤。
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 3、MySQL-DB初始化
|
## 3. 解压安装包
|
||||||
|
解压完成后; 在文件目录中可以看到有`kafka-manager/conf/create_mysql_table.sql` 有个mysql初始化文件
|
||||||
|
先初始化DB
|
||||||
|
|
||||||
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`logi_kafka_manager`。
|
|
||||||
|
## 4、MySQL-DB初始化
|
||||||
|
|
||||||
|
执行[create_mysql_table.sql](../../distribution/conf/create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`logi_kafka_manager`。
|
||||||
|
|
||||||
```
|
```
|
||||||
# 示例:
|
# 示例:
|
||||||
@@ -50,15 +56,38 @@ mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 4、启动
|
## 5.修改配置
|
||||||
|
请将`conf/application.yml.example` 文件复制一份出来命名为`application.yml` 放在同级目录:conf/application.yml ;
|
||||||
|
并且修改配置; 当然不修改的话 就会用默认的配置;
|
||||||
|
至少需要将MySQL相关的配置修改为自己环境的配置。
|
||||||
|
|
||||||
```
|
|
||||||
# application.yml 是配置文件,最简单的是仅修改MySQL相关的配置即可启动
|
|
||||||
|
|
||||||
nohup java -jar kafka-manager.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
|
## 6、启动/关闭
|
||||||
```
|
解压包中有启动和关闭脚本
|
||||||
|
`kafka-manager/bin/shutdown.sh`
|
||||||
|
`kafka-manager/bin/startup.sh`
|
||||||
|
|
||||||
### 5、使用
|
执行 sh startup.sh 启动
|
||||||
|
执行 sh shutdown.sh 关闭
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 6、使用
|
||||||
|
|
||||||
本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md)
|
本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md)
|
||||||
|
|
||||||
|
### 7. 升级
|
||||||
|
|
||||||
|
如果是升级版本,请查看文件 [kafka-manager 升级手册](../../distribution/upgrade_config.md)
|
||||||
|
在您下载的启动包(V2.5及其后)中也有记录,在 kafka-manager/upgrade_config.md 中
|
||||||
|
|
||||||
|
|
||||||
|
### 8. 在IDE中启动
|
||||||
|
> 如果想参与开发或者想在IDE中启动的话
|
||||||
|
> 先执行 `mvn -Dmaven.test.skip=true clean install -U `
|
||||||
|
>
|
||||||
|
> 然后这个时候可以选择去 [pom.xml](../../pom.xml) 中将`kafka-manager-console`模块注释掉;
|
||||||
|
> 注释是因为每次install的时候都会把前端文件`kafka-manager-console`重新打包进`kafka-manager-web`
|
||||||
|
>
|
||||||
|
> 完事之后,只需要直接用IDE启动运行`kafka-manager-web`模块中的
|
||||||
|
> com.xiaojukeji.kafka.manager.web.MainApplication main方法就行了
|
||||||
49
docs/user_guide/call_api_bypass_login.md
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# 登录绕过
|
||||||
|
|
||||||
|
## 背景
|
||||||
|
|
||||||
|
现在除了开放出来的第三方接口,其他接口都需要走登录认证。
|
||||||
|
|
||||||
|
但是第三方接口不多,开放出来的能力有限,而其他接口又需要登录才能调用,使用起来非常麻烦。
|
||||||
|
|
||||||
|
因此,新增了一个登录绕过的功能,为一些紧急临时的需求,提供一个调用不需要登录的能力。
|
||||||
|
|
||||||
|
## 使用方式
|
||||||
|
|
||||||
|
步骤一:接口调用时,在header中,增加如下信息:
|
||||||
|
```shell
|
||||||
|
# 表示开启登录绕过
|
||||||
|
Trick-Login-Switch : on
|
||||||
|
|
||||||
|
# 登录绕过的用户, 这里可以是admin, 或者是其他的, 但是必须在运维管控->平台管理->用户管理中设置了该用户。
|
||||||
|
Trick-Login-User : admin
|
||||||
|
```
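
下面是一段示意性的Java调用代码,演示如何在请求Header中携带上述两个字段(其中的接口路径仅为示例假设,请替换为实际需要调用的接口):

```java
// 示意代码:调用接口时在Header中携带登录绕过所需的两个字段(接口路径仅为示例)
import java.net.HttpURLConnection;
import java.net.URL;

public class TrickLoginExample {
    public static void main(String[] args) throws Exception {
        // 示例接口路径,请替换为实际需要调用的接口
        URL url = new URL("http://127.0.0.1:8080/api/v1/xxx");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // 表示开启登录绕过
        conn.setRequestProperty("Trick-Login-Switch", "on");
        // 以admin用户身份绕过登录,该用户需在用户管理及SECURITY.TRICK_USERS配置中存在
        conn.setRequestProperty("Trick-Login-User", "admin");
        System.out.println("HTTP状态码: " + conn.getResponseCode());
    }
}
```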
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
步骤二:在运维管控->平台管理->平台配置上,设置允许了该用户以绕过的方式登录
|
||||||
|
```shell
|
||||||
|
# 设置的key,必须是这个
|
||||||
|
SECURITY.TRICK_USERS
|
||||||
|
|
||||||
|
# 设置的value,是json数组的格式,例如
|
||||||
|
[ "admin", "logi"]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
步骤三:解释说明
|
||||||
|
|
||||||
|
设置完成上面两步之后,就可以直接调用需要登录的接口了。
|
||||||
|
|
||||||
|
但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。
|
||||||
|
|
||||||
@@ -20,8 +20,17 @@
|
|||||||
- 8、`topic biz data not exist`错误及处理方式
|
- 8、`topic biz data not exist`错误及处理方式
|
||||||
- 9、进程启动后,如何查看API文档
|
- 9、进程启动后,如何查看API文档
|
||||||
- 10、如何创建告警组?
|
- 10、如何创建告警组?
|
||||||
- 11、连接信息、耗时信息为什么没有数据?
|
- 11、连接信息、耗时信息、磁盘信息为什么没有数据?
|
||||||
- 12、逻辑集群申请审批通过之后为什么看不到逻辑集群?
|
- 12、逻辑集群申请审批通过之后为什么看不到逻辑集群?
|
||||||
|
- 13、heartbeat表关联业务和使用场景是什么?
|
||||||
|
- 14、集群的删除,是否会真正的删除集群?
|
||||||
|
- 15、APP(应用)如何被使用起来?
|
||||||
|
- 16、为什么下线应用提示operation forbidden?
|
||||||
|
- 17、删除Topic成功,为什么过一会儿之后又出现了?
|
||||||
|
- 18、如何在不登录的情况下,调用一些需要登录的接口?
|
||||||
|
- 19、为什么无法看到连接信息、耗时信息等指标?
|
||||||
|
- 20、AppID鉴权、生产消费配额不起作用
|
||||||
|
- 21、如何查看周期任务说明文档
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -70,7 +79,7 @@
|
|||||||
|
|
||||||
- 3、数据库时区问题。
|
- 3、数据库时区问题。
|
||||||
|
|
||||||
检查MySQL的topic_metrics表,查看是否有数据,如果有数据,那么再检查设置的时区是否正确。
|
检查MySQL的cluster表的gmt_modify字段,做一个update动作,看这个gmt_modify时间是否是当前时间,如果不是,那么就是时区问题了。时区不对问题具体可以搜索一下看如何解决。
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -113,11 +122,14 @@
|
|||||||
|
|
||||||
### 10、如何创建告警组?
|
### 10、如何创建告警组?
|
||||||
|
|
||||||
这块需要配合监控系统进行使用,现在默认已经实现了夜莺的对接,当然也可以对接自己内部的监控系统,不过需要实现一些接口。
|
告警组的创建需要到Logi-KM对接的监控系统中创建,比如我们现在默认是对接了夜莺,那么告警组需要到夜莺中创建,如果没有安装夜莺,那么需要安装一下夜莺并进行对接。当然,这里也可以对接自己内部的监控系统,不过需要实现一些接口。
|
||||||
|
|
||||||
具体的文档可见:[监控功能对接夜莺](../dev_guide/monitor_system_integrate_with_n9e.md)、[监控功能对接其他系统](../dev_guide/monitor_system_integrate_with_self.md)
|
具体的文档可见:[监控功能对接夜莺](../dev_guide/monitor_system_integrate_with_n9e.md)、[监控功能对接其他系统](../dev_guide/monitor_system_integrate_with_self.md)
|
||||||
|
|
||||||
### 11、连接信息、耗时信息为什么没有数据?
|
那么在夜莺中,如何创建告警组呢?
|
||||||
|
需要前往夜莺平台-用户资源中心-团队管理中新建团队。新建过团队之后再次回到Logi-KM中刷新页面就可以在该下拉框中选择告警接收组了。
|
||||||
|
|
||||||
|
### 11、连接信息、耗时信息、磁盘信息为什么没有数据?
|
||||||
|
|
||||||
这块需要结合滴滴内部的kafka-gateway一同使用才会有数据,滴滴kafka-gateway暂未开源。
|
这块需要结合滴滴内部的kafka-gateway一同使用才会有数据,滴滴kafka-gateway暂未开源。
|
||||||
|
|
||||||
@@ -126,3 +138,82 @@
|
|||||||
逻辑集群的申请与审批仅仅只是一个工单流程,并不会去实际创建逻辑集群,逻辑集群的创建还需要手动去创建。
|
逻辑集群的申请与审批仅仅只是一个工单流程,并不会去实际创建逻辑集群,逻辑集群的创建还需要手动去创建。
|
||||||
|
|
||||||
具体的操作可见:[kafka-manager 接入集群](add_cluster/add_cluster.md)。
|
具体的操作可见:[kafka-manager 接入集群](add_cluster/add_cluster.md)。
|
||||||
|
|
||||||
|
|
||||||
|
### 13、heartbeat表关联业务和使用场景是什么?
|
||||||
|
|
||||||
|
做任务抢占用的。
|
||||||
|
|
||||||
|
KM支持HA的方式部署,那么部署多台的时候,就会出现每一台都可能去做指标收集的事情,这块就使用heartbeat表做KM的存活性判断,然后进行任务的抢占或者是均衡。
|
||||||
|
|
||||||
|
更多详细的内容,可以看一下源码中,heartbeat表在哪里被使用了。
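
下面用一段简化的示意代码说明heartbeat表的典型用法(其中的方法与分配策略均为示例假设,实际逻辑以源码为准):

```java
// 示意代码:基于heartbeat表判断哪些KM实例存活,并据此决定当前实例是否认领某个任务
import java.util.List;

public class HeartbeatTaskBalanceExample {

    /**
     * 每台KM实例周期性地把自己的IP与当前时间写入heartbeat表;
     * 调度时取出“最近N秒内有心跳”的存活实例列表,按任务编号取模决定由谁执行,
     * 从而避免多台实例重复执行同一个任务。
     */
    public static boolean shouldRunOnThisHost(List<String> aliveHostsSortedByIp,
                                              String currentHostIp,
                                              long taskId) {
        if (aliveHostsSortedByIp.isEmpty()) {
            return false;
        }
        int idx = (int) (taskId % aliveHostsSortedByIp.size());
        return aliveHostsSortedByIp.get(idx).equals(currentHostIp);
    }
}
```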
|
||||||
|
|
||||||
|
|
||||||
|
### 14、集群的删除,是否会真正的删除集群?
|
||||||
|
|
||||||
|
Logi-KM的运维管控,集群列表中的集群删除,仅仅只是将该集群从Logi-KM中进行删除,并不会对真正的物理集群做什么操作。
|
||||||
|
|
||||||
|
|
||||||
|
### 15、APP(应用)如何被使用起来?
|
||||||
|
|
||||||
|
app在Logi-KM中可以近似理解为租户,或者是kafka里面的一个账号的概念。
|
||||||
|
|
||||||
|
界面中显示的app信息、权限信息等,在平台层面仅仅只是控制Topic或集群在平台上的可见性,如果使用的是社区版本的Kafka,那么实际上是不能真正的管控到客户端对Topic的生产和消费。
|
||||||
|
|
||||||
|
但是如果是使用的滴滴的Kafka-Gateway,那么是可以做到对客户端的生产和消费的权限管控。滴滴的Kafka-Gateway暂未开源,属于企业服务,具体的可以入群交流,群地址在README中。
|
||||||
|
|
||||||
|
|
||||||
|
### 16、为什么下线应用提示operation forbidden?
|
||||||
|
|
||||||
|
**原因一:**
|
||||||
|
|
||||||
|
该应用还存在对Topic的权限,因此导致下线失败。具体查看的位置在"Topic管理-》应用管理-》详情",可以看到应用对哪些Topic还有权限。
|
||||||
|
|
||||||
|
只有当权限全部去除之后,才可以下线应用。
|
||||||
|
|
||||||
|
**原因二:**
|
||||||
|
|
||||||
|
使用的是2.4.0之前的旧版本,旧版本存在缓存更新的BUG,建议升级至最新的版本,或者简单粗暴的就是重启一下KM。
|
||||||
|
|
||||||
|
|
||||||
|
### 17、删除Topic成功,为什么过一会儿之后又出现了?
|
||||||
|
|
||||||
|
**原因说明:**
|
||||||
|
|
||||||
|
Logi-KM会去请求Topic的endoffset信息,要获取这个信息就需要发送metadata请求,发送metadata请求的时候,如果集群允许自动创建Topic,那么当Topic不存在时,就会自动将该Topic创建出来。
|
||||||
|
|
||||||
|
|
||||||
|
**问题解决:**
|
||||||
|
|
||||||
|
因为在Logi-KM上,禁止Kafka客户端内部元信息获取这个动作非常的难做到,因此短时间内这个问题不好从Logi-KM上解决。
|
||||||
|
|
||||||
|
当然,对于不存在的Topic,Logi-KM是不会进行元信息请求的,因此也不用担心会莫名其妙的创建一个Topic出来。
|
||||||
|
|
||||||
|
但是,另外一点,对于开启允许Topic自动创建的集群,建议是关闭该功能,开启是非常危险的,如果关闭之后,Logi-KM也不会有这个问题。
|
||||||
|
|
||||||
|
最后这里举个开启这个配置后,非常危险的代码例子吧:
|
||||||
|
|
||||||
|
```java
|
||||||
|
for (int i= 0; i < 100000; ++i) {
|
||||||
|
// 如果是客户端类似这样写的,那么一启动,那么将创建10万个Topic出来,集群元信息瞬间爆炸,controller可能就不可服务了。
|
||||||
|
producer.send(new ProducerRecord<String, String>("logi_km" + i,"hello logi_km"));
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 18、如何在不登录的情况下,调用一些需要登录的接口?
|
||||||
|
|
||||||
|
具体见:[登录绕过](./call_api_bypass_login.md)
|
||||||
|
|
||||||
|
### 19、为什么无法看到连接信息、耗时信息等指标?
|
||||||
|
连接信息、耗时信息等指标依赖于滴滴kafka-gateway和滴滴Kafka引擎,通过gateway可获取到连接到该Topic的应用情况,提高对Topic的管控能力。通过滴滴Kafka引擎的自带埋点,可获取到耗时信息,提升Topic生产消费时的可观测性。这部分内容是属于商业版的范畴,暂未开源。如有需要,可进行商业合作。
|
||||||
|
|
||||||
|
具体见:[滴滴Logi-KafkaManager开源版和商业版特性对比](../开源版与商业版特性对比.md)
|
||||||
|
|
||||||
|
### 20、AppID鉴权、生产消费配额不起作用?
|
||||||
|
AppID鉴权、生产消费配额依赖于滴滴kafka-gateway,通过gateway进行身份鉴权和生产消费限流,可避免用户无限制的使用集群的流量,流量大的用户会耗尽系统资源从而影响其他用户的使用,造成集群的节点故障。这部分内容是属于商业版的范畴,暂未开源。如有需要,可进行商业合作。
|
||||||
|
|
||||||
|
具体见:[滴滴Logi-KafkaManager开源版和商业版特性对比](../开源版与商业版特性对比.md)
|
||||||
|
|
||||||
|
### 21、如何查看周期任务说明文档
|
||||||
|
|
||||||
|
具体见:[周期任务说明文档](../dev_guide/周期任务说明文档.md)
|
||||||
@@ -4,11 +4,16 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## 报警策略-监控指标说明
|
||||||
|
|
||||||
|
| 指标 | 含义 |备注 |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| online-kafka-consumer-lag | 消费时,按照分区的维度进行监控lag数 | lag表示有多少数据没有被消费,因为按照分区的维度监控,所以告警时一般会有分区信息 |
|
||||||
|
| online-kafka-consumer-maxLag | 消费时,按照整个Topic的维度,监控Topic所有的分区里面的那个最大的lag | 比如每个分区的lag分别是3、5、7,那么maxLag的值就是max(3,5,7)=7 |
|
||||||
|
| online-kafka-consumer-maxDelayTime | 消费时,按照Topic维度监控预计的消费延迟 | 这块是按照lag和messagesIn之间的关系计算出来的,可能会有误差 |
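
作为补充说明,maxDelayTime的估算可以近似理解为用lag除以消息写入/消费速率得到的时间。下面的示意代码仅用于说明这一换算关系(并非源码中的精确算法):

```java
// 示意代码:用lag与messagesIn(条/秒)估算消费延迟(秒),仅说明换算关系,实际算法以源码为准
public class ConsumerDelayEstimateExample {
    public static double estimateDelaySeconds(long lag, double messagesInPerSec) {
        if (messagesInPerSec <= 0) {
            return 0; // 无法估算时返回0,实际实现中可能有其他兜底处理
        }
        return lag / messagesInPerSec;
    }
}
```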
|
||||||
|
|
||||||
## 报警策略-报警函数介绍
|
## 报警策略-报警函数介绍
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
| 类别 | 函数 | 含义 |函数文案 |备注 |
|
| 类别 | 函数 | 含义 |函数文案 |备注 |
|
||||||
| --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- |
|
||||||
| 发生次数 |all,n | 最近$n个周期内,全发生 | 连续发生(all) | |
|
| 发生次数 |all,n | 最近$n个周期内,全发生 | 连续发生(all) | |
|
||||||
@@ -609,10 +609,10 @@ Lag:表示该消费客户端是否有堆积;等于 partition offset-consume
|
|||||||
|
|
||||||
集群类型:选择创建的集群为“独享”还是“独立”。

* 独享集群意味着,您独自拥有一个集群;
* 独享集群意味着, 您拥有一个集群中,部分broker的使用权限。

* 独立集群意味着,您拥有一个集群中,部分broker的使用权限。
* 独立集群意味着,您独自拥有一个物理集群;

* <font color = red>共享集群意味着,大家共用一个集群及其中broker。</font>
|
||||||
|
|
||||||
|
|||||||
55
docs/开源版与商业版特性对比.md
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**开源版、商业版对比**
|
||||||
|
|
||||||
|
纲要:Logi-KafkaManager的商业特性是强依赖于滴滴Kafka Gateway和滴滴Kafka引擎。
|
||||||
|
滴滴KafkaGateway主要负责:服务发现、安全管控(身份鉴权、生产消费鉴权等)、流量管控(应用配额等)等;
|
||||||
|
滴滴Kafka引擎主要负责:更丰富的监控指标(broker实时耗时、压缩指标、分区落盘等)、磁盘过载保护等
|
||||||
|
备注:两个版本的产品功能页面是一样的。区别在于开源版未使用滴滴KafkaGateway(滴滴Kafka引擎),部分产品功能/功能不起作用或者页面无数据
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
| 模块 |对比指标 |底层依赖 |开源版 |商业版 |备注 |
|
||||||
|
| --- | --- | --- | --- | --- | --- |
|
||||||
|
| 服务发现 | bootstrap地址变更对客户端无影响 | Gateway | | 是| |
|
||||||
|
| 安全管控 | 身份鉴权(appID+password) | Gateway | | 是 | |
|
||||||
|
| | 权限鉴权(Topic+appID) | Gateway | | 是 | |
|
||||||
|
| 指标监控 | Topic实时流量、历史流量 | | 是 | 是 | |
|
||||||
|
| | Broker实时耗时、历史耗时 | 引擎 | | 是 | |
|
||||||
|
| | 分区落盘 | 引擎 | | 是 | |
|
||||||
|
| | Topic里的数据压缩格式 | 引擎 | | 是 | |
|
||||||
|
| | 连接信息(Topic上有哪些应用连接) | Gateway | | 是| |
|
||||||
|
| | 流量管控(应用配额、生产消费限流等) | Gateway | | 是 | |
|
||||||
|
| 监控报警 | | | 是 | 是 | 监控指标上报,需对接外部监控系统(夜莺or企业内部监控系统) |
|
||||||
|
| Topic运维 | 申请分区 | | 是 | 是 | |
|
||||||
|
| | 调整配额 | Gateway | | 是 | |
|
||||||
|
| | Topic数据采样 | | 是 | 是 | |
|
||||||
|
| | 消费组管理(重置消费偏移等) | | 是 | 是 | |
|
||||||
|
| 集群管理 | 集群接入(部署) | | 是 | 是 | 需手动部署集群,或借助外部的自动化部署系统(夜莺)来部署系统 |
|
||||||
|
| | 集群指标监控 | | 是 | 是 | |
|
||||||
|
| | 按照Region、逻辑集群进行管理 | | 是 | 是 | |
|
||||||
|
| | Topic迁移 | | 是 | 是| |
|
||||||
|
| | 集群任务(集群版本管理、升级、扩缩容、回滚等) | | 是 | 是 | 需借助夜莺或自动化部署系统来实现 |
|
||||||
|
| | 磁盘过载保护 | 引擎 | | 是 | |
|
||||||
|
| | 指定broker作为优选controller | Gateway | | 是 | |
|
||||||
|
| Gateway管理 | 管理 Gateway的配置文件 | Gateway | | 是 | |
|
||||||
|
| 资源治理 | 专家服务(Topic分区热点、Topic分区不足、Topic长期未使用、Topic流量异常) | | 是 | 是 | 开源版:具备问题发现与基础的问题解决能力;商业版:可在此基础上,融入滴滴内部的资源治理经验,提供更加专家化的问题解决方法 |
|
||||||
|
| | 健康分 | | 是 | 是 | 开源版:具备基础的健康分算法;商业版:可融入更多的指标统计,及定制化的健康分算法 |
|
||||||
|
| 运营管理 | 资源审批(应用申请、Topic申请、分区申请、配额申请、集群申请等,都需要通过工单进行审批) | |是 | 是 | |
|
||||||
|
| | 账单体系(根据流量核算Topic、集群费用) | | 是 | 是| |
|
||||||
|
|
||||||
|
|
||||||
|
**总结**
|
||||||
|
|
||||||
|
滴滴LogiKM的商业特性体现在滴滴Kafka Gateway、滴滴Kafka引擎、内部沉淀出的资源治理专家经验、可定制化的健康分算法。
|
||||||
|
从场景来看,滴滴Logi-KafkaManager的开源版本在Kafka集群运维、Topic管理、监控告警、资源治理等Kafka核心场景都能充分满足开源用户的使用需求,并且有着出色的表现。而商业版相较于开源版,在安全管控、流量管控、更丰富的指标监控、资源治理专家经验等方面具有明显提升,更加符合企业业务需求。
|
||||||
|
除此之外,商业版还可根据企业实际需求对平台源码进行定制化改造,并提供运维保障,稳定性保障,运营保障等服务。
|
||||||
|
|
||||||
@@ -21,15 +21,12 @@
|
|||||||
<java_target_version>1.8</java_target_version>
|
<java_target_version>1.8</java_target_version>
|
||||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||||
<file_encoding>UTF-8</file_encoding>
|
<file_encoding>UTF-8</file_encoding>
|
||||||
|
|
||||||
<spring-version>5.1.3.RELEASE</spring-version>
|
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-web</artifactId>
|
<artifactId>spring-web</artifactId>
|
||||||
<version>${spring-version}</version>
|
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<!-- http -->
|
<!-- http -->
|
||||||
@@ -109,5 +106,11 @@
|
|||||||
<groupId>junit</groupId>
|
<groupId>junit</groupId>
|
||||||
<artifactId>junit</artifactId>
|
<artifactId>junit</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.projectlombok</groupId>
|
||||||
|
<artifactId>lombok</artifactId>
|
||||||
|
<scope>compile</scope>
|
||||||
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
</project>
|
</project>
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
package com.xiaojukeji.kafka.manager.common.bizenum;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @author zengqiao
|
|
||||||
* @date 20/7/27
|
|
||||||
*/
|
|
||||||
public enum ApiLevelEnum {
|
|
||||||
LEVEL_0(0),
|
|
||||||
LEVEL_1(1),
|
|
||||||
LEVEL_2(2),
|
|
||||||
LEVEL_3(3)
|
|
||||||
;
|
|
||||||
|
|
||||||
private int level;
|
|
||||||
|
|
||||||
ApiLevelEnum(int level) {
|
|
||||||
this.level = level;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -24,18 +24,10 @@ public enum ConsumeHealthEnum {
|
|||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setCode(Integer code) {
|
|
||||||
this.code = code;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setMessage(String message) {
|
|
||||||
this.message = message;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "ConsumeHealthEnum{" +
|
return "ConsumeHealthEnum{" +
|
||||||
|
|||||||
@@ -19,7 +19,10 @@ public enum DBStatusEnum {
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setStatus(int status) {
|
@Override
|
||||||
this.status = status;
|
public String toString() {
|
||||||
|
return "DBStatusEnum{" +
|
||||||
|
"status=" + status +
|
||||||
|
'}';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -21,18 +21,10 @@ public enum IDCEnum {
|
|||||||
return idc;
|
return idc;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setIdc(String idc) {
|
|
||||||
this.idc = idc;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getName() {
|
public String getName() {
|
||||||
return name;
|
return name;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setName(String name) {
|
|
||||||
this.name = name;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "IDCEnum{" +
|
return "IDCEnum{" +
|
||||||
|
|||||||
@@ -21,10 +21,6 @@ public enum KafkaBrokerRoleEnum {
|
|||||||
return role;
|
return role;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setRole(String role) {
|
|
||||||
this.role = role;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "KafkaBrokerRoleEnum{" +
|
return "KafkaBrokerRoleEnum{" +
|
||||||
|
|||||||
@@ -24,18 +24,10 @@ public enum KafkaClientEnum {
|
|||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setCode(Integer code) {
|
|
||||||
this.code = code;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getName() {
|
public String getName() {
|
||||||
return name;
|
return name;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setName(String name) {
|
|
||||||
this.name = name;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "KafkaClientEnum{" +
|
return "KafkaClientEnum{" +
|
||||||
|
|||||||
@@ -18,4 +18,11 @@ public enum OffsetResetTypeEnum {
|
|||||||
public Integer getCode() {
|
public Integer getCode() {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "OffsetResetTypeEnum{" +
|
||||||
|
"code=" + code +
|
||||||
|
'}';
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -27,4 +27,12 @@ public enum OperationStatusEnum {
|
|||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "OperationStatusEnum{" +
|
||||||
|
"code=" + code +
|
||||||
|
", message='" + message + '\'' +
|
||||||
|
'}';
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -15,9 +15,9 @@ public enum PeakFlowStatusEnum {
|
|||||||
|
|
||||||
;
|
;
|
||||||
|
|
||||||
public Integer code;
|
private Integer code;
|
||||||
|
|
||||||
public String message;
|
private String message;
|
||||||
|
|
||||||
PeakFlowStatusEnum(Integer code, String message) {
|
PeakFlowStatusEnum(Integer code, String message) {
|
||||||
this.code = code;
|
this.code = code;
|
||||||
@@ -28,18 +28,10 @@ public enum PeakFlowStatusEnum {
|
|||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setCode(Integer code) {
|
|
||||||
this.code = code;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setMessage(String message) {
|
|
||||||
this.message = message;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "PeakFlowStatusEnum{" +
|
return "PeakFlowStatusEnum{" +
|
||||||
|
|||||||
@@ -29,4 +29,12 @@ public enum RebalanceDimensionEnum {
|
|||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "RebalanceDimensionEnum{" +
|
||||||
|
"code=" + code +
|
||||||
|
", message='" + message + '\'' +
|
||||||
|
'}';
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -43,18 +43,10 @@ public enum TaskStatusEnum {
|
|||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setCode(Integer code) {
|
|
||||||
this.code = code;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setMessage(String message) {
|
|
||||||
this.message = message;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "TaskStatusEnum{" +
|
return "TaskStatusEnum{" +
|
||||||
@@ -64,9 +56,6 @@ public enum TaskStatusEnum {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public static Boolean isFinished(Integer code) {
|
public static Boolean isFinished(Integer code) {
|
||||||
if (code >= FINISHED.getCode()) {
|
return code >= FINISHED.getCode();
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -45,11 +45,6 @@ public enum TaskStatusReassignEnum {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public static Boolean isFinished(Integer code) {
|
public static Boolean isFinished(Integer code) {
|
||||||
if (SUCCEED.getCode().equals(code)
|
return SUCCEED.getCode().equals(code) || FAILED.getCode().equals(code) || CANCELED.getCode().equals(code);
|
||||||
|| FAILED.getCode().equals(code)
|
|
||||||
|| CANCELED.getCode().equals(code)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -33,4 +33,12 @@ public enum TopicAuthorityEnum {
|
|||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "TopicAuthorityEnum{" +
|
||||||
|
"code=" + code +
|
||||||
|
", message='" + message + '\'' +
|
||||||
|
'}';
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,4 +29,12 @@ public enum TopicExpiredStatusEnum {
|
|||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "TopicExpiredStatusEnum{" +
|
||||||
|
"status=" + status +
|
||||||
|
", message='" + message + '\'' +
|
||||||
|
'}';
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,18 +23,10 @@ public enum TopicOffsetChangedEnum {
|
|||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setCode(Integer code) {
|
|
||||||
this.code = code;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getMessage() {
|
public String getMessage() {
|
||||||
return message;
|
return message;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setMessage(String message) {
|
|
||||||
this.message = message;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "TopicOffsetChangedEnum{" +
|
return "TopicOffsetChangedEnum{" +
|
||||||
|
|||||||
@@ -5,11 +5,11 @@ package com.xiaojukeji.kafka.manager.common.bizenum.gateway;
|
|||||||
* @date 20/7/28
|
* @date 20/7/28
|
||||||
*/
|
*/
|
||||||
public enum GatewayConfigKeyEnum {
|
public enum GatewayConfigKeyEnum {
|
||||||
SD_CLUSTER_ID("SERVICE_DISCOVERY_CLUSTER_ID", "SERVICE_DISCOVERY_CLUSTER_ID"),
|
SD_CLUSTER_ID("SD_CLUSTER_ID", "SD_CLUSTER_ID"),
|
||||||
SD_QUEUE_SIZE("SERVICE_DISCOVERY_QUEUE_SIZE", "SERVICE_DISCOVERY_QUEUE_SIZE"),
|
SD_QUEUE_SIZE("SD_QUEUE_SIZE", "SD_QUEUE_SIZE"),
|
||||||
SD_APP_ID_RATE("SERVICE_DISCOVERY_APPID_RATE", "SERVICE_DISCOVERY_APPID_RATE"),
|
SD_APP_RATE("SD_APP_RATE", "SD_APP_RATE"),
|
||||||
SD_IP_RATE("SERVICE_DISCOVERY_IP_RATE", "SERVICE_DISCOVERY_IP_RATE"),
|
SD_IP_RATE("SD_IP_RATE", "SD_IP_RATE"),
|
||||||
SD_SP_RATE("SERVICE_DISCOVERY_SP_RATE", "SERVICE_DISCOVERY_SP_RATE"),
|
SD_SP_RATE("SD_SP_RATE", "SD_SP_RATE"),
|
||||||
|
|
||||||
;
|
;
|
||||||
|
|
||||||
@@ -26,18 +26,10 @@ public enum GatewayConfigKeyEnum {
|
|||||||
return configType;
|
return configType;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setConfigType(String configType) {
|
|
||||||
this.configType = configType;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getConfigName() {
|
public String getConfigName() {
|
||||||
return configName;
|
return configName;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setConfigName(String configName) {
|
|
||||||
this.configName = configName;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "GatewayConfigKeyEnum{" +
|
return "GatewayConfigKeyEnum{" +
|
||||||
|
|||||||
@@ -12,4 +12,7 @@ public class ApiLevelContent {
|
|||||||
public static final int LEVEL_NORMAL_3 = 3;
|
public static final int LEVEL_NORMAL_3 = 3;
|
||||||
|
|
||||||
public static final int LEVEL_DEFAULT_4 = 4;
|
public static final int LEVEL_DEFAULT_4 = 4;
|
||||||
|
|
||||||
|
private ApiLevelContent() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -22,4 +22,7 @@ public class ApiPrefix {
|
|||||||
|
|
||||||
// gateway
|
// gateway
|
||||||
public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX;
|
public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX;
|
||||||
|
|
||||||
|
private ApiPrefix() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -30,4 +30,7 @@ public class ConfigConstant {
|
|||||||
public static final String BROKER_CAPACITY_LIMIT_CONFIG_KEY = "BROKER_CAPACITY_LIMIT_CONFIG";
|
public static final String BROKER_CAPACITY_LIMIT_CONFIG_KEY = "BROKER_CAPACITY_LIMIT_CONFIG";
|
||||||
|
|
||||||
public static final String KAFKA_CLUSTER_DO_CONFIG_KEY = "KAFKA_CLUSTER_DO_CONFIG";
|
public static final String KAFKA_CLUSTER_DO_CONFIG_KEY = "KAFKA_CLUSTER_DO_CONFIG";
|
||||||
|
|
||||||
|
private ConfigConstant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ public class Constant {
|
|||||||
|
|
||||||
public static final Integer MAX_AVG_BYTES_DURATION = 10;
|
public static final Integer MAX_AVG_BYTES_DURATION = 10;
|
||||||
|
|
||||||
public static final Integer BATCH_INSERT_SIZE = 50;
|
public static final Integer BATCH_INSERT_SIZE = 30;
|
||||||
|
|
||||||
public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000;
|
public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000;
|
||||||
|
|
||||||
@@ -45,4 +45,7 @@ public class Constant {
|
|||||||
public static final Integer DEFAULT_MAX_CAL_TOPIC_EXPIRED_DAY = 90;
|
public static final Integer DEFAULT_MAX_CAL_TOPIC_EXPIRED_DAY = 90;
|
||||||
|
|
||||||
public static final Integer INVALID_CODE = -1;
|
public static final Integer INVALID_CODE = -1;
|
||||||
|
|
||||||
|
private Constant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,4 +16,11 @@ public class KafkaConstant {
|
|||||||
public static final String CLIENT_VERSION_NAME_UNKNOWN = "unknown";
|
public static final String CLIENT_VERSION_NAME_UNKNOWN = "unknown";
|
||||||
|
|
||||||
public static final String RETENTION_MS_KEY = "retention.ms";
|
public static final String RETENTION_MS_KEY = "retention.ms";
|
||||||
|
|
||||||
|
public static final String EXTERNAL_KEY = "EXTERNAL";
|
||||||
|
|
||||||
|
public static final String INTERNAL_KEY = "INTERNAL";
|
||||||
|
|
||||||
|
private KafkaConstant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -39,4 +39,7 @@ public class KafkaMetricsCollections {
|
|||||||
* Broker信息
|
* Broker信息
|
||||||
*/
|
*/
|
||||||
public static final int BROKER_VERSION = 400;
|
public static final int BROKER_VERSION = 400;
|
||||||
|
|
||||||
|
private KafkaMetricsCollections() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +0,0 @@
|
|||||||
package com.xiaojukeji.kafka.manager.common.constant;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @author zengqiao
|
|
||||||
* @date 20/8/10
|
|
||||||
*/
|
|
||||||
public class LogConstant {
|
|
||||||
public static final String COLLECTOR_METRICS_LOGGER = "COLLECTOR_METRICS_LOGGER";
|
|
||||||
|
|
||||||
public static final String API_METRICS_LOGGER = "API_METRICS_LOGGER";
|
|
||||||
|
|
||||||
public static final String SCHEDULED_TASK_LOGGER = "SCHEDULED_TASK_LOGGER";
|
|
||||||
}
|
|
||||||
@@ -11,4 +11,7 @@ public class LoginConstant {
|
|||||||
public static final String COOKIE_CHINESE_USERNAME_KEY = "chineseName";
|
public static final String COOKIE_CHINESE_USERNAME_KEY = "chineseName";
|
||||||
|
|
||||||
public static final Integer COOKIE_OR_SESSION_MAX_AGE_UNIT_MS = 24 * 60 * 60 * 1000;
|
public static final Integer COOKIE_OR_SESSION_MAX_AGE_UNIT_MS = 24 * 60 * 60 * 1000;
|
||||||
|
|
||||||
|
private LoginConstant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -6,4 +6,7 @@ package com.xiaojukeji.kafka.manager.common.constant;
|
|||||||
*/
|
*/
|
||||||
public class SystemCodeConstant {
|
public class SystemCodeConstant {
|
||||||
public static final String KAFKA_MANAGER = "kafka-manager";
|
public static final String KAFKA_MANAGER = "kafka-manager";
|
||||||
|
|
||||||
|
private SystemCodeConstant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -25,6 +25,8 @@ public class TopicCreationConstant {
|
|||||||
|
|
||||||
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
|
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
|
||||||
|
|
||||||
|
public static final String TOPIC_RETENTION_BYTES_KEY_NAME = "retention.bytes";
|
||||||
|
|
||||||
public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L;
|
public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L;
|
||||||
|
|
||||||
public static Properties createNewProperties(Long retentionTime) {
|
public static Properties createNewProperties(Long retentionTime) {
|
||||||
@@ -54,4 +56,7 @@ public class TopicCreationConstant {
|
|||||||
* 单次自动化审批, 最多允许的通过单子
|
* 单次自动化审批, 最多允许的通过单子
|
||||||
*/
|
*/
|
||||||
public static final Integer MAX_PASSED_ORDER_NUM_PER_TASK = 200;
|
public static final Integer MAX_PASSED_ORDER_NUM_PER_TASK = 200;
|
||||||
|
|
||||||
|
private TopicCreationConstant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -16,4 +16,7 @@ public class TopicSampleConstant {
|
|||||||
public static final Integer MAX_TIMEOUT_UNIT_MS = 10000;
|
public static final Integer MAX_TIMEOUT_UNIT_MS = 10000;
|
||||||
public static final Integer POLL_TIME_OUT_UNIT_MS = 2000;
|
public static final Integer POLL_TIME_OUT_UNIT_MS = 2000;
|
||||||
public static final Integer MAX_DATA_LENGTH_UNIT_BYTE = 2048;
|
public static final Integer MAX_DATA_LENGTH_UNIT_BYTE = 2048;
|
||||||
|
|
||||||
|
private TopicSampleConstant() {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
package com.xiaojukeji.kafka.manager.common.constant;
|
||||||
|
|
||||||
|
public class TrickLoginConstant {
|
||||||
|
/**
|
||||||
|
* HTTP Header key
|
||||||
|
*/
|
||||||
|
public static final String TRICK_LOGIN_SWITCH = "Trick-Login-Switch";
|
||||||
|
|
||||||
|
public static final String TRICK_LOGIN_USER = "Trick-Login-User";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 配置允许 trick 登录用户名单
|
||||||
|
*/
|
||||||
|
public static final String TRICK_LOGIN_LEGAL_USER_CONFIG_KEY = "SECURITY.TRICK_USERS";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 开关状态值
|
||||||
|
*/
|
||||||
|
public static final String TRICK_LOGIN_SWITCH_ON = "on";
|
||||||
|
public static final String TRICK_LOGIN_SWITCH_OFF = "off";
|
||||||
|
|
||||||
|
private TrickLoginConstant() {
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,18 @@
|
|||||||
|
package com.xiaojukeji.kafka.manager.common.entity.ao.common;
|
||||||
|
|
||||||
|
import lombok.AllArgsConstructor;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
import java.io.Serializable;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
@NoArgsConstructor
|
||||||
|
@AllArgsConstructor
|
||||||
|
public class IpPortData implements Serializable {
|
||||||
|
private static final long serialVersionUID = -428897032994630685L;
|
||||||
|
|
||||||
|
private String ip;
|
||||||
|
|
||||||
|
private String port;
|
||||||
|
}
|
||||||
@@ -10,6 +10,8 @@ import java.util.List;
|
|||||||
public class TopicExpiredConfig {
|
public class TopicExpiredConfig {
|
||||||
private Integer minExpiredDay = 30;
|
private Integer minExpiredDay = 30;
|
||||||
|
|
||||||
|
private String filterRegex = "";
|
||||||
|
|
||||||
private List<Long> ignoreClusterIdList = new ArrayList<>();
|
private List<Long> ignoreClusterIdList = new ArrayList<>();
|
||||||
|
|
||||||
public Integer getMinExpiredDay() {
|
public Integer getMinExpiredDay() {
|
||||||
@@ -28,10 +30,19 @@ public class TopicExpiredConfig {
|
|||||||
this.ignoreClusterIdList = ignoreClusterIdList;
|
this.ignoreClusterIdList = ignoreClusterIdList;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public String getFilterRegex() {
|
||||||
|
return filterRegex;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setFilterRegex(String filterRegex) {
|
||||||
|
this.filterRegex = filterRegex;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "TopicExpiredConfig{" +
|
return "TopicExpiredConfig{" +
|
||||||
"minExpiredDay=" + minExpiredDay +
|
"minExpiredDay=" + minExpiredDay +
|
||||||
|
", filterRegex='" + filterRegex + '\'' +
|
||||||
", ignoreClusterIdList=" + ignoreClusterIdList +
|
", ignoreClusterIdList=" + ignoreClusterIdList +
|
||||||
'}';
|
'}';
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
|
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
|
||||||
|
|
||||||
|
import com.xiaojukeji.kafka.manager.common.entity.dto.gateway.TopicQuotaDTO;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @author zhongyuankai
|
* @author zhongyuankai
|
||||||
* @date 2020/4/27
|
* @date 2020/4/27
|
||||||
@@ -65,4 +67,15 @@ public class TopicQuota {
|
|||||||
", consumeQuota=" + consumeQuota +
|
", consumeQuota=" + consumeQuota +
|
||||||
'}';
|
'}';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static TopicQuota buildFrom(TopicQuotaDTO dto) {
|
||||||
|
TopicQuota topicQuota = new TopicQuota();
|
||||||
|
topicQuota.setAppId(dto.getAppId());
|
||||||
|
topicQuota.setClusterId(dto.getClusterId());
|
||||||
|
topicQuota.setTopicName(dto.getTopicName());
|
||||||
|
topicQuota.setProduceQuota(dto.getProduceQuota());
|
||||||
|
topicQuota.setConsumeQuota(dto.getConsumeQuota());
|
||||||
|
return topicQuota;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ public class MineTopicSummary {
|
|||||||
|
|
||||||
private Integer access;
|
private Integer access;
|
||||||
|
|
||||||
|
private String description;
|
||||||
|
|
||||||
public Long getLogicalClusterId() {
|
public Long getLogicalClusterId() {
|
||||||
return logicalClusterId;
|
return logicalClusterId;
|
||||||
}
|
}
|
||||||
@@ -105,6 +107,14 @@ public class MineTopicSummary {
|
|||||||
this.access = access;
|
this.access = access;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public String getDescription() {
|
||||||
|
return description;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setDescription(String description) {
|
||||||
|
this.description = description;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "MineTopicSummary{" +
|
return "MineTopicSummary{" +
|
||||||
|
|||||||
@@ -37,6 +37,8 @@ public class TopicBasicDTO {
|
|||||||
|
|
||||||
private Long retentionTime;
|
private Long retentionTime;
|
||||||
|
|
||||||
|
private Long retentionBytes;
|
||||||
|
|
||||||
public Long getClusterId() {
|
public Long getClusterId() {
|
||||||
return clusterId;
|
return clusterId;
|
||||||
}
|
}
|
||||||
@@ -157,6 +159,14 @@ public class TopicBasicDTO {
|
|||||||
this.retentionTime = retentionTime;
|
this.retentionTime = retentionTime;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public Long getRetentionBytes() {
|
||||||
|
return retentionBytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setRetentionBytes(Long retentionBytes) {
|
||||||
|
this.retentionBytes = retentionBytes;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "TopicBasicDTO{" +
|
return "TopicBasicDTO{" +
|
||||||
@@ -166,7 +176,7 @@ public class TopicBasicDTO {
|
|||||||
", principals='" + principals + '\'' +
|
", principals='" + principals + '\'' +
|
||||||
", topicName='" + topicName + '\'' +
|
", topicName='" + topicName + '\'' +
|
||||||
", description='" + description + '\'' +
|
", description='" + description + '\'' +
|
||||||
", regionNameList='" + regionNameList + '\'' +
|
", regionNameList=" + regionNameList +
|
||||||
", score=" + score +
|
", score=" + score +
|
||||||
", topicCodeC='" + topicCodeC + '\'' +
|
", topicCodeC='" + topicCodeC + '\'' +
|
||||||
", partitionNum=" + partitionNum +
|
", partitionNum=" + partitionNum +
|
||||||
@@ -175,6 +185,7 @@ public class TopicBasicDTO {
|
|||||||
", modifyTime=" + modifyTime +
|
", modifyTime=" + modifyTime +
|
||||||
", createTime=" + createTime +
|
", createTime=" + createTime +
|
||||||
", retentionTime=" + retentionTime +
|
", retentionTime=" + retentionTime +
|
||||||
|
", retentionBytes=" + retentionBytes +
|
||||||
'}';
|
'}';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,47 @@
|
|||||||
|
package com.xiaojukeji.kafka.manager.common.entity.dto.gateway;
|
||||||
|
|
||||||
|
import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO;
|
||||||
|
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
|
||||||
|
import io.swagger.annotations.ApiModel;
|
||||||
|
import io.swagger.annotations.ApiModelProperty;
|
||||||
|
|
||||||
|
@ApiModel(description = "配额调整")
|
||||||
|
public class TopicQuotaDTO extends ClusterTopicDTO {
|
||||||
|
@ApiModelProperty(value = "appId")
|
||||||
|
private String appId;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "发送数据速率B/s")
|
||||||
|
private Long produceQuota;
|
||||||
|
|
||||||
|
@ApiModelProperty(value = "消费数据速率B/s")
|
||||||
|
private Long consumeQuota;
|
||||||
|
|
||||||
|
public String getAppId() {
|
||||||
|
return appId;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setAppId(String appId) {
|
||||||
|
this.appId = appId;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Long getProduceQuota() {
|
||||||
|
return produceQuota;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setProduceQuota(Long produceQuota) {
|
||||||
|
this.produceQuota = produceQuota;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Long getConsumeQuota() {
|
||||||
|
return consumeQuota;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setConsumeQuota(Long consumeQuota) {
|
||||||
|
this.consumeQuota = consumeQuota;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean paramLegal() {
|
||||||
|
return !ValidateUtils.isNullOrLessThanZero(clusterId) && !ValidateUtils.isBlank(topicName) && !ValidateUtils.isBlank(appId);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -21,6 +21,15 @@ public class AccountDTO {
     @ApiModelProperty(value = "角色")
     private Integer role;
 
+    @ApiModelProperty(value = "用户姓名")
+    private String displayName;
+
+    @ApiModelProperty(value = "部门")
+    private String department;
+
+    @ApiModelProperty(value = "邮箱")
+    private String mail;
+
     public String getUsername() {
         return username;
     }
@@ -45,12 +54,39 @@ public class AccountDTO {
         this.role = role;
     }
 
+    public String getDisplayName() {
+        return displayName;
+    }
+
+    public void setDisplayName(String displayName) {
+        this.displayName = displayName;
+    }
+
+    public String getDepartment() {
+        return department;
+    }
+
+    public void setDepartment(String department) {
+        this.department = department;
+    }
+
+    public String getMail() {
+        return mail;
+    }
+
+    public void setMail(String mail) {
+        this.mail = mail;
+    }
+
     @Override
     public String toString() {
         return "AccountDTO{" +
                 "username='" + username + '\'' +
                 ", password='" + password + '\'' +
                 ", role=" + role +
+                ", displayName='" + displayName + '\'' +
+                ", department='" + department + '\'' +
+                ", mail='" + mail + '\'' +
                 '}';
     }
 
@@ -21,6 +21,12 @@ public class AccountDO {
 
     private Integer role;
 
+    private String displayName;
+
+    private String department;
+
+    private String mail;
+
     public String getUsername() {
         return username;
     }
@@ -45,16 +51,43 @@ public class AccountDO {
         this.role = role;
     }
 
+    public String getDisplayName() {
+        return displayName;
+    }
+
+    public void setDisplayName(String displayName) {
+        this.displayName = displayName;
+    }
+
+    public String getDepartment() {
+        return department;
+    }
+
+    public void setDepartment(String department) {
+        this.department = department;
+    }
+
+    public String getMail() {
+        return mail;
+    }
+
+    public void setMail(String mail) {
+        this.mail = mail;
+    }
+
     @Override
     public String toString() {
         return "AccountDO{" +
-                "username='" + username + '\'' +
-                ", password='" + password + '\'' +
-                ", role=" + role +
-                ", id=" + id +
+                "id=" + id +
                 ", status=" + status +
                 ", gmtCreate=" + gmtCreate +
                 ", gmtModify=" + gmtModify +
+                ", username='" + username + '\'' +
+                ", password='" + password + '\'' +
+                ", role=" + role +
+                ", displayName='" + displayName + '\'' +
+                ", department='" + department + '\'' +
+                ", mail='" + mail + '\'' +
                 '}';
     }
 }
@@ -33,7 +33,7 @@ public class BrokerOverviewVO {
     @ApiModelProperty(value = "分区数")
     private Integer partitionCount;
 
-    @ApiModelProperty(value = "已同步副本数")
+    @ApiModelProperty(value = "失效副本分区的个数")
     private Integer underReplicatedPartitions;
 
     @ApiModelProperty(value = "未同步")
@@ -27,8 +27,11 @@ public class OrderVO {
     @ApiModelProperty(value = "工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消")
     private Integer status;
 
-    @ApiModelProperty(value = "申请/审核时间")
-    private Date gmtTime;
+    @ApiModelProperty(value = "申请时间")
+    private Date gmtCreate;
+
+    @ApiModelProperty(value = "审核时间")
+    private Date gmtHandle;
 
     public Long getId() {
         return id;
@@ -70,12 +73,20 @@ public class OrderVO {
         this.status = status;
     }
 
-    public Date getGmtTime() {
-        return gmtTime;
+    public Date getGmtCreate() {
+        return gmtCreate;
     }
 
-    public void setGmtTime(Date gmtTime) {
-        this.gmtTime = gmtTime;
+    public void setGmtCreate(Date gmtCreate) {
+        this.gmtCreate = gmtCreate;
+    }
+
+    public Date getGmtHandle() {
+        return gmtHandle;
+    }
+
+    public void setGmtHandle(Date gmtHandle) {
+        this.gmtHandle = gmtHandle;
     }
 
     public String getApplicant() {
@@ -95,7 +106,7 @@ public class OrderVO {
                 ", applicant='" + applicant + '\'' +
                 ", description='" + description + '\'' +
                 ", status=" + status +
-                ", gmtTime=" + gmtTime +
+                ", gmtTime=" + gmtCreate +
                 '}';
     }
 }
@@ -33,6 +33,9 @@ public class TopicBasicVO {
     @ApiModelProperty(value = "存储时间(ms)")
     private Long retentionTime;
 
+    @ApiModelProperty(value = "单分区数据保存大小(Byte)")
+    private Long retentionBytes;
+
     @ApiModelProperty(value = "创建时间")
     private Long createTime;
 
@@ -62,12 +65,20 @@ public class TopicBasicVO {
         this.clusterId = clusterId;
     }
 
-    public String getTopicCodeC() {
-        return topicCodeC;
+    public String getAppId() {
+        return appId;
     }
 
-    public void setTopicCodeC(String topicCodeC) {
-        this.topicCodeC = topicCodeC;
+    public void setAppId(String appId) {
+        this.appId = appId;
+    }
+
+    public String getAppName() {
+        return appName;
+    }
+
+    public void setAppName(String appName) {
+        this.appName = appName;
     }
 
     public Integer getPartitionNum() {
@@ -86,22 +97,6 @@ public class TopicBasicVO {
         this.replicaNum = replicaNum;
     }
 
-    public Long getModifyTime() {
-        return modifyTime;
-    }
-
-    public void setModifyTime(Long modifyTime) {
-        this.modifyTime = modifyTime;
-    }
-
-    public Long getCreateTime() {
-        return createTime;
-    }
-
-    public void setCreateTime(Long createTime) {
-        this.createTime = createTime;
-    }
-
     public String getPrincipals() {
         return principals;
     }
@@ -110,30 +105,6 @@ public class TopicBasicVO {
         this.principals = principals;
     }
 
-    public String getDescription() {
-        return description;
-    }
-
-    public void setDescription(String description) {
-        this.description = description;
-    }
-
-    public void setAppId(String appId) {
-        this.appId = appId;
-    }
-
-    public void setBootstrapServers(String bootstrapServers) {
-        this.bootstrapServers = bootstrapServers;
-    }
-
-    public String getAppId() {
-        return appId;
-    }
-
-    public String getBootstrapServers() {
-        return bootstrapServers;
-    }
-
     public Long getRetentionTime() {
         return retentionTime;
     }
@@ -142,12 +113,28 @@ public class TopicBasicVO {
         this.retentionTime = retentionTime;
     }
 
-    public String getAppName() {
-        return appName;
+    public Long getRetentionBytes() {
+        return retentionBytes;
     }
 
-    public void setAppName(String appName) {
-        this.appName = appName;
+    public void setRetentionBytes(Long retentionBytes) {
+        this.retentionBytes = retentionBytes;
+    }
+
+    public Long getCreateTime() {
+        return createTime;
+    }
+
+    public void setCreateTime(Long createTime) {
+        this.createTime = createTime;
+    }
+
+    public Long getModifyTime() {
+        return modifyTime;
+    }
+
+    public void setModifyTime(Long modifyTime) {
+        this.modifyTime = modifyTime;
     }
 
     public Integer getScore() {
@@ -158,6 +145,30 @@ public class TopicBasicVO {
         this.score = score;
     }
 
+    public String getTopicCodeC() {
+        return topicCodeC;
+    }
+
+    public void setTopicCodeC(String topicCodeC) {
+        this.topicCodeC = topicCodeC;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    public String getBootstrapServers() {
+        return bootstrapServers;
+    }
+
+    public void setBootstrapServers(String bootstrapServers) {
+        this.bootstrapServers = bootstrapServers;
+    }
+
     public List<String> getRegionNameList() {
         return regionNameList;
     }
@@ -176,6 +187,7 @@ public class TopicBasicVO {
                 ", replicaNum=" + replicaNum +
                 ", principals='" + principals + '\'' +
                 ", retentionTime=" + retentionTime +
+                ", retentionBytes=" + retentionBytes +
                 ", createTime=" + createTime +
                 ", modifyTime=" + modifyTime +
                 ", score=" + score +
@@ -36,6 +36,9 @@ public class TopicMineVO {
     @ApiModelProperty(value = "状态, 0:无权限, 1:可消费 2:可发送 3:可消费发送 4:可管理")
     private Integer access;
 
+    @ApiModelProperty(value = "备注")
+    private String description;
+
     public Long getClusterId() {
         return clusterId;
     }
@@ -108,6 +111,14 @@ public class TopicMineVO {
         this.access = access;
     }
 
+    public String getDescription() {
+        return description;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
     @Override
     public String toString() {
         return "TopicMineVO{" +
@@ -0,0 +1,20 @@
+package com.xiaojukeji.kafka.manager.common.events;
+
+import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
+import lombok.Getter;
+import org.springframework.context.ApplicationEvent;
+
+/**
+ * Region创建事件
+ * @author zengqiao
+ * @date 22/01/1
+ */
+@Getter
+public class RegionCreatedEvent extends ApplicationEvent {
+    private final RegionDO regionDO;
+
+    public RegionCreatedEvent(Object source, RegionDO regionDO) {
+        super(source);
+        this.regionDO = regionDO;
+    }
+}
@@ -0,0 +1,33 @@
+package com.xiaojukeji.kafka.manager.common.events.metrics;
+
+import org.springframework.context.ApplicationEvent;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+public class BaseMetricsCollectedEvent extends ApplicationEvent {
+    /**
+     * 物理集群ID
+     */
+    protected final Long physicalClusterId;
+
+    /**
+     * 收集时间,依据业务需要来设置,可以设置任务开始时间,也可以设置任务结束时间
+     */
+    protected final Long collectTime;
+
+    public BaseMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime) {
+        super(source);
+        this.physicalClusterId = physicalClusterId;
+        this.collectTime = collectTime;
+    }
+
+    public Long getPhysicalClusterId() {
+        return physicalClusterId;
+    }
+
+    public Long getCollectTime() {
+        return collectTime;
+    }
+}
@@ -0,0 +1,22 @@
+package com.xiaojukeji.kafka.manager.common.events.metrics;
+
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+
+import java.util.List;
+
+/**
+ * @author zengqiao
+ * @date 20/8/31
+ */
+public class BatchBrokerMetricsCollectedEvent extends BaseMetricsCollectedEvent {
+    private final List<BrokerMetrics> metricsList;
+
+    public BatchBrokerMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime, List<BrokerMetrics> metricsList) {
+        super(source, physicalClusterId, collectTime);
+        this.metricsList = metricsList;
+    }
+
+    public List<BrokerMetrics> getMetricsList() {
+        return metricsList;
+    }
+}
@@ -0,0 +1,75 @@
+package com.xiaojukeji.kafka.manager.common.utils;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class BackoffUtils {
+    private BackoffUtils() {
+    }
+
+    /**
+     * 需要进行回退的事件信息
+     * <回退事件名,回退结束时间>
+     */
+    private static final Map<String, Long> NEED_BACK_OFF_EVENT_MAP = new ConcurrentHashMap<>();
+
+    public static void backoff(long timeUnitMs) {
+        if (timeUnitMs <= 0) {
+            return;
+        }
+
+        try {
+            Thread.sleep(timeUnitMs);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        } catch (Exception e) {
+            // ignore
+        }
+    }
+
+    /**
+     * 记录回退设置
+     * @param backoffEventKey 回退事件key
+     * @param backoffTimeUnitMs 回退时间(ms)
+     */
+    public static void putNeedBackoffEvent(String backoffEventKey, Long backoffTimeUnitMs) {
+        if (backoffEventKey == null || backoffTimeUnitMs == null || backoffTimeUnitMs <= 0) {
+            return;
+        }
+
+        NEED_BACK_OFF_EVENT_MAP.put(backoffEventKey, backoffTimeUnitMs + System.currentTimeMillis());
+    }
+
+    /**
+     * 移除回退设置
+     * @param backoffEventKey 回退事件key
+     */
+    public static void removeNeedBackoffEvent(String backoffEventKey) {
+        NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey);
+    }
+
+    /**
+     * 检查是否需要回退
+     * @param backoffEventKey 回退事件key
+     * @return
+     */
+    public static boolean isNeedBackoff(String backoffEventKey) {
+        Long backoffEventEndTimeUnitMs = NEED_BACK_OFF_EVENT_MAP.get(backoffEventKey);
+        if (backoffEventEndTimeUnitMs == null) {
+            return false;
+        }
+
+        if (backoffEventEndTimeUnitMs > System.currentTimeMillis()) {
+            return true;
+        }
+
+        // 移除
+        try {
+            NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey, backoffEventEndTimeUnitMs);
+        } catch (Exception e) {
+            // 如果key不存在,这里可能出现NPE,不过不管什么异常都可以忽略
+        }
+
+        return false;
+    }
+}
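
Editor's note: the BackoffUtils class added above is a small registry of event keys mapped to backoff deadlines — putNeedBackoffEvent opens a suppression window, isNeedBackoff reports (and lazily clears) it, and backoff is a guarded sleep. The sketch below only illustrates that contract; the demo class, key and timings are invented, while the three BackoffUtils calls are exactly the ones introduced in this diff.

    import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils;

    public class BackoffUtilsDemo {
        public static void main(String[] args) {
            // Assumed key; any unique string works. JmxConnectorWrap (further down) builds one per cluster/broker.
            String eventKey = "DEMO_CONNECT_FAILED";

            // A failed attempt opens a 5-second backoff window.
            BackoffUtils.putNeedBackoffEvent(eventKey, 5_000L);
            System.out.println(BackoffUtils.isNeedBackoff(eventKey));   // true while the window is active

            // Once the deadline has passed, the next check removes the entry and allows a retry.
            BackoffUtils.backoff(5_100L);
            System.out.println(BackoffUtils.isNeedBackoff(eventKey));   // false again
        }
    }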
@@ -0,0 +1,14 @@
+package com.xiaojukeji.kafka.manager.common.utils;
+
+/**
+ * @className: SplitUtils
+ * @description: Split string of type keyValue
+ * @author: Hu.Yue
+ * @date: 2021/8/4
+ **/
+public class SplitUtils {
+
+    public static String keyValueSplit(String keyValue){
+        return keyValue.split(":\\s+")[1];
+    }
+}
@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.common.utils;
 
 import com.xiaojukeji.kafka.manager.common.constant.Constant;
 import com.xiaojukeji.kafka.manager.common.constant.LoginConstant;
+import com.xiaojukeji.kafka.manager.common.constant.TrickLoginConstant;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.BeansException;
@@ -53,13 +54,6 @@ public class SpringTool implements ApplicationContextAware, DisposableBean {
         return getApplicationContext().getBeansOfType(type);
     }
 
-//    /**
-//     * 从静态变量applicationContext中去的Bean,自动转型为所复制对象的类型
-//     */
-//    public static <T> T getBean(Class<T> requiredType) {
-//        return (T) applicationContext.getBean(requiredType);
-//    }
-
     /**
      * 清除SpringContextHolder中的ApplicationContext为Null
      */
@@ -87,10 +81,18 @@ public class SpringTool implements ApplicationContextAware, DisposableBean {
     }
 
     public static String getUserName(){
-        HttpServletRequest request =
-                ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
-        HttpSession session = request.getSession();
-        String username = (String) session.getAttribute(LoginConstant.SESSION_USERNAME_KEY);
+        HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
+        String username = null;
+        if (TrickLoginConstant.TRICK_LOGIN_SWITCH_ON.equals(request.getHeader(TrickLoginConstant.TRICK_LOGIN_SWITCH))) {
+            // trick登录方式的获取用户
+            username = request.getHeader(TrickLoginConstant.TRICK_LOGIN_USER);
+        } else {
+            // 走页面登录方式登录的获取用户
+            HttpSession session = request.getSession();
+            username = (String) session.getAttribute(LoginConstant.SESSION_USERNAME_KEY);
+        }
+
         if (ValidateUtils.isNull(username)) {
             return Constant.DEFAULT_USER_NAME;
         }
@@ -1,7 +1,7 @@
 package com.xiaojukeji.kafka.manager.common.utils.factory;
 
-import com.alibaba.fastjson.JSONObject;
 import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
+import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
 import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
 import org.apache.commons.pool2.BasePooledObjectFactory;
 import org.apache.commons.pool2.PooledObject;
@@ -16,7 +16,7 @@ import java.util.Properties;
  * @author zengqiao
  * @date 20/8/24
  */
-public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer> {
+public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer<String, String>> {
     private ClusterDO clusterDO;
 
     public KafkaConsumerFactory(ClusterDO clusterDO) {
@@ -25,17 +25,17 @@ public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer>
 
     @Override
     public KafkaConsumer create() {
-        return new KafkaConsumer(createKafkaConsumerProperties(clusterDO));
+        return new KafkaConsumer<String, String>(createKafkaConsumerProperties(clusterDO));
     }
 
     @Override
-    public PooledObject<KafkaConsumer> wrap(KafkaConsumer obj) {
-        return new DefaultPooledObject<KafkaConsumer>(obj);
+    public PooledObject<KafkaConsumer<String, String>> wrap(KafkaConsumer<String, String> obj) {
+        return new DefaultPooledObject<>(obj);
     }
 
     @Override
-    public void destroyObject(final PooledObject<KafkaConsumer> p) throws Exception {
-        KafkaConsumer kafkaConsumer = p.getObject();
+    public void destroyObject(final PooledObject<KafkaConsumer<String, String>> p) throws Exception {
+        KafkaConsumer<String, String> kafkaConsumer = p.getObject();
         if (ValidateUtils.isNull(kafkaConsumer)) {
             return;
         }
@@ -57,7 +57,7 @@ public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer>
         if (ValidateUtils.isBlank(clusterDO.getSecurityProperties())) {
             return properties;
         }
-        properties.putAll(JSONObject.parseObject(clusterDO.getSecurityProperties(), Properties.class));
+        properties.putAll(JsonUtils.stringToObj(clusterDO.getSecurityProperties(), Properties.class));
         return properties;
     }
 }
@@ -1,5 +1,10 @@
 package com.xiaojukeji.kafka.manager.common.utils.jmx;
 
+import lombok.Data;
+import lombok.ToString;
+
+@Data
+@ToString
 public class JmxConfig {
     /**
      * 单台最大连接数
@@ -21,45 +26,8 @@ public class JmxConfig {
      */
     private Boolean openSSL;
 
-    public Integer getMaxConn() {
-        return maxConn;
-    }
-
-    public void setMaxConn(Integer maxConn) {
-        this.maxConn = maxConn;
-    }
-
-    public String getUsername() {
-        return username;
-    }
-
-    public void setUsername(String username) {
-        this.username = username;
-    }
-
-    public String getPassword() {
-        return password;
-    }
-
-    public void setPassword(String password) {
-        this.password = password;
-    }
-
-    public Boolean isOpenSSL() {
-        return openSSL;
-    }
-
-    public void setOpenSSL(Boolean openSSL) {
-        this.openSSL = openSSL;
-    }
-
-    @Override
-    public String toString() {
-        return "JmxConfig{" +
-                "maxConn=" + maxConn +
-                ", username='" + username + '\'' +
-                ", password='" + password + '\'' +
-                ", openSSL=" + openSSL +
-                '}';
-    }
+    /**
+     * 连接重试回退事件
+     */
+    private Long retryConnectBackoffTimeUnitMs;
 }
@@ -1,5 +1,6 @@
 package com.xiaojukeji.kafka.manager.common.utils.jmx;
 
+import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils;
 import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -13,11 +14,11 @@ import javax.naming.Context;
 import javax.rmi.ssl.SslRMIClientSocketFactory;
 import java.io.IOException;
 import java.net.MalformedURLException;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
 
 /**
  * JMXConnector包装类
@@ -25,19 +26,27 @@ import java.util.concurrent.atomic.AtomicInteger;
  * @date 2015/11/9.
  */
 public class JmxConnectorWrap {
-    private final static Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class);
 
-    private String host;
+    private final Long physicalClusterId;
 
-    private int port;
+    private final Integer brokerId;
+
+    private final String host;
+
+    private final int port;
 
     private JMXConnector jmxConnector;
 
-    private AtomicInteger atomicInteger;
+    private final AtomicInteger atomicInteger;
 
     private JmxConfig jmxConfig;
 
-    public JmxConnectorWrap(String host, int port, JmxConfig jmxConfig) {
+    private final ReentrantLock modifyJMXConnectorLock = new ReentrantLock();
+
+    public JmxConnectorWrap(Long physicalClusterId, Integer brokerId, String host, int port, JmxConfig jmxConfig) {
+        this.physicalClusterId = physicalClusterId;
+        this.brokerId = brokerId;
         this.host = host;
         this.port = port;
         this.jmxConfig = jmxConfig;
@@ -45,7 +54,12 @@ public class JmxConnectorWrap {
             this.jmxConfig = new JmxConfig();
         }
         if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) {
-            this.jmxConfig.setMaxConn(1);
+            // 默认设置20
+            this.jmxConfig.setMaxConn(20);
+        }
+        if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getRetryConnectBackoffTimeUnitMs())) {
+            // 默认回退10分钟
+            this.jmxConfig.setRetryConnectBackoffTimeUnitMs(10 * 60 * 1000L);
         }
         this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn());
     }
@@ -57,17 +71,40 @@ public class JmxConnectorWrap {
         if (port == -1) {
             return false;
         }
-        return createJmxConnector();
+        return safeCreateJmxConnector();
     }
 
-    public synchronized void close() {
+    public void close() {
+        this.closeJmxConnect();
+    }
+
+    public void closeJmxConnect() {
         if (jmxConnector == null) {
             return;
         }
+
         try {
+            modifyJMXConnectorLock.lock();
+
+            // 移除设置的backoff事件
+            BackoffUtils.removeNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId));
+
             jmxConnector.close();
-        } catch (IOException e) {
-            LOGGER.warn("close JmxConnector exception, host:{} port:{}.", host, port, e);
+        } catch (Exception e) {
+            LOGGER.error("close JmxConnector exception, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e);
+        } finally {
+            jmxConnector = null;
+
+            modifyJMXConnectorLock.unlock();
+        }
+    }
+
+    private boolean safeCreateJmxConnector() {
+        try {
+            modifyJMXConnectorLock.lock();
+            return createJmxConnector();
+        } finally {
+            modifyJMXConnectorLock.unlock();
         }
     }
 
@@ -75,13 +112,22 @@ public class JmxConnectorWrap {
         if (jmxConnector != null) {
             return true;
         }
+
+        if (BackoffUtils.isNeedBackoff(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId))) {
+            // 被设置了需要进行回退,则本次不进行创建
+            return false;
+        }
+
         String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
         try {
             Map<String, Object> environment = new HashMap<String, Object>();
             if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) {
-                environment.put(JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
+                // fixed by riyuetianmu
+                environment.put(JMXConnector.CREDENTIALS, new String[]{this.jmxConfig.getUsername(), this.jmxConfig.getPassword()});
             }
-            if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
+
+            if (jmxConfig.getOpenSSL() != null && this.jmxConfig.getOpenSSL()) {
+                // 开启ssl
                 environment.put(Context.SECURITY_PROTOCOL, "ssl");
                 SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory();
                 environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory);
@@ -89,13 +135,17 @@ public class JmxConnectorWrap {
             }
 
             jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment);
-            LOGGER.info("JMX connect success, host:{} port:{}.", host, port);
+            LOGGER.info("connect JMX success, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port);
             return true;
         } catch (MalformedURLException e) {
-            LOGGER.error("JMX url exception, host:{} port:{} jmxUrl:{}", host, port, jmxUrl, e);
+            LOGGER.error("connect JMX failed, JMX url exception, physicalClusterId:{} brokerId:{} host:{} port:{} jmxUrl:{}.", physicalClusterId, brokerId, host, port, jmxUrl, e);
         } catch (Exception e) {
-            LOGGER.error("JMX connect exception, host:{} port:{}.", host, port, e);
+            LOGGER.error("connect JMX failed, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e);
         }
+
+        // 设置连接backoff
+        BackoffUtils.putNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId), this.jmxConfig.getRetryConnectBackoffTimeUnitMs());
+
         return false;
     }
 
@@ -109,6 +159,11 @@ public class JmxConnectorWrap {
             acquire();
             MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
             return mBeanServerConnection.getAttribute(name, attribute);
+        } catch (IOException ioe) {
+            // io错误,则重置连接
+            this.closeJmxConnect();
+
+            throw ioe;
         } finally {
             atomicInteger.incrementAndGet();
         }
@@ -124,6 +179,11 @@ public class JmxConnectorWrap {
             acquire();
             MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
             return mBeanServerConnection.getAttributes(name, attributes);
+        } catch (IOException ioe) {
+            // io错误,则重置连接
+            this.closeJmxConnect();
+
+            throw ioe;
         } finally {
             atomicInteger.incrementAndGet();
         }
@@ -136,6 +196,11 @@ public class JmxConnectorWrap {
             acquire();
             MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
             return mBeanServerConnection.queryNames(name, query);
+        } catch (IOException ioe) {
+            // io错误,则重置连接
+            this.closeJmxConnect();
+
+            throw ioe;
         } finally {
             atomicInteger.incrementAndGet();
         }
@@ -145,19 +210,21 @@ public class JmxConnectorWrap {
         long now = System.currentTimeMillis();
         while (true) {
             try {
-                if (System.currentTimeMillis() - now > 60000) {
-                    break;
-                }
                 int num = atomicInteger.get();
                 if (num <= 0) {
-                    Thread.sleep(2);
-                    continue;
+                    BackoffUtils.backoff(2);
                 }
-                if (atomicInteger.compareAndSet(num, num - 1)) {
+
+                if (atomicInteger.compareAndSet(num, num - 1) || System.currentTimeMillis() - now > 6000) {
                     break;
                 }
             } catch (Exception e) {
+                // ignore
             }
         }
     }
+
+    private static String buildConnectJmxFailedBackoffEventKey(Long physicalClusterId, Integer brokerId) {
+        return "CONNECT_JMX_FAILED_BACK_OFF_EVENT_PHY_" + physicalClusterId + "_BROKER_" + brokerId;
+    }
 }
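
Editor's note: taken together, JmxConfig and JmxConnectorWrap are now built per broker, with the backoff window guarding reconnect storms. A hedged construction sketch (the cluster id, broker id, host and port below are placeholders; the constructor, the three setters and close() are the members shown in the hunks above):

    import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConfig;
    import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;

    public class JmxWrapDemo {
        public static void main(String[] args) {
            JmxConfig jmxConfig = new JmxConfig();                        // Lombok @Data supplies the setters
            jmxConfig.setMaxConn(20);                                     // max concurrent MBean calls per broker
            jmxConfig.setOpenSSL(false);
            jmxConfig.setRetryConnectBackoffTimeUnitMs(10 * 60 * 1000L);  // 10-minute backoff after a failed connect

            JmxConnectorWrap wrap = new JmxConnectorWrap(1L, 0, "127.0.0.1", 9999, jmxConfig);
            // ... MBean reads go through the wrapper; an IOException now resets the connection ...
            wrap.close();                                                 // also clears any pending backoff entry
        }
    }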
@@ -119,4 +119,7 @@ public class ZkPathUtil {
     public static String getControllerCandidatePath(Integer brokerId) {
         return D_CONTROLLER_CANDIDATES + ZOOKEEPER_SEPARATOR + brokerId;
     }
+
+    private ZkPathUtil() {
+    }
 }
@@ -1,6 +1,17 @@
 package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
+import com.xiaojukeji.kafka.manager.common.entity.ao.common.IpPortData;
+import com.xiaojukeji.kafka.manager.common.utils.NumberUtils;
+import lombok.Data;
+
+import java.io.Serializable;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * @author zengqiao
@@ -10,7 +21,7 @@ import java.util.List;
  * 节点结构:
  * {
  *     "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"},
- *     "endpoints":["SASL_PLAINTEXT://10.179.162.202:9093"],
+ *     "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093"],
 *      "jmx_port":9999,
 *      "host":null,
 *      "timestamp":"1546632983233",
@@ -18,22 +29,48 @@ import java.util.List;
 *      "version":4,
 *      "rack": "CY"
 * }
+ *
+ * {
+ *     "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT","PLAINTEXT":"PLAINTEXT"},
+ *     "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093","PLAINTEXT://127.0.0.1:9092"],
+ *     "jmx_port":8099,
+ *     "host":"127.0.0.1",
+ *     "timestamp":"1628833925822",
+ *     "port":9092,
+ *     "version":4
+ * }
+ *
+ * {
+ *     "listener_security_protocol_map":{"EXTERNAL":"SASL_PLAINTEXT","INTERNAL":"SASL_PLAINTEXT"},
+ *     "endpoints":["EXTERNAL://127.0.0.1:7092","INTERNAL://127.0.0.1:7093"],
+ *     "jmx_port":8099,
+ *     "host":null,
+ *     "timestamp":"1627289710439",
+ *     "port":-1,
+ *     "version":4
+ * }
+ *
 */
-public class BrokerMetadata implements Cloneable {
+@Data
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class BrokerMetadata implements Serializable {
+    private static final long serialVersionUID = 3918113492423375809L;
+
     private long clusterId;
 
     private int brokerId;
 
     private List<String> endpoints;
 
+    // <EXTERNAL|INTERNAL, <ip, port>>
+    private Map<String, IpPortData> endpointMap;
+
     private String host;
 
     private int port;
 
-    /*
-     * ZK上对应的字段就是这个名字, 不要进行修改
-     */
-    private int jmx_port;
+    @JsonProperty("jmx_port")
+    private int jmxPort;
 
     private String version;
 
@@ -41,91 +78,54 @@ public class BrokerMetadata implements Cloneable {
 
     private String rack;
 
-    public long getClusterId() {
-        return clusterId;
-    }
-
-    public void setClusterId(long clusterId) {
-        this.clusterId = clusterId;
-    }
-
-    public int getBrokerId() {
-        return brokerId;
-    }
-
-    public void setBrokerId(int brokerId) {
-        this.brokerId = brokerId;
-    }
-
-    public List<String> getEndpoints() {
-        return endpoints;
-    }
-
-    public void setEndpoints(List<String> endpoints) {
-        this.endpoints = endpoints;
-    }
-
-    public String getHost() {
-        return host;
-    }
-
-    public void setHost(String host) {
-        this.host = host;
-    }
-
-    public int getPort() {
-        return port;
-    }
-
-    public void setPort(int port) {
-        this.port = port;
-    }
-
-    public int getJmxPort() {
-        return jmx_port;
-    }
-
-    public void setJmxPort(int jmxPort) {
-        this.jmx_port = jmxPort;
-    }
-
-    public String getVersion() {
-        return version;
-    }
-
-    public void setVersion(String version) {
-        this.version = version;
-    }
-
-    public long getTimestamp() {
-        return timestamp;
-    }
-
-    public void setTimestamp(long timestamp) {
-        this.timestamp = timestamp;
-    }
-
-    public String getRack() {
-        return rack;
-    }
-
-    public void setRack(String rack) {
-        this.rack = rack;
-    }
-
-    @Override
-    public String toString() {
-        return "BrokerMetadata{" +
-                "clusterId=" + clusterId +
-                ", brokerId=" + brokerId +
-                ", endpoints=" + endpoints +
-                ", host='" + host + '\'' +
-                ", port=" + port +
-                ", jmxPort=" + jmx_port +
-                ", version='" + version + '\'' +
-                ", timestamp=" + timestamp +
-                ", rack='" + rack + '\'' +
-                '}';
+    @JsonIgnore
+    public String getExternalHost() {
+        if (!endpointMap.containsKey(KafkaConstant.EXTERNAL_KEY)) {
+            return null;
+        }
+        return endpointMap.get(KafkaConstant.EXTERNAL_KEY).getIp();
+    }
+
+    @JsonIgnore
+    public String getInternalHost() {
+        if (!endpointMap.containsKey(KafkaConstant.INTERNAL_KEY)) {
+            return null;
+        }
+        return endpointMap.get(KafkaConstant.INTERNAL_KEY).getIp();
+    }
+
+    public static void parseAndUpdateBrokerMetadata(BrokerMetadata brokerMetadata) {
+        brokerMetadata.setEndpointMap(new HashMap<>());
+        if (brokerMetadata.getEndpoints().isEmpty()) {
+            return;
+        }
+
+        // example EXTERNAL://10.179.162.202:7092
+        for (String endpoint: brokerMetadata.getEndpoints()) {
+            int idx1 = endpoint.indexOf("://");
+            int idx2 = endpoint.lastIndexOf(":");
+            if (idx1 == -1 || idx2 == -1 || idx1 == idx2) {
+                continue;
+            }
+
+            String brokerHost = endpoint.substring(idx1 + "://".length(), idx2);
+            String brokerPort = endpoint.substring(idx2 + 1);
+
+            brokerMetadata.getEndpointMap().put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort));
+
+            if (KafkaConstant.EXTERNAL_KEY.equals(endpoint.substring(0, idx1))) {
+                // 优先使用external的地址进行展示
+                brokerMetadata.setHost(brokerHost);
+                brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort));
+            }
+
+            if (null == brokerMetadata.getHost()) {
+                brokerMetadata.setHost(brokerHost);
+                brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort));
+            }
+        }
     }
 }
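
Editor's note: parseAndUpdateBrokerMetadata above derives endpointMap, host and port from entries such as EXTERNAL://127.0.0.1:7092, preferring the EXTERNAL listener for display. The standalone sketch below reproduces just that string-splitting rule; the sample endpoints are copied from the javadoc in the hunk, while the demo class itself is illustrative:

    public class EndpointParseDemo {
        public static void main(String[] args) {
            String[] endpoints = {"EXTERNAL://127.0.0.1:7092", "INTERNAL://127.0.0.1:7093"};

            for (String endpoint : endpoints) {
                int idx1 = endpoint.indexOf("://");       // end of the listener name
                int idx2 = endpoint.lastIndexOf(":");     // separator before the port
                if (idx1 == -1 || idx2 == -1 || idx1 == idx2) {
                    continue;                             // malformed entry, same guard as the diff
                }
                String listener = endpoint.substring(0, idx1);
                String host = endpoint.substring(idx1 + "://".length(), idx2);
                String port = endpoint.substring(idx2 + 1);
                System.out.println(listener + " -> " + host + ":" + port);   // e.g. EXTERNAL -> 127.0.0.1:7092
            }
        }
    }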
@@ -1,32 +1,35 @@
 {
   "name": "logi-kafka",
-  "version": "2.3.1",
+  "version": "2.6.0",
   "description": "",
   "scripts": {
-    "start": "webpack-dev-server",
+    "prestart": "npm install --save-dev webpack-dev-server",
+    "start": "webpack serve",
     "daily-build": "cross-env NODE_ENV=production webpack",
     "pre-build": "cross-env NODE_ENV=production webpack",
-    "prod-build": "cross-env NODE_ENV=production webpack"
+    "prod-build": "cross-env NODE_ENV=production webpack",
+    "fix-memory": "cross-env LIMIT=4096 increase-memory-limit"
   },
   "author": "",
   "license": "ISC",
   "devDependencies": {
     "@hot-loader/react-dom": "^16.8.6",
-    "@types/clipboard": "^2.0.1",
-    "@types/echarts": "^4.4.1",
+    "@types/events": "^3.0.0",
     "@types/lodash.debounce": "^4.0.6",
     "@types/react": "^16.8.8",
     "@types/react-dom": "^16.8.2",
     "@types/react-router-dom": "^4.3.1",
     "@types/spark-md5": "^3.0.2",
+    "@webpack-cli/serve": "^1.6.0",
     "antd": "^3.26.15",
     "clean-webpack-plugin": "^3.0.0",
-    "clipboard": "2.0.6",
+    "clipboard": "^2.0.8",
     "cross-env": "^7.0.2",
     "css-loader": "^2.1.0",
-    "echarts": "^4.5.0",
+    "echarts": "^5.2.1",
     "file-loader": "^5.0.2",
     "html-webpack-plugin": "^3.2.0",
+    "increase-memory-limit": "^1.0.7",
     "less": "^3.9.0",
     "less-loader": "^4.1.0",
     "mini-css-extract-plugin": "^0.6.0",
@@ -49,11 +52,10 @@
     "typescript": "^3.3.3333",
     "url-loader": "^4.1.1",
     "webpack": "^4.29.6",
-    "webpack-cli": "^3.2.3",
-    "webpack-dev-server": "^3.2.1",
+    "webpack-cli": "^4.9.1",
     "xlsx": "^0.16.1"
   },
   "dependencies": {
     "format-to-json": "^1.0.4"
   }
 }
BIN  kafka-manager-console/src/assets/image/weChat.png  (new binary file; after: 36 KiB)
BIN  (modified binary image; before: 125 KiB, after: 125 KiB)
@@ -1,14 +1,29 @@
 import * as React from 'react';
 import { Spin, notification } from 'component/antd';
-import echarts, { EChartOption } from 'echarts/lib/echarts';
+import * as echarts from 'echarts/core';
+
 // 引入柱状图
-import 'echarts/lib/chart/bar';
+import { BarChart } from 'echarts/charts';
+
 // 引入提示框和标题组件
-import 'echarts/lib/component/tooltip';
-import 'echarts/lib/component/title';
-import 'echarts/lib/component/legend';
+import {
+  TitleComponent,
+  TooltipComponent,
+  LegendComponent,
+  GridComponent,
+} from 'echarts/components';
+import { CanvasRenderer } from 'echarts/renderers';
+import { EChartsOption } from 'echarts';
+
+// 注册必须的组件
+echarts.use([
+  TitleComponent,
+  LegendComponent,
+  TooltipComponent,
+  BarChart,
+  GridComponent,
+  CanvasRenderer,
+]);
+
 interface IChartProps {
   getChartData: any;
@@ -38,7 +53,7 @@ export class BarChartComponet extends React.Component<IChartProps> {
     this.chart.resize();
   }
 
-  public isHasData = (data: EChartOption) => {
+  public isHasData = (data: any) => {
     const noData = !(data.series && data.series.length);
     this.setState({ noData });
     return !noData;
@@ -54,7 +69,7 @@ export class BarChartComponet extends React.Component<IChartProps> {
     const chartOptions = getChartData();
 
     if ((typeof chartOptions.then) === 'function') {
-      return chartOptions.then((data: EChartOption) => {
+      return chartOptions.then((data: EChartsOption) => {
         this.setState({ loading: false });
 
         if (this.isHasData(data)) {
@@ -3,16 +3,34 @@ import { DatePicker, notification, Spin } from 'component/antd';
 import moment, { Moment } from 'moment';
 import { timeStampStr } from 'constants/strategy';
 import { disabledDate } from 'lib/utils';
-import echarts from 'echarts';
+import * as echarts from 'echarts/core';
+
-// 引入柱状图和折线图
-import 'echarts/lib/chart/bar';
-import 'echarts/lib/chart/line';
+// 引入柱状图
+import { BarChart, LineChart } from 'echarts/charts';
 
 // 引入提示框和标题组件
-import 'echarts/lib/component/tooltip';
-import 'echarts/lib/component/title';
-import 'echarts/lib/component/legend';
+import {
+  TitleComponent,
+  TooltipComponent,
+  LegendComponent,
+  GridComponent,
+  MarkLineComponent,
+  DatasetComponent,
+} from 'echarts/components';
+import { CanvasRenderer } from 'echarts/renderers';
+
+// 注册必须的组件
+echarts.use([
+  TitleComponent,
+  LegendComponent,
+  TooltipComponent,
+  GridComponent,
+  BarChart,
+  LineChart,
+  CanvasRenderer,
+  DatasetComponent,
+  MarkLineComponent,
+]);
 import './index.less';
 
 const { RangePicker } = DatePicker;
@@ -60,6 +78,23 @@ export class ChartWithDatePicker extends React.Component<IChartProps> {
   public changeChartOptions(options: any) {
     const noData = options.series.length ? false : true;
     this.setState({ noData });
+    options.tooltip.formatter = (params: any) => {
+      let res =
+        '<div style=\'margin-bottom:5px;padding:0 12px;width:100%;height:24px;line-height:24px;border-radius:3px;\'><p>' +
+        params[0].data.time +
+        ' </p></div>';
+      // tslint:disable-next-line:prefer-for-of
+      for (let i = 0; i < params.length; i++) {
+        res += `<div key=${params[i].seriesName} style="color: #fff;padding:0 12px;line-height: 24px">
+          <span style="display:inline-block;margin-right:5px;border-radius:50%;width:10px;height:10px;background-color:${[
+          params[i].color,
+        ]};"></span>
+          ${params[i].seriesName}
+          ${params[i].data[params[i].seriesName]}
+        </div>`;
+      }
+      return res;
+    };
     this.chart.setOption(options, true);
   }
 
@@ -79,7 +114,7 @@ export class ChartWithDatePicker extends React.Component<IChartProps> {
   public render() {
     const { customerNode } = this.props;
     return (
-      <div className="status-box" style={{minWidth: '930px'}}>
+      <div className="status-box" style={{ minWidth: '930px' }}>
        <div className="status-graph">
          <div className="k-toolbar">
            {customerNode}
@@ -1,13 +1,27 @@
 import * as React from 'react';
 import { Spin } from 'component/antd';
-import echarts from 'echarts/lib/echarts';
-// 引入饼状图
-import 'echarts/lib/chart/pie';
-// 引入提示框和标题组件
-import 'echarts/lib/component/tooltip';
-import 'echarts/lib/component/title';
-import 'echarts/lib/component/legend';
+import * as echarts from 'echarts/core';
+// 引入饼图
+import { PieChart } from 'echarts/charts';
+
+// 引入提示框和标题组件
+import {
+  TitleComponent,
+  TooltipComponent,
+  LegendComponent,
+  GridComponent,
+} from 'echarts/components';
+import { CanvasRenderer } from 'echarts/renderers';
+
+// 注册必须的组件
+echarts.use([
+  PieChart,
+  TitleComponent,
+  LegendComponent,
+  TooltipComponent,
+  GridComponent,
+  CanvasRenderer,
+]);
 interface IPieProps {
   getChartData: any;
 }
@@ -1,25 +1,45 @@
 import React from 'react';
-import echarts, { EChartOption } from 'echarts/lib/echarts';
-import 'echarts/lib/chart/pie';
-import 'echarts/lib/chart/line';
-import 'echarts/lib/component/legend';
-import 'echarts/lib/component/tooltip';
-import 'echarts/lib/component/title';
-import 'echarts/lib/component/axis';
+import * as echarts from 'echarts/core';
 import './index.less';
+
+// 引入柱状图
+import { PieChart, LineChart } from 'echarts/charts';
+
+// 引入提示框和标题组件
+import {
+  TitleComponent,
+  TooltipComponent,
+  LegendComponent,
+  GridComponent,
+  ToolboxComponent,
+  DatasetComponent,
+} from 'echarts/components';
+import { CanvasRenderer } from 'echarts/renderers';
+
+// 注册必须的组件
+echarts.use([
+  PieChart,
+  LineChart,
+  ToolboxComponent,
+  TitleComponent,
+  LegendComponent,
+  TooltipComponent,
+  GridComponent,
+  DatasetComponent,
+  CanvasRenderer,
+]);
 export interface IEchartsProps {
   width?: number;
   height?: number;
-  options?: EChartOption;
+  options?: any;
 }
 
-export const hasData = (options: EChartOption) => {
+export const hasData = (options: any) => {
   if (options && options.series && options.series.length) return true;
   return false;
 };
 
-export default class LineChart extends React.Component<IEchartsProps> {
+export default class LineCharts extends React.Component<IEchartsProps> {
   public id = null as HTMLDivElement;
 
   public myChart = null as echarts.ECharts;
@@ -27,7 +47,7 @@ export default class LineChart extends React.Component<IEchartsProps> {
   public componentDidMount() {
     const { options } = this.props;
     this.myChart = echarts.init(this.id);
-    this.myChart.setOption(options);
+    this.myChart.setOption(options, true);
     window.addEventListener('resize', this.resize);
   }
 
@@ -41,7 +61,7 @@ export default class LineChart extends React.Component<IEchartsProps> {
 
   public refresh = () => {
     const { options } = this.props;
-    this.myChart.setOption(options);
+    this.myChart.setOption(options, true);
   }
 
   public resize = () => {
@@ -50,6 +70,6 @@ export default class LineChart extends React.Component<IEchartsProps> {
 
   public render() {
     const { height, width } = this.props;
-    return <div ref={id => this.id = id} style={{width: `${width}px`, height: `${height}px`}} />;
+    return <div ref={id => this.id = id} style={{ width: `${width}px`, height: `${height}px` }} />;
   }
 }
@@ -295,7 +295,7 @@ export class ClusterList extends SearchAndFilterContainer {
           cancelText="取消"
           okText="确认"
         >
-          <Tooltip placement="left" title="暂停监控将无法正常监控指标信息,建议开启监控">
+          <Tooltip placement="bottom" title="暂停监控将无法正常监控指标信息,建议开启监控">
             <a
               className="action-button"
             >
@@ -28,14 +28,16 @@ export const getUserColumns = () => {
         <span className="table-operation">
           <a onClick={() => showApplyModal(record)}>编辑</a>
           <a onClick={() => showApplyModalModifyPassword(record)}>修改密码</a>
-          <Popconfirm
-            title="确定删除?"
-            onConfirm={() => users.deleteUser(record.username)}
-            cancelText="取消"
-            okText="确认"
-          >
-            <a>删除</a>
-          </Popconfirm>
+          {record.username == users.currentUser.username ? "" :
+            <Popconfirm
+              title="确定删除?"
+              onConfirm={() => users.deleteUser(record.username)}
+              cancelText="取消"
+              okText="确认"
+            >
+              <a>删除</a>
+            </Popconfirm>
+          }
         </span>);
       },
     },
@@ -1,4 +1,3 @@
-import { EChartOption } from 'echarts/lib/echarts';
 import moment from 'moment';
 import { ICurve } from 'container/common-curve/config';
 import { adminMonitor } from 'store/admin-monitor';
@@ -124,7 +123,7 @@ export interface ICurveType {
   type: curveType;
   title: string;
   curves: ICurve[];
-  parser: (option: ICurve, data: any[]) => EChartOption;
+  parser: (option: ICurve, data: any[]) => any;
 }
 
 export const byteTypeCurves: ICurveType[] = [