Compare commits

..

123 Commits

Author SHA1 Message Date
EricZeng
ec7f801929 Merge pull request #180 from didi/dev_2.3.0
Dev 2.3.0
2021-02-09 22:06:51 +08:00
zengqiao
0f8aca382e bump version to 2.3.0 2021-02-09 21:47:56 +08:00
zengqiao
0270f77eaa add upgrade doc 2021-02-09 21:46:55 +08:00
EricZeng
dcba71ada4 Merge pull request #179 from lucasun/dev_2.3.0_fe
迭代V2.5, 修复broker监控问题,增加JMX认证支持等...
2021-02-09 18:48:42 +08:00
孙超
6080f76a9c 迭代V2.5, 修复broker监控问题,增加JMX认证支持等... 2021-02-09 15:26:47 +08:00
zengqiao
5efd424172 version 2.3.0 2021-02-09 11:20:56 +08:00
EricZeng
2672502c07 Merge pull request #174 from 17hao/issue-153-authority
Tracking delete account
2021-02-07 16:10:56 +08:00
EricZeng
83440cc3d9 Merge pull request #173 from 17hao/issue-153
Tracking changes applied to app
2021-02-07 16:10:01 +08:00
17hao
8e5f93be1c Tracking delete account 2021-02-07 15:54:41 +08:00
17hao
c1afc07955 Tracking changes applied to app 2021-02-07 15:16:26 +08:00
EricZeng
4a83e14878 Merge pull request #172 from 17hao/issue-153
Tracking changes applied to Kafka cluster
2021-02-07 14:38:38 +08:00
17hao
832320abc6 Improve code's cohesion && save jmx properties 2021-02-07 14:20:57 +08:00
17hao
70c237da72 Tracking changes applied to Kafka cluster 2021-02-07 13:23:22 +08:00
EricZeng
edfcc5c023 Merge pull request #169 from 17hao/issue-153
Record topic operation
2021-02-06 22:30:32 +08:00
17hao
0668debec6 Update pom.xml 2021-02-06 18:46:47 +08:00
17hao
02d6463faa Using JsonUtils instead of fastjson 2021-02-06 18:43:36 +08:00
17hao
1fdb85234c Record editting topic 2021-02-05 12:18:50 +08:00
EricZeng
44b7dd1808 Merge pull request #167 from ZHAOYINRUI/master
更新readme、集群接入手册
2021-02-04 19:21:59 +08:00
ZHAOYINRUI
e983ee3101 Update README.md 2021-02-04 19:10:11 +08:00
ZHAOYINRUI
75e7e81c05 Add files via upload 2021-02-04 19:09:02 +08:00
ZHAOYINRUI
31ce3b9c08 Update add_cluster.md 2021-02-04 19:08:28 +08:00
EricZeng
ed93c50fef modify without logical cluster desc 2021-02-04 16:54:42 +08:00
EricZeng
4845660eb5 Merge pull request #163 from 17hao/issue-160
Issue#160: Remove __consumer_offsets from topic list
2021-02-04 16:42:41 +08:00
17hao
c7919210a2 Fix topic list filter condition 2021-02-04 16:32:31 +08:00
17hao
9491418f3b Update if statements 2021-02-04 12:33:32 +08:00
17hao
e8de403286 Hide __transaction_state in topic list && fix logic error 2021-02-04 12:14:44 +08:00
17hao
dfb625377b Using existing topic name constant 2021-02-03 22:30:38 +08:00
EricZeng
2c0f2a8be6 Merge pull request #166 from ZHAOYINRUI/master
更新集群接入文章、资源申请文章
2021-02-03 21:02:38 +08:00
ZHAOYINRUI
787d3cb3e9 Update resource_apply.md 2021-02-03 20:52:44 +08:00
ZHAOYINRUI
96ca17d26c Add files via upload 2021-02-03 19:43:03 +08:00
ZHAOYINRUI
3dd0f7f2c3 Update add_cluster.md 2021-02-03 19:41:33 +08:00
ZHAOYINRUI
10ba0cf976 Update resource_apply.md 2021-02-03 18:18:02 +08:00
ZHAOYINRUI
276c15cc23 Delete docs/user_guide/resource_apply directory 2021-02-03 18:08:15 +08:00
ZHAOYINRUI
2584b848ad Update resource_apply.md 2021-02-03 18:07:34 +08:00
ZHAOYINRUI
6471efed5f Add files via upload 2021-02-03 18:04:40 +08:00
ZHAOYINRUI
5b7d7ad65d Create resource_apply.md 2021-02-03 18:01:42 +08:00
17hao
712851a8a5 Add braces 2021-02-03 16:06:16 +08:00
17hao
63d291cb47 Remove __consumer_offsets from topic list 2021-02-03 15:50:33 +08:00
EricZeng
f825c92111 Merge pull request #159 from didi/dev_2.2.1
storage support s3
2021-02-03 13:49:52 +08:00
EricZeng
419eb2ea41 Merge pull request #158 from didi/dev
change dockerfile and heml location
2021-02-03 10:09:43 +08:00
zengqiao
89b58dd64e storage support s3 2021-02-02 16:42:20 +08:00
zengqiao
6bc5f81440 change dockerfile and heml location 2021-02-02 15:58:46 +08:00
EricZeng
424f4b7b5e Merge pull request #157 from didi/master
merge master
2021-02-02 15:33:51 +08:00
mrazkong
9271a1caac Merge pull request #118 from yangvipguang/helm-dev
增加Dockerfile 和 简单Helm
2021-02-01 10:50:05 +08:00
杨光
0ee4df03f9 Update Dockerfile 2021-01-31 15:34:15 +08:00
杨光
8ac713ce32 Update Dockerfile 2021-01-31 15:30:18 +08:00
杨光
76b2489fe9 Delete docker-depends/agent/config directory 2021-01-31 15:29:50 +08:00
杨光
6786095154 Delete sources.list 2021-01-31 15:29:31 +08:00
杨光
2c5793ef37 Delete settings 2021-01-31 15:29:19 +08:00
杨光
d483f25b96 Add files via upload
add  jmx prometheus
2021-01-31 15:28:59 +08:00
EricZeng
7118368979 Merge pull request #136 from ZHAOYINRUI/patch-10
Create resource_apply.md
2021-01-29 10:54:11 +08:00
EricZeng
59256c2e80 modify jdbc url
modify jdbc url, add useSSL=false
2021-01-29 10:53:35 +08:00
EricZeng
1fb8a0db1e Merge pull request #146 from ZHAOYINRUI/patch-12
Update README.md
2021-01-29 10:03:48 +08:00
ZHAOYINRUI
07d0c8e8fa Update README.md 2021-01-28 22:02:49 +08:00
EricZeng
98452ead17 Merge pull request #145 from didi/dev
faq add how to resolve topic biz data not exist error desc
2021-01-28 16:20:42 +08:00
zengqiao
d8c9f40377 faq add how to resolve topic biz data not exist error desc 2021-01-28 15:50:31 +08:00
EricZeng
8148d5eec6 Merge pull request #144 from didi/dev
optimize result code
2021-01-28 14:11:00 +08:00
zengqiao
4c429ad604 optimize result code 2021-01-28 12:06:06 +08:00
EricZeng
a9c52de8d5 Merge pull request #143 from ZhaoXinlong/patch-1
correcting some typo
2021-01-27 20:28:32 +08:00
ZhaoXinlong
f648aa1f91 correcting some typo
修改文字错误
2021-01-27 16:31:42 +08:00
EricZeng
eaba388bdd Merge pull request #142 from didi/dev
add connect jmx failed desc
2021-01-27 16:06:39 +08:00
zengqiao
73e6afcbc6 add connect jmx failed desc 2021-01-27 16:01:18 +08:00
EricZeng
8c3b72adf2 Merge pull request #139 from didi/dev
optimize message when login failed
2021-01-26 19:57:50 +08:00
zengqiao
ae18ff4262 optimize login failed message 2021-01-26 16:15:08 +08:00
ZHAOYINRUI
1adc8af543 Create resource_apply.md 2021-01-25 19:27:31 +08:00
EricZeng
7413df6f1e Merge pull request #131 from ZHAOYINRUI/patch-9
Update alarm_rules.md
2021-01-25 18:36:06 +08:00
EricZeng
bda8559190 Merge pull request #135 from didi/master
merge master
2021-01-25 18:34:56 +08:00
EricZeng
b74612fa41 Merge pull request #134 from didi/dev_2.2.0
merge dev 2.2.0
2021-01-25 17:30:26 +08:00
EricZeng
22e0c20dcd Merge pull request #133 from lucasun/dev_2.2.0_fe
fix txt
2021-01-25 17:21:42 +08:00
孙超
08f92e1100 fix txt 2021-01-25 17:02:07 +08:00
zengqiao
bb12ece46e modify zk example 2021-01-25 17:01:54 +08:00
EricZeng
0065438305 Merge pull request #132 from lucasun/dev_2.2.0_fe
add fe page
2021-01-25 16:47:02 +08:00
孙超
7f115c1b3e add fe page 2021-01-25 15:34:07 +08:00
ZHAOYINRUI
4e0114ab0d Update alarm_rules.md 2021-01-25 13:24:01 +08:00
EricZeng
0ef64fa4bd Merge pull request #126 from ZHAOYINRUI/patch-8
Create alarm_rules.md
2021-01-25 11:09:21 +08:00
ZHAOYINRUI
84dbc17c22 Update alarm_rules.md 2021-01-25 11:04:30 +08:00
EricZeng
16e16e356d Merge pull request #130 from xuehaipeng/patch-1
Update faq.md
2021-01-25 10:35:12 +08:00
xuehaipeng
978ee885c4 Update faq.md 2021-01-24 20:06:29 +08:00
zengqiao
850d43df63 add v2.2.0 feature & fix 2021-01-23 13:19:29 +08:00
zengqiao
fc109fd1b1 bump version to 2.2.0 2021-01-23 12:41:38 +08:00
EricZeng
2829947b93 Merge pull request #129 from didi/master
merge master
2021-01-23 11:09:52 +08:00
EricZeng
0c2af89a1c Merge pull request #125 from ZHAOYINRUI/patch-7
create kafka_metrics_desc.md
2021-01-23 11:03:14 +08:00
EricZeng
14c2dc9624 update kafka_metrics.md 2021-01-23 11:01:44 +08:00
EricZeng
4f35d710a6 Update and rename metric.md to kafka_metrics_desc.md 2021-01-23 10:58:11 +08:00
EricZeng
fdb5e018e5 Merge pull request #122 from ZHAOYINRUI/patch-4
Update README.md
2021-01-23 10:51:26 +08:00
EricZeng
6001fde25c Update dynamic_config_manager.md 2021-01-23 10:21:47 +08:00
EricZeng
ae63c0adaf Merge pull request #128 from didi/dev
add sync topic to db doc
2021-01-23 10:20:27 +08:00
zengqiao
ad1539c8f6 add sync topic to db doc 2021-01-23 10:17:59 +08:00
EricZeng
634a0c8cd0 Update faq.md 2021-01-22 20:42:13 +08:00
ZHAOYINRUI
773f9a0c63 Create alarm_rules.md 2021-01-22 18:16:51 +08:00
ZHAOYINRUI
e4e320e9e3 Create metric.md 2021-01-22 18:06:35 +08:00
ZHAOYINRUI
3b4b400e6b Update README.md 2021-01-22 15:56:53 +08:00
杨光
a950be2d95 change password 2021-01-20 17:57:14 +08:00
杨光
ba6f5ab984 add helm and dockerfile 2021-01-20 17:49:56 +08:00
mike.zhangliang
f3a5e3f5ed Update README.md 2021-01-18 19:06:43 +08:00
mike.zhangliang
e685e621f3 Update README.md 2021-01-18 19:05:44 +08:00
EricZeng
2cd2be9b67 Merge pull request #112 from didi/dev
监控告警系统对接说明文档
2021-01-17 18:21:16 +08:00
zengqiao
e73d9e8a03 add monitor_system_integrate_with_self file 2021-01-17 18:18:07 +08:00
zengqiao
476f74a604 rename file 2021-01-17 16:49:02 +08:00
EricZeng
ab0d1d99e6 Merge pull request #111 from didi/dev
Dev
2021-01-17 16:11:08 +08:00
zengqiao
d5680ffd5d 增加Topic同步任务&Bug修复 2021-01-16 16:26:38 +08:00
EricZeng
3c091a88d4 Merge pull request #110 from didi/master
合并master分支上的改动
2021-01-16 13:37:31 +08:00
EricZeng
49b70b33de Merge pull request #108 from didi/dev
增加application.yml文件说明 & 修改版本
2021-01-16 13:34:07 +08:00
zengqiao
c5ff2716fb 优化build.sh & yaml 2021-01-16 12:39:56 +08:00
ZQKC
400fdf0896 修复图片地址错误问题
修复图片地址错误问题
2021-01-16 12:04:20 +08:00
ZQKC
cbb8c7323c Merge pull request #109 from ZHAOYINRUI/master
架构图更新、钉钉群ID更新
2021-01-16 09:33:19 +08:00
ZHAOYINRUI
60e79f8f77 Update README.md 2021-01-16 00:25:06 +08:00
ZHAOYINRUI
0e829d739a Add files via upload 2021-01-16 00:22:31 +08:00
ZQKC
62abb274e0 增加application.yml文件说明
增加application.yml文件说明
2021-01-15 19:14:48 +08:00
ZQKC
e4028785de Update README.md
change km address
2021-01-09 15:30:30 +08:00
mrazkong
2bb44bcb76 Update Intergration_n9e_monitor.md 2021-01-07 17:09:15 +08:00
mike.zhangliang
684599f81b Update README.md 2021-01-07 15:44:17 +08:00
mike.zhangliang
b56d28f5df Update README.md 2021-01-07 15:43:07 +08:00
ZHAOYINRUI
02b9ac04c8 Update user_guide_cn.md 2020-12-30 22:44:23 +08:00
ZQKC
abb652ebd5 Merge pull request #104 from didi/dev
v2.1版本合并
2020-12-19 01:14:26 +08:00
ZQKC
ff78a9cc35 Merge pull request #101 from didi/dev
use mysql 8
2020-12-11 11:49:06 +08:00
ZQKC
aea63cad52 Merge pull request #94 from didi/dev
增加FAQ
2020-11-22 21:49:48 +08:00
ZQKC
dd6069e41a Merge pull request #93 from didi/dev
夜莺Mon集成配置说明
2020-11-22 20:09:34 +08:00
ZQKC
4d9a327b1f Merge pull request #92 from didi/dev
FIX N9e Mon
2020-11-22 18:15:49 +08:00
ZQKC
76c2477387 Merge pull request #91 from didi/dev
修复上报夜莺功能
2020-11-22 17:00:39 +08:00
ZQKC
edfd84a8e3 Merge pull request #88 from didi/dev
增加build.sh
2020-11-15 17:02:26 +08:00
ZQKC
abbe47f6b9 Merge pull request #87 from didi/dev
初始化SQL优化&KCM修复&连接信息修复
2020-11-15 16:55:42 +08:00
ZQKC
f70cfabede Merge pull request #84 from didi/dev
fix 前端资源加载问题
2020-11-14 16:56:16 +08:00
273 changed files with 6451 additions and 1664 deletions

View File

@@ -5,60 +5,89 @@
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
## 主要功能特性
### 集群监控维度
- 多版本集群管控,支持从`0.10.2`到`2.x`版本;
- 集群Topic、Broker等多维度历史与实时关键指标查看
阅读本README文档您可以了解到滴滴Logi-KafkaManager的用户群体、产品定位等信息并通过体验地址快速体验Kafka集群指标监控与运维管控的全流程。<br>若滴滴Logi-KafkaManager已在贵司的生产环境进行使用并想要获得官方更好地支持和指导可以通过[`OCE认证`](http://obsuite.didiyun.com/open/openAuth),加入官方交流平台。
### 集群管控维度
## 1 产品简介
滴滴Logi-KafkaManager脱胎于滴滴内部多年的Kafka运营实践经验是面向Kafka用户、Kafka运维人员打造的共享多租户Kafka云平台。专注于Kafka运维管控、监控告警、资源治理等核心场景经历过大规模集群、海量大数据的考验。内部满意度高达90%的同时,还与多家知名企业达成商业化合作。
- 集群运维包括逻辑Region方式管理集群
- Broker运维包括优先副本选举
- Topic运维包括创建、查询、扩容、修改属性、数据采样及迁移等
- 消费组运维,包括指定时间或指定偏移两种方式进行重置消费偏移
### 1.1 快速体验地址
- 体验地址 http://117.51.146.109:8080 账号密码 admin/admin
### 1.2 体验地图
相比较于同类产品的用户视角单一大多为管理员视角滴滴Logi-KafkaManager建立了基于分角色、多场景视角的体验地图。分别是**用户体验地图、运维体验地图、运营体验地图**
#### 1.2.1 用户体验地图
- 平台租户申请&nbsp;&nbsp;申请应用App作为Kafka中的用户名并用 AppID+password作为身份验证
- 集群资源申请&nbsp;&nbsp;:按需申请、按需使用。可使用平台提供的共享集群,也可为应用申请独立的集群
- Topic&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;可根据应用App创建Topic或者申请其他topic的读写权限
- Topic&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Topic数据采样、调整配额、申请分区等操作
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;基于Topic生产消费各环节耗时统计监控不同分位数性能指标
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;:支持将消费偏移重置至指定时间或指定位置
#### 1.2.2 运维体验地图
- 多版本集群管控&nbsp;&nbsp;:支持从`0.10.2`到`2.x`版本
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;集群Topic、Broker等多维度历史与实时关键指标查看建立健康分体系
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;划分部分Broker作为Region使用Region定义资源划分单位并按照业务、保障能力区分逻辑集群
- Broker&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;:包括优先副本选举等操作
- Topic&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;:包括创建、查询、扩容、修改属性、迁移、下线等
### 用户使用维度
#### 1.2.3 运营体验地图
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;沉淀资源治理方法。针对Topic分区热点、分区不足等高频常见问题沉淀资源治理方法实现资源治理专家化
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;工单体系。Topic创建、调整配额、申请分区等操作由专业运维人员审批规范资源使用保障平台平稳运行
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;成本控制。Topic资源、集群资源按需申请、按需使用。根据流量核算费用帮助企业建设大数据成本核算体系
- Kafka用户、Kafka研发、Kafka运维 视角区分
- Kafka用户、Kafka研发、Kafka运维 权限区分
### 1.3 核心优势
- &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;:监控多项核心指标,统计不同分位数据,提供种类丰富的指标监控报表,帮助用户、运维人员快速高效定位问题
- 便&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;按照Region定义集群资源划分单位将逻辑集群根据保障等级划分。在方便资源隔离、提高扩展能力的同时实现对服务端的强管控
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;基于滴滴内部多年运营实践沉淀资源治理方法建立健康分体系。针对Topic分区热点、分区不足等高频常见问题实现资源治理专家化
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;:与滴滴夜莺监控告警系统打通,集成监控告警、集群部署、集群升级等能力。形成运维生态,凝练专家服务,使运维更高效
### 1.4 滴滴Logi-KafkaManager架构图
![kafka-manager-arch](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgDHNDLj2ChKxctSuf72)
## kafka-manager架构图
## 2 相关文档
![kafka-manager-arch](./docs/assets/images/common/arch.png)
### 2.1 产品文档
- [滴滴Logi-KafkaManager 安装手册](docs/install_guide/install_guide_cn.md)
- [滴滴Logi-KafkaManager 接入集群](docs/user_guide/add_cluster/add_cluster.md)
- [滴滴Logi-KafkaManager 用户使用手册](docs/user_guide/user_guide_cn.md)
- [滴滴Logi-KafkaManager FAQ](docs/user_guide/faq.md)
### 2.2 社区文章
- [滴滴云官网产品介绍](https://www.didiyun.com/production/logi-KafkaManager.html)
- [7年沉淀之作--滴滴Logi日志服务套件](https://mp.weixin.qq.com/s/-KQp-Qo3WKEOc9wIR2iFnw)
- [滴滴Logi-KafkaManager 一站式Kafka监控与管控平台](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ)
- [滴滴Logi-KafkaManager 开源之路](https://xie.infoq.cn/article/0223091a99e697412073c0d64)
- [滴滴Logi-KafkaManager 系列视频教程](https://mp.weixin.qq.com/s/9X7gH0tptHPtfjPPSdGO8g)
- [kafka实践十五滴滴开源Kafka管控平台 Logi-KafkaManager研究--A叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244)
## 3 滴滴Logi开源用户钉钉交流群
![dingding_group](./docs/assets/images/common/dingding_group.jpg)
钉钉群ID32821440
## 4 OCE认证
OCE是一个认证机制和交流平台为滴滴Logi-KafkaManager生产用户量身打造我们会为OCE企业提供更好的技术支持比如专属的技术沙龙、企业一对一的交流机会、专属的答疑群等如果贵司Logi-KafkaManager上了生产[快来加入吧](http://obsuite.didiyun.com/open/openAuth)
## 相关文档
## 5 项目成员
- [kafka-manager 安装手册](docs/install_guide/install_guide_cn.md)
- [kafka-manager 接入集群](docs/user_guide/add_cluster/add_cluster.md)
- [kafka-manager 用户使用手册](docs/user_guide/user_guide_cn.md)
- [kafka-manager FAQ](docs/user_guide/faq.md)
### 5.1 内部核心人员
## 钉钉交流群
![dingding_group](./docs/assets/images/common/dingding_group.jpg)
`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`、`zhaoyinrui`、`marzkonglingxu`、`joysunchao`
## 项目成员
### 内部核心人员
`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`
### 外部贡献者
### 5.2 外部贡献者
`fangjunyu`、`zhoutaiyang`
## 协议
## 6 协议
`kafka-manager`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE)

View File

@@ -3,72 +3,53 @@ workspace=$(cd $(dirname $0) && pwd -P)
cd $workspace
## constant
km_version=2.1.0
app_name=kafka-manager-$km_version
OUTPUT_DIR=./output
KM_VERSION=2.3.0
APP_NAME=kafka-manager
APP_DIR=${APP_NAME}-${KM_VERSION}
gitversion=.gitversion
control=./control.sh
create_mysql_table=./docs/install_guide/create_mysql_table.sql
app_config_file=./kafka-manager-web/src/main/resources/application.yml
MYSQL_TABLE_SQL_FILE=./docs/install_guide/create_mysql_table.sql
CONFIG_FILE=./kafka-manager-web/src/main/resources/application.yml
## function
function build() {
# 进行编译
# # cmd 设置使用的JDK, 按需选择, 默认已安装了JDK 8
# JVERSION=`java -version 2>&1 | awk 'NR==1{gsub(/"/,"");print $3}'`
# major=`echo $JVERSION | awk -F. '{print $1}'`
# mijor=`echo $JVERSION | awk -F. '{print $2}'`
# if [ $major -le 1 ] && [ $mijor -lt 8 ]; then
# export JAVA_HOME=/usr/local/jdk1.8.0_65 #(使用jdk8请设置)
# export PATH=$JAVA_HOME/bin:$PATH
# fi
# 编译命令
mvn -U clean package -Dmaven.test.skip=true
local sc=$?
if [ $sc -ne 0 ];then
## 编译失败, 退出码为 非0
echo "$app_name build error"
echo "$APP_NAME build error"
exit $sc
else
echo -n "$app_name build ok, vsn="`gitversion`
echo "$APP_NAME build ok"
fi
}
function make_output() {
# 新建output目录
rm -rf $app_name &>/dev/null
mkdir -p $app_name &>/dev/null
# 新建output目录
rm -rf ${OUTPUT_DIR} &>/dev/null
mkdir -p ${OUTPUT_DIR}/${APP_DIR} &>/dev/null
# 填充output目录, output内的内容 即为 线上部署内容
(
# cp -rf $control $output_dir && # 拷贝 control.sh 脚本 至output目录
cp -rf $create_mysql_table $app_name && # 拷贝 sql 初始化脚本 至output目录
cp -rf $app_config_file $app_name && # 拷贝 application.yml 至output目录
# 填充output目录, output内的内容
(
cp -rf ${MYSQL_TABLE_SQL_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 sql 初始化脚本 至output目录
cp -rf ${CONFIG_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 application.yml 至output目录
# 拷贝程序包到output路径
cp kafka-manager-web/target/kafka-manager-web-$km_version-SNAPSHOT.jar ${app_name}/${app_name}-SNAPSHOT.jar
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
# 拷贝程序包到output路径
cp kafka-manager-web/target/kafka-manager-web-${KM_VERSION}-SNAPSHOT.jar ${OUTPUT_DIR}/${APP_DIR}/${APP_NAME}.jar
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
}
function make_package() {
# 压缩output目录
(
tar cvzf ${app_name}.tar.gz ${app_name}
echo -e "make package ok."
cd ${OUTPUT_DIR} && tar cvzf ${APP_DIR}.tar.gz ${APP_DIR}
echo -e "make package ok."
) || { echo -e "make package error"; exit 2; } # 压缩output目录失败后, 退出码为 非0
}
## internals
function gitversion() {
git log -1 --pretty=%h > $gitversion
local gv=`cat $gitversion`
echo "$gv"
}
##########################################
## main
## 其中,
@@ -88,4 +69,4 @@ make_package
# 编译成功
echo -e "build done"
exit 0

View File

@@ -0,0 +1,44 @@
FROM openjdk:8-jdk-alpine3.9
LABEL author="yangvipguang"
ENV VERSION 2.1.0
ENV JAR_PATH kafka-manager-web/target
COPY $JAR_PATH/kafka-manager-web-$VERSION-SNAPSHOT.jar /tmp/app.jar
COPY $JAR_PATH/application.yml /km/
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN apk add --no-cache --virtual .build-deps \
font-adobe-100dpi \
ttf-dejavu \
fontconfig \
curl \
apr \
apr-util \
apr-dev \
tomcat-native \
&& apk del .build-deps
ENV AGENT_HOME /opt/agent/
WORKDIR /tmp
COPY docker-depends/config.yaml $AGENT_HOME
COPY docker-depends/jmx_prometheus_javaagent-0.14.0.jar $AGENT_HOME
ENV JAVA_AGENT="-javaagent:$AGENT_HOME/jmx_prometheus_javaagent-0.14.0.jar=9999:$AGENT_HOME/config.yaml"
ENV JAVA_HEAP_OPTS="-Xms1024M -Xmx1024M -Xmn100M "
ENV JAVA_OPTS="-verbose:gc \
-XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintHeapAtGC -Xloggc:/tmp/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps \
-XX:MaxMetaspaceSize=256M -XX:+DisableExplicitGC -XX:+UseStringDeduplication \
-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:-UseContainerSupport"
#-Xlog:gc -Xlog:gc* -Xlog:gc+heap=trace -Xlog:safepoint
EXPOSE 8080 9999
ENTRYPOINT ["sh","-c","java -jar $JAVA_HEAP_OPTS $JAVA_OPTS /tmp/app.jar --spring.config.location=/km/application.yml"]
## 默认不带Prometheus JMX监控需要可以自行取消以下注释并注释上面一行默认Entrypoint 命令。
## ENTRYPOINT ["sh","-c","java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS /tmp/app.jar --spring.config.location=/km/application.yml"]

View File

@@ -0,0 +1,5 @@
---
startDelaySeconds: 0
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

container/helm/Chart.yaml (new file, 24 lines)
View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: didi-km
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "didi-km.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "didi-km.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "didi-km.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "didi-km.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "didi-km.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "didi-km.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "didi-km.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "didi-km.labels" -}}
helm.sh/chart: {{ include "didi-km.chart" . }}
{{ include "didi-km.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "didi-km.selectorLabels" -}}
app.kubernetes.io/name: {{ include "didi-km.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "didi-km.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "didi-km.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,88 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: km-cm
data:
application.yml: |
server:
port: 8080
tomcat:
accept-count: 1000
max-connections: 10000
max-threads: 800
min-spare-threads: 100
spring:
application:
name: kafkamanager
datasource:
kafka-manager:
jdbc-url: jdbc:mysql://xxxxx:3306/kafka-manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8&useSSL=false
username: admin
password: admin
driver-class-name: com.mysql.jdbc.Driver
main:
allow-bean-definition-overriding: true
profiles:
active: dev
servlet:
multipart:
max-file-size: 100MB
max-request-size: 100MB
logging:
config: classpath:logback-spring.xml
custom:
idc: cn
jmx:
max-conn: 20
store-metrics-task:
community:
broker-metrics-enabled: true
topic-metrics-enabled: true
didi:
app-topic-metrics-enabled: false
topic-request-time-metrics-enabled: false
topic-throttled-metrics: false
save-days: 7
# 任务相关的开关
task:
op:
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
account:
ldap:
kcm:
enabled: false
storage:
base-url: http://127.0.0.1
n9e:
base-url: http://127.0.0.1:8004
user-token: 12345678
timeout: 300
account: root
script-file: kcm_script.sh
monitor:
enabled: false
n9e:
nid: 2
user-token: 1234567890
mon:
base-url: http://127.0.0.1:8032
sink:
base-url: http://127.0.0.1:8006
rdb:
base-url: http://127.0.0.1:80
notify:
kafka:
cluster-id: 95
topic-name: didi-kafka-notify
order:
detail-url: http://127.0.0.1

View File

@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "didi-km.fullname" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "didi-km.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "didi-km.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "didi-km.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
protocol: TCP
- name: jmx-metrics
containerPort: 9999
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "didi-km.fullname" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "didi-km.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,41 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "didi-km.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "didi-km.fullname" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "didi-km.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "didi-km.serviceAccountName" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "didi-km.fullname" . }}-test-connection"
labels:
{{- include "didi-km.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "didi-km.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@@ -0,0 +1,79 @@
# Default values for didi-km.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: docker.io/yangvipguang/km
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v18"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: "km"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 50m
memory: 2048Mi
requests:
cpu: 10m
memory: 200Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

(6 binary image files added, not shown; sizes: 382 KiB, 270 KiB, 589 KiB, 652 KiB, 511 KiB, 672 KiB)

View File

@@ -0,0 +1,101 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
## JMX-连接失败问题解决
集群正常接入Logi-KafkaManager之后,即可以看到集群的Broker列表。此时如果查看不了Topic或者Broker的实时流量信息,那么大概率是JMX连接出了问题。
下面我们按照步骤一步一步地检查。
### 1、问题&说明
**类型一JMX配置未开启**
未开启时,直接到`2、解决方法`查看如何开启即可。
![check_jmx_opened](./assets/connect_jmx_failed/check_jmx_opened.jpg)
**类型二:配置错误**
`JMX`端口已经开启的情况下,有的时候开启的配置不正确,此时也会导致出现连接失败的问题。这里大概列举几种原因:
- `JMX`配置错误:见`2、解决方法`
- 存在防火墙或者网络限制:网络通的另外一台机器`telnet`试一下看是否可以连接上。
- 需要进行用户名及密码的认证:见`3、解决方法 —— 认证的JMX`
错误日志例子:
```
# 错误一: 错误提示的是真实的IP, 这种情况基本就是JMX配置有问题。
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999.
java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is:
# 错误二错误提示的是127.0.0.1这个IP这个是机器的hostname配置的可能有问题。
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999.
java.rmi.ConnectException: Connection refused to host: 127.0.0.1; nested exception is:
```
### 2、解决方法
这里仅介绍一下比较通用的解决方式,如若有更好的方式,欢迎大家指导告知一下。
修改`kafka-server-start.sh`文件:
```
# 在这个下面增加JMX端口的配置
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999
fi
```
&nbsp;
修改`kafka-run-class.sh`文件
```
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}"
fi
# JMX port to use
if [ $JMX_PORT ]; then
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
fi
```
### 3、解决方法 —— 认证的JMX
如果您是直接看的这个部分,建议先看一下上一节:`2、解决方法`以确保`JMX`的配置没有问题了。
在JMX的配置等都没有问题的情况下如果是因为认证的原因导致连接不了的此时可以使用下面介绍的方法进行解决。
**当前这块后端刚刚开发完成,可能还不够完善,有问题随时沟通。**
`Logi-KafkaManager 2.2.0+`之后的版本后端已经支持`JMX`认证方式的连接,但是还没有界面,此时我们可以往`cluster`表的`jmx_properties`字段写入`JMX`的认证信息。
这个数据是`json`格式的字符串,例子如下所示:
```json
{
"maxConn": 10, # KM对单台Broker的最大JMX连接数
"username": "xxxxx", # 用户名
"password": "xxxx", # 密码
"openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭
}
```
&nbsp;
SQL的例子
```sql
UPDATE cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false }' where id={xxx};
```
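
作为补充,下面给出一段连接带认证`JMX`端口的示意代码,用于验证`jmx_properties`中的用户名和密码是否可用(仅为演示用途的最小示例,并非KM源码;其中主机、端口与账号均为假设值,请按实际情况替换):

```java
import java.util.HashMap;
import java.util.Map;
import javax.management.MBeanServerConnection;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxAuthConnectDemo {
    public static void main(String[] args) throws Exception {
        // Broker的JMX地址(主机、端口为假设值)
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://192.168.0.1:9999/jmxrmi");

        // 对应jmx_properties中的username/password
        Map<String, Object> env = new HashMap<>();
        env.put(JMXConnector.CREDENTIALS, new String[]{"xxxxx", "xxxx"});

        // 能正常读取MBean数量, 说明认证信息可用
        try (JMXConnector connector = JMXConnectorFactory.connect(url, env)) {
            MBeanServerConnection conn = connector.getMBeanServerConnection();
            System.out.println("MBean count: " + conn.getMBeanCount());
        }
    }
}
```

若该代码也连接失败,优先回到`2、解决方法`检查`java.rmi.server.hostname`等基础配置。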

View File

@@ -0,0 +1,65 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 动态配置管理
## 1、Topic定时同步任务
### 1.1、配置的用途
`Logi-KafkaManager`在设计上,所有的资源都是挂在应用(app)下面。 如果接入的Kafka集群已经存在Topic了那么会导致这些Topic不属于任何的应用从而导致很多管理上的不便。
因此需要有一个方式将这些无主的Topic挂到某个应用下面。
这里提供了一个配置,会定时自动将集群无主的Topic挂到某个应用下面。
### 1.2、相关实现
就是一个定时任务,该任务会定期做同步的工作。具体代码的位置在`com.xiaojukeji.kafka.manager.task.dispatch.op`包下面的`SyncTopic2DB`类。
### 1.3、配置说明
**步骤一:开启该功能**
在application.yml文件中增加如下配置已经有该配置的话直接把false修改为true即可
```yml
# 任务相关的开关
task:
op:
sync-topic-enabled: true # 无主的Topic定期同步到DB中
```
**步骤二:配置管理中指定挂在那个应用下面**
配置的位置:
![sync_topic_to_db](./assets/dynamic_config_manager/sync_topic_to_db.jpg)
配置键:`SYNC_TOPIC_2_DB_CONFIG_KEY`
配置值(JSON数组)
- clusterId需要进行定时同步的集群ID
- defaultAppId该集群无主的Topic将挂在哪个应用下面
- addAuthority:是否需要加上权限, 默认是false。因为考虑到这个挂载只是临时的,我们不希望用户使用这个App,同时后续可能移交给真正所属的应用,因此默认不加上权限。
**注意这里的集群ID或者是应用ID不存在的话会导致配置不生效。该任务对已经在DB中的Topic不会进行修改**
```json
[
{
"clusterId": 1234567,
"defaultAppId": "ANONYMOUS",
"addAuthority": false
},
{
"clusterId": 7654321,
"defaultAppId": "ANONYMOUS",
"addAuthority": false
}
]
```
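
配置值的解析可以参考下面的示意代码(假设使用`fastjson`反序列化,POJO字段名与上述三个配置键一一对应;该代码仅演示配置的结构,并非`SyncTopic2DB`源码):

```java
import java.util.List;

import com.alibaba.fastjson.JSON;

public class SyncTopic2DBConfigDemo {
    // 与配置值中的clusterId/defaultAppId/addAuthority三个键对应
    public static class SyncTopic2DBConfig {
        private Long clusterId;
        private String defaultAppId;
        private boolean addAuthority;

        public Long getClusterId() { return clusterId; }
        public void setClusterId(Long clusterId) { this.clusterId = clusterId; }
        public String getDefaultAppId() { return defaultAppId; }
        public void setDefaultAppId(String defaultAppId) { this.defaultAppId = defaultAppId; }
        public boolean isAddAuthority() { return addAuthority; }
        public void setAddAuthority(boolean addAuthority) { this.addAuthority = addAuthority; }
    }

    public static void main(String[] args) {
        String configValue =
                "[{\"clusterId\":1234567,\"defaultAppId\":\"ANONYMOUS\",\"addAuthority\":false}]";
        List<SyncTopic2DBConfig> configs =
                JSON.parseArray(configValue, SyncTopic2DBConfig.class);
        for (SyncTopic2DBConfig config : configs) {
            // 任务会把该集群下无主的Topic挂到defaultAppId对应的应用下
            System.out.println("clusterId=" + config.getClusterId()
                    + ", defaultAppId=" + config.getDefaultAppId()
                    + ", addAuthority=" + config.isAddAuthority());
        }
    }
}
```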

View File

@@ -7,7 +7,7 @@
---
# 夜莺监控集成
# 监控系统集成——夜莺
- `Kafka-Manager`通过将 监控的数据 以及 监控的规则 都提交给夜莺,然后依赖夜莺的监控系统从而实现监控告警功能。
@@ -22,10 +22,13 @@ monitor:
n9e:
nid: 2
user-token: 123456
# 夜莺 mon监控服务 地址
mon:
base-url: http://127.0.0.1:8032
sink:
base-url: http://127.0.0.1:8006
# 夜莺 transfer上传服务 地址
sink:
base-url: http://127.0.0.1:8008
# 夜莺 rdb资源服务 地址
rdb:
base-url: http://127.0.0.1:80

View File

@@ -0,0 +1,54 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 监控系统集成
- 监控系统默认与 [夜莺](https://github.com/didi/nightingale) 进行集成;
- 对接自有的监控系统需要进行简单的二次开发,即实现部分监控告警模块的相关接口即可;
- 集成会有两块内容,一个是指标数据上报的集成,还有一个是监控告警规则的集成;
## 1、指标数据上报集成
仅完成这一步的集成之后,即可将监控数据上报到监控系统中,此时已能够在自己的监控系统进行监控告警规则的配置了。
**步骤一:实现指标上报的接口**
- 按照自己内部监控系统的数据格式要求,将数据进行组装成符合自己内部监控系统要求的数据进行上报,具体的可以参考夜莺集成的实现代码。
- 至于会上报哪些指标,可以查看有哪些地方调用了该接口。
![sink_metrics](./assets/monitor_system_integrate_with_self/sink_metrics.jpg)
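
作为参考,下面是一个实现上报接口的最小示意(其中接口名`MonitorMetricSinkService`与数据结构`MetricSinkPoint`均为假设,实际的接口定义与指标字段请以源码及上图为准):

```java
import java.util.List;

// 假设的指标点结构: 指标名、打点时间(秒)、步长、取值、标签
class MetricSinkPoint {
    String metric;
    long timestamp;
    int step;
    double value;
    String tags;
}

// 假设的上报接口, 实际以源码中监控告警模块的接口定义为准
interface MonitorMetricSinkService {
    boolean sinkMetrics(List<MetricSinkPoint> points);
}

// 对接自有监控系统时, 在实现中把数据组装成自己系统要求的格式后上报
class SelfMonitorSink implements MonitorMetricSinkService {
    @Override
    public boolean sinkMetrics(List<MetricSinkPoint> points) {
        for (MetricSinkPoint point : points) {
            // TODO: 按自有监控系统的协议(HTTP、Kafka等)发送point
            System.out.println(point.metric + "=" + point.value + " tags=" + point.tags);
        }
        return true;
    }
}
```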
**步骤二:相关配置修改**
![change_config](./assets/monitor_system_integrate_with_self/change_config.jpg)
**步骤三:开启上报任务**
![open_sink_schedule](./assets/monitor_system_integrate_with_self/open_sink_schedule.jpg)
## 2、监控告警规则集成
完成**1、指标数据上报集成**之后,即可在自己的监控系统进行监控告警规则的配置了。完成该步骤的集成之后,可以在`Logi-KafkaManager`中进行监控告警规则的增删改查等等。
大体上和**1、指标数据上报集成**一致,
**步骤一:实现相关接口**
![integrate_ms](./assets/monitor_system_integrate_with_self/integrate_ms.jpg)
实现完成步骤一之后,接下来的步骤和**1、指标数据上报集成**中的步骤二、步骤三一致,都需要进行相关配置的修改即可。
## 3、总结
简单介绍了一下监控告警的集成,嫌麻烦的同学可以仅做 **1、指标数据上报集成** 这一节的内容即可满足一定场景下的需求。
**集成过程中有任何觉得文档没有说清楚的地方或者建议欢迎入群交流也欢迎贡献代码觉得好也辛苦给个star。**

View File

@@ -0,0 +1,27 @@
---
![kafka-manager-logo](../../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 升级至`2.2.0`版本
`2.2.0`版本在`cluster`表及`logical_cluster`各增加了一个字段因此需要执行下面的sql进行字段的增加。
```sql
# cluster表中增加jmx_properties字段, 这个字段会用于存储jmx相关的认证以及配置信息
ALTER TABLE `cluster` ADD COLUMN `jmx_properties` TEXT NULL COMMENT 'JMX配置' AFTER `security_properties`;
# logical_cluster中增加identification字段, 同时数据和原先name数据相同, 最后增加一个唯一键.
# 此后, name字段还是表示集群名称, identification字段表示的是集群标识, 只能是字母数字及下划线组成,
# 数据上报到监控系统时, 集群这个标识采用的字段就是identification字段, 之前使用的是name字段.
ALTER TABLE `logical_cluster` ADD COLUMN `identification` VARCHAR(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识' AFTER `name`;
UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0;
ALTER TABLE `logical_cluster` ADD UNIQUE INDEX `uniq_identification` (`identification` ASC);
```

View File

@@ -0,0 +1,17 @@
---
![kafka-manager-logo](../../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 升级至`2.3.0`版本
`2.3.0`版本在`gateway_config`表增加了一个描述说明的字段因此需要执行下面的sql进行字段的增加。
```sql
ALTER TABLE `gateway_config`
ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`;
```

View File

@@ -15,7 +15,7 @@
当前因为无法同时兼容`MySQL 8`与`MySQL 5.7`,因此代码中默认的版本还是`MySQL 5.7`。
当前如需使用`MySQL 8`,则按照下述流程进行简单修改代码。
- Step1. 修改application.yml中的MySQL驱动类

View File

@@ -0,0 +1,104 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 配置说明
```yaml
server:
port: 8080 # 服务端口
tomcat:
accept-count: 1000
max-connections: 10000
max-threads: 800
min-spare-threads: 100
spring:
application:
name: kafkamanager
datasource:
kafka-manager: # 数据库连接配置
jdbc-url: jdbc:mysql://127.0.0.1:3306/kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8 #数据库的地址
username: admin # 用户名
password: admin # 密码
driver-class-name: com.mysql.jdbc.Driver
main:
allow-bean-definition-overriding: true
profiles:
active: dev # 启用的配置
servlet:
multipart:
max-file-size: 100MB
max-request-size: 100MB
logging:
config: classpath:logback-spring.xml
custom:
idc: cn # 部署的数据中心, 忽略该配置, 后续会进行删除
jmx:
max-conn: 10 # 和单台 broker 的最大JMX连接数
store-metrics-task:
community:
broker-metrics-enabled: true # 社区部分broker metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB
topic-metrics-enabled: true # 社区部分topic的metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB
didi:
app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
topic-throttled-metrics: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
save-days: 7 #指标在DB中保持的天数-1表示永久保存7表示保存近7天的数据
# 任务相关的开关
task:
op:
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化
ldap:
kcm: # 集群升级部署相关的功能需要配合夜莺及S3进行使用这块我们后续专门补充一个文档细化一下牵扯到kcm_script.sh脚本的修改
enabled: false # 默认关闭
storage:
base-url: http://127.0.0.1 # 存储地址
n9e:
base-url: http://127.0.0.1:8004 # 夜莺任务中心的地址
user-token: 12345678 # 夜莺用户的token
timeout: 300 # 集群任务的超时时间,单位秒
account: root # 集群任务使用的账号
script-file: kcm_script.sh # 集群任务的脚本
monitor: # 监控告警相关的功能,需要配合夜莺进行使用
enabled: false # 默认关闭true就是开启
n9e:
nid: 2
user-token: 1234567890
mon:
# 夜莺 mon监控服务 地址
base-url: http://127.0.0.1:8032
sink:
# 夜莺 transfer上传服务 地址
base-url: http://127.0.0.1:8006
rdb:
# 夜莺 rdb资源服务 地址
base-url: http://127.0.0.1:80
# enabled: 表示是否开启监控告警的功能, true: 开启, false: 不开启
# n9e.nid: 夜莺的节点ID
# n9e.user-token: 用户的密钥,在夜莺的个人设置中
# n9e.mon.base-url: 监控地址
# n9e.sink.base-url: 数据上报地址
# n9e.rdb.base-url: 用户资源中心地址
notify: # 通知的功能
kafka: # 默认通知发送到kafka的指定Topic中
cluster-id: 95 # Topic的集群ID
topic-name: didi-kafka-notify # Topic名称
order: # 部署的KM的地址
detail-url: http://127.0.0.1
```
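
配置中最关键的是数据源部分。下面的示意代码可以用来快速验证MySQL连接串是否可用(地址、库名与账号取自上面的示例配置,请按实际修改;`useSSL=false`与前文`modify jdbc url`的改动一致,用于规避`MySQL 5.7+`默认开启SSL带来的告警或连接失败;该代码仅为排查用途的假设示例):

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class MySqlConnectCheck {
    public static void main(String[] args) throws Exception {
        // 与application.yml中datasource的jdbc-url保持一致
        String url = "jdbc:mysql://127.0.0.1:3306/kafka_manager"
                + "?characterEncoding=UTF-8&serverTimezone=GMT%2B8&useSSL=false";
        try (Connection conn = DriverManager.getConnection(url, "admin", "admin")) {
            System.out.println("MySQL OK: " + conn.getMetaData().getDatabaseProductVersion());
        }
    }
}
```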

View File

@@ -1,3 +1,8 @@
-- create database
CREATE DATABASE logi_kafka_manager;
USE logi_kafka_manager;
--
-- Table structure for table `account`
--
@@ -104,7 +109,8 @@ CREATE TABLE `cluster` (
`zookeeper` varchar(512) NOT NULL DEFAULT '' COMMENT 'zk地址',
`bootstrap_servers` varchar(512) NOT NULL DEFAULT '' COMMENT 'server地址',
`kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本',
`security_properties` text COMMENT '安全认证参数',
`security_properties` text COMMENT 'Kafka安全认证参数',
`jmx_properties` text COMMENT 'JMX配置',
`status` tinyint(4) NOT NULL DEFAULT '1' COMMENT ' 监控标记, 0表示未监控, 1表示监控中',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
@@ -197,7 +203,8 @@ CREATE TABLE `gateway_config` (
`type` varchar(128) NOT NULL DEFAULT '' COMMENT '配置类型',
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '配置名称',
`value` text COMMENT '配置值',
`version` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT '版本信息',
`version` bigint(20) unsigned NOT NULL DEFAULT '1' COMMENT '版本信息',
`description` text COMMENT '描述信息',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
@@ -302,20 +309,22 @@ INSERT INTO kafka_user(app_id, password, user_type, operation) VALUES ('dkm_admi
-- Table structure for table `logical_cluster`
--
-- DROP TABLE IF EXISTS `logical_cluster`;
CREATE TABLE `logical_cluster` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
`description` text COMMENT '备注说明',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
`identification` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识',
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
`description` text COMMENT '备注说明',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_name` (`name`),
UNIQUE KEY `uniq_identification` (`identification`)
) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
--
-- Table structure for table `monitor_rule`

View File

@@ -9,19 +9,39 @@
# 安装手册
## 1、环境依赖
## 环境依赖
如果是以Release包进行安装的,则仅安装`Java`及`MySQL`即可。如果是要先对源码进行打包,然后再使用,则还需要安装`Maven`及`Node`环境。
- `Maven 3.5+`(后端打包依赖)
- `node v12+`(前端打包依赖)
- `Java 8+`(运行环境需要)
- `MySQL 5.7`(数据存储)
- `Maven 3.5+`(后端打包依赖)
- `Node 10+`(前端打包依赖)
---
## 环境初始化
## 2、获取安装包
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令从而创建所需的MySQL库及表默认创建的库名是`kafka_manager`
**1、Release直接下载**
这里如果觉得麻烦然后也不想进行二次开发则可以直接下载Release包下载地址[Github Release包下载地址](https://github.com/didi/Logi-KafkaManager/releases)
如果觉得Github的下载地址太慢了也可以进入`Logi-KafkaManager`的用户群获取群地址在README中。
**2、源代码进行打包**
下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`sh build.sh`命令即可,执行完成之后会在`output/kafka-manager-xxx`目录下面生成一个jar包。
对于`windows`环境的用户,估计执行不了`sh build.sh`命令,因此可以直接执行`mvn install`,然后在`kafka-manager-web/target`目录下生成一个kafka-manager-web-xxx.jar的包。
获取到jar包之后我们继续下面的步骤。
---
## 3、MySQL-DB初始化
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令从而创建所需的MySQL库及表默认创建的库名是`logi_kafka_manager`
```
# 示例:
@@ -30,29 +50,15 @@ mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
---
## 打包
```bash
# 一次性打包
cd ..
mvn install
## 4、启动
```
# application.yml 是配置文件最简单的是仅修改MySQL相关的配置即可启动
---
## 启动
```
# application.yml 是配置文件
cp kafka-manager-web/src/main/resources/application.yml kafka-manager-web/target/
cd kafka-manager-web/target/
nohup java -jar kafka-manager-web-2.1.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
nohup java -jar kafka-manager.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
```
## 使用
### 5、使用
本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md)

View File

@@ -5,16 +5,26 @@
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 集群接入
集群的接入总共需要三个步骤,分别是:
1. 接入物理集群
2. 创建Region
3. 创建逻辑集群
## 主要概念讲解
面对大规模集群、业务场景复杂的情况引入Region、逻辑集群的概念
- Region划分部分Broker作为一个 Region用Region定义资源划分的单位提高扩展性和隔离性。如果部分Topic异常也不会影响大面积的Broker
- 逻辑集群逻辑集群由部分Region组成便于对大规模集群按照业务划分、保障能力进行管理
![op_cluster_arch](assets/op_cluster_arch.png)
备注接入集群需要2、3两步是因为普通用户的视角下看到的都是逻辑集群如果没有2、3两步那么普通用户看不到任何信息。
集群的接入总共需要三个步骤,分别是:
1. 接入物理集群:填写机器地址、安全协议等配置信息,接入真实的物理集群
2. 创建Region将部分Broker划分为一个Region
3. 创建逻辑集群逻辑集群由部分Region组成可根据业务划分、保障等级来创建相应的逻辑集群
![op_cluster_flow](assets/op_cluster_flow.png)
**备注接入集群需要2、3两步是因为普通用户的视角下看到的都是逻辑集群如果没有2、3两步那么普通用户看不到任何信息。**
## 1、接入物理集群
@@ -36,4 +46,4 @@
![op_add_logical_cluster](assets/op_add_logical_cluster.jpg)
如上图所示,填写逻辑集群信息,然后点击确定,即可完成逻辑集群的创建。

(2 binary image files added, not shown; sizes: 124 KiB, 105 KiB)

View File

@@ -0,0 +1,25 @@
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
## 报警策略-报警函数介绍
| 类别 | 函数 | 含义 |函数文案 |备注 |
| --- | --- | --- | --- | --- |
| 发生次数 | all, n | 最近$n个周期内全发生 | 连续发生(all) | |
| 发生次数 | happen, n, m | 最近$n个周期内发生m次 | 出现(happen) | null点也计算在n内 |
| 数学统计 | sum, n | 最近$n个周期取值 的 和 | 求和(sum) | sum_over_time |
| 数学统计 | avg, n | 最近$n个周期取值 的 平均值 | 平均值(avg) | avg_over_time |
| 数学统计 | min, n | 最近$n个周期取值 的 最小值 | 最小值(min) | min_over_time |
| 数学统计 | max, n | 最近$n个周期取值 的 最大值 | 最大值(max | max_over_time |
| 变化率 | pdiff, n | 最近$n个点的变化率, 有一个满足 则触发 | 突增突降率(pdiff) | 假设, 最近3个周期的值分别为 v, v2, v3v为最新值那么计算公式为 any( (v-v2)/v2, (v-v3)/v3 )**区分正负** |
| 变化量 | diff, n | 最近$n个点的变化量, 有一个满足 则触发 | 突增突降值(diff) | 假设, 最近3个周期的值分别为 v, v2, v3v为最新值那么计算公式为 any( (v-v2), (v-v3) )**区分正负** |
| 变化量 | ndiff | 最近n个周期发生m次 v(t) - v(t-1) $OP threshold其中 v(t) 为最新值 | 连续变化(区分正负) - ndiff | |
| 数据中断 | nodata, t | 最近 $t 秒内 无数据上报 | 数据上报中断(nodata) | |
| 同环比 | c_avg_rate_abs, n | 最近$n个周期的取值相比 1天或7天前取值 的变化率 的绝对值 | 同比变化率(c_avg_rate_abs) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为abs((avg(v1,v2,v3) / avg(v1',v2') -1)* 100%) |
| 同环比 | c_avg_rate, n | 最近$n个周期的取值相比 1天或7天前取值 的变化率(**区分正负**) | 同比变化率(c_avg_rate) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为(avg(v1,v2,v3) / avg(v1',v2') -1)* 100% |
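
以`pdiff`、`diff`与`c_avg_rate`为例,下面的示意代码按表中公式演示了这几个函数的计算口径(并非夜莺源码;约定数组第一个元素为最新值v):

```java
import java.util.Arrays;

public class AlarmFunctionDemo {
    // pdiff(n): 计算 (v - v_i) / v_i, 任一结果满足阈值则触发, 区分正负
    static double[] pdiff(double[] recent) {
        double v = recent[0];
        double[] rates = new double[recent.length - 1];
        for (int i = 1; i < recent.length; i++) {
            rates[i - 1] = (v - recent[i]) / recent[i];
        }
        return rates;
    }

    // diff(n): 计算 v - v_i, 区分正负
    static double[] diff(double[] recent) {
        double v = recent[0];
        double[] deltas = new double[recent.length - 1];
        for (int i = 1; i < recent.length; i++) {
            deltas[i - 1] = v - recent[i];
        }
        return deltas;
    }

    // c_avg_rate(n): (avg(最近n个值) / avg(历史对应值) - 1) * 100%, 区分正负
    static double cAvgRate(double[] recent, double[] history) {
        return (avg(recent) / avg(history) - 1) * 100;
    }

    static double avg(double[] values) {
        double sum = 0;
        for (double value : values) {
            sum += value;
        }
        return sum / values.length;
    }

    public static void main(String[] args) {
        double[] recent = {12.0, 10.0, 8.0};   // v, v2, v3
        double[] history = {9.0, 11.0};        // 1天或7天前的对应取值
        System.out.println(Arrays.toString(pdiff(recent))); // [0.2, 0.5]
        System.out.println(Arrays.toString(diff(recent)));  // [2.0, 4.0]
        System.out.println(cAvgRate(recent, history));      // 0.0
    }
}
```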

(1 binary image file added, not shown; size: 119 KiB)

View File

@@ -9,18 +9,41 @@
# FAQ
- 1、Topic申请时没有可选择的集群
- 0、Github图裂问题解决
- 1、Topic申请、新建监控告警等操作时没有可选择的集群
- 2、逻辑集群 & Region的用途
- 3、登录失败
- 4、页面流量信息等无数据
- 5、如何对接夜莺的监控告警功能
- 6、如何使用`MySQL 8`
- 7、`Jmx`连接失败如何解决?
- 8、`topic biz data not exist`错误及处理方式
---
### 1、Topic申请时没有可选择的集群
### 0、Github图裂问题解决
- 参看 [kafka-manager 接入集群](docs/user_guide/add_cluster/add_cluster.md) 手册这里的Region和逻辑集群都必须添加
可以在本地机器`ping github.com`,获取`github.com`对应的IP地址,
然后将该IP绑定到`/etc/hosts`文件中。
例如
```shell
# 在 /etc/hosts文件中增加如下信息
140.82.113.3 github.com
```
---
### 1、Topic申请、新建监控告警等操作时没有可选择的集群
缺少逻辑集群导致的在Topic管理、监控告警、集群管理这三个Tab下面都是普通用户视角普通用户看到的集群都是逻辑集群因此在这三个Tab下进行操作时都需要有逻辑集群。
逻辑集群的创建参看:
- [kafka-manager 接入集群](docs/user_guide/add_cluster/add_cluster.md) 手册这里的Region和逻辑集群都必须添加。
---
@@ -43,7 +66,7 @@
- 1、检查`Broker JMX`是否正确开启。
如若还未开启,具体可百度一下看如何开启
如若还未开启,具体可百度一下看如何开启,或者参看:[Jmx连接配置&问题解决说明文档](../dev_guide/connect_jmx_failed.md)
![helpcenter](./assets/faq/jmx_check.jpg)
@@ -59,10 +82,30 @@
### 5、如何对接夜莺的监控告警功能
- 参看 [kafka-manager 对接夜莺监控](../dev_guide/Intergration_n9e_monitor.md) 说明。
- 参看 [kafka-manager 对接夜莺监控](../dev_guide/monitor_system_integrate_with_n9e.md) 说明。
---
### 6、如何使用`MySQL 8`
- 参看 [kafka-manager 使用`MySQL 8`](../dev_guide/use_mysql_8.md) 说明。
---
### 7、`Jmx`连接失败如何解决?
- 参看 [Jmx连接配置&问题解决](../dev_guide/connect_jmx_failed.md) 说明。
---
### 8、`topic biz data not exist`错误及处理方式
**错误原因**
在进行权限审批的时候可能会出现这个错误。出现这个错误的原因是Topic相关的业务信息没有在DB中存储,或者更具体地说,该Topic不属于任何应用。只需要将这些无主的Topic挂在某个应用下面即可。
**解决方式**
可以在`运维管控->集群列表->Topic信息`下面编辑申请权限的Topic为Topic选择一个应用即可。
以上仅仅只是针对单个Topic的场景如果你有非常多的Topic需要进行初始化的那么此时可以在配置管理中增加一个配置来定时的对无主的Topic进行同步具体见[动态配置管理 - 1、Topic定时同步任务](../dev_guide/dynamic_config_manager.md)

View File

@@ -0,0 +1,72 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# Topic 指标说明
## 1. 实时流量指标说明
| 指标名称| 单位| 指标含义|
|-- |---- |---|
| messagesIn| 条/s | 每秒发送到kafka的消息条数 |
| byteIn| B/s | 每秒发送到kafka的字节数 |
| byteOut| B/s | 每秒流出kafka的字节数所有消费组消费的流量如果是Kafka版本较低这个还包括副本同步的流量 |
| byteRejected| B/s | 每秒被拒绝的字节数 |
| failedFetchRequest| qps | 每秒拉取失败的请求数 |
| failedProduceRequest| qps | 每秒发送失败的请求数 |
| totalProduceRequest| qps | 每秒总共发送的请求数与messagesIn的区别是一个是发送请求里面可能会有多条消息 |
| totalFetchRequest| qps | 每秒总共拉取消息的请求数 |
&nbsp;
## 2. 历史流量指标说明
| 指标名称| 单位| 指标含义|
|-- |---- |---|
| messagesIn| 条/s | 近一分钟每秒发送到kafka的消息条数 |
| byteIn| B/s | 近一分钟每秒发送到kafka的字节数 |
| byteOut| B/s | 近一分钟每秒流出kafka的字节数所有消费组消费的流量如果是Kafka版本较低副本同步的流量 |
| byteRejected| B/s | 近一分钟每秒被拒绝的字节数 |
| totalProduceRequest| qps | 近一分钟每秒总共发送的请求数与messagesIn的区别是一个是发送请求里面可能会有多条消息 |
&nbsp;
## 3. 实时耗时指标说明
**基于滴滴加强版Kafka引擎的特性可以获取Broker的实时耗时信息和历史耗时信息**
| 指标名称| 单位 | 指标含义 | 耗时高原因 | 解决方案|
|-- |-- |-- |-- |--|
| RequestQueueTimeMs| ms | 请求队列排队时间 | 请求多,服务端处理不过来 | 联系运维人员处理 |
| LocalTimeMs| ms | Broker本地处理时间 | 服务端读写数据慢,可能是读写锁竞争 | 联系运维人员处理 |
| RemoteTimeMs| ms | 请求等待远程完成时间对于发送请求如果ack=-1该时间表示副本同步时间对于消费请求如果当前没有数据该时间为等待新数据时间如果请求的版本与topic存储的版本不同需要做版本转换也会拉高该时间 | 对于生产ack=-1必然会导致该指标耗时高对于消费如果topic数据写入很慢该指标高也正常。如果需要版本转换该指标耗时也会高 | 对于生产可以考虑修改ack=1消费端问题可以联系运维人员具体分析 |
| ThrottleTimeMs| ms | 请求限流时间 | 生产/消费被限流 | 申请提升限流值 |
| ResponseQueueTimeMs| ms | 响应队列排队时间 | 响应多,服务端处理不过来 | 联系运维人员处理 |
| ResponseSendTimeMs| ms | 响应返回客户端时间 | 1下游消费能力差导致向consumer发送数据时写网络缓冲区过慢2消费lag过大一直从磁盘读取数据 | 1:提升客户端消费性能2: 联系运维人员确认是否读取磁盘问题 |
| TotalTimeMs| ms | 接收到请求到完成总时间,理论上该时间等于上述六项时间之和,但由于各时间都是单独统计,总时间只是约等于上述六部分时间之和 | 上面六项有些耗时高 | 具体针对高的指标解决 |
**备注由于kafka消费端实现方式消费端一次会发送多个Fetch请求在接收到一个Response之后就会开始处理数据使Broker端返回其他Response等待因此ResponseSendTimeMs并不完全是服务端发送时间有时会包含一部分消费端处理数据时间**
## 4. 历史耗时指标说明
**基于滴滴加强版Kafka引擎的特性可以获取Broker的实时耗时信息和历史耗时信息**
| 指标名称| 单位| 指标含义|
|-- | ---- |---|
| produceRequestTime99thPercentile|ms|Topic近一分钟发送99分位耗时|
| fetchRequestTime99thPercentile|ms|Topic近一分钟拉取99分位耗时|
| produceRequestTime95thPercentile|ms|Topic近一分钟发送95分位耗时|
| fetchRequestTime95thPercentile|ms|Topic近一分钟拉取95分位耗时|
| produceRequestTime75thPercentile|ms|Topic近一分钟发送75分位耗时|
| fetchRequestTime75thPercentile|ms|Topic近一分钟拉取75分位耗时|
| produceRequestTime50thPercentile|ms|Topic近一分钟发送50分位耗时|
| fetchRequestTime50thPercentile|ms|Topic近一分钟拉取50分位耗时|
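
上述流量与耗时指标均采集自Broker的JMX。作为参考,下面的示意代码直接读取某Topic的messagesIn一分钟速率(MBean名称为社区Kafka的固定格式;主机、端口与Topic名为假设值,仅演示采集原理):

```java
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class TopicMetricDemo {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://192.168.0.1:9999/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url, null)) {
            MBeanServerConnection conn = connector.getMBeanServerConnection();
            // 社区Kafka的Topic级MessagesInPerSec指标
            ObjectName mbean = new ObjectName(
                    "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=test-topic");
            Object rate = conn.getAttribute(mbean, "OneMinuteRate");
            System.out.println("messagesIn (OneMinuteRate): " + rate);
        }
    }
}
```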

View File

@@ -0,0 +1,32 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 资源申请文档
## 主要名词解释
- 应用App作为Kafka中的账户使用AppID+password作为身份标识
- 集群:可使用平台提供的共享集群,也可为某一应用申请单独的集群
- Topic可申请创建Topic或申请其他Topic的生产/消费权限。进行生产/消费时通过Topic+AppID进行身份鉴权
![production_consumption_flow](assets/resource_apply/production_consumption_flow.png)
## 应用申请
应用App作为Kafka中的账户使用AppID+password作为身份标识。对Topic进行生产/消费时通过Topic+AppID进行身份鉴权。
用户申请应用经由运维人员审批审批通过后获得AppID和密钥
## 集群申请
可使用平台提供的共享集群,若对隔离性、稳定性、生产消费速率有更高的需求,可对某一应用申请单独的集群
## Topic申请
- 用户可根据已申请的应用创建Topic。创建后应用负责人默认拥有该Topic的生产/消费权限和管理权限
- 也可申请其他Topic的生产、消费权限。经由Topic所属应用的负责人审批后即可拥有相应权限。

View File

@@ -622,6 +622,9 @@ Lag表示该消费客户端是否有堆积等于 partition offset-consume
<font size=2>步骤3</font>填写完成后,点击提交即可提交申请。
备注说明集群创建后还需在此基础上创建region、逻辑集群。具体操作可参照 [集群接入手册](https://github.com/didi/Logi-KafkaManager/blob/master/docs/user_guide/add_cluster/add_cluster.md)
![applycluster](./assets/applycluster.png)
#### 申请集群下线 ####

View File

@@ -5,13 +5,13 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-common</artifactId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
</parent>
<properties>
@@ -104,5 +104,10 @@
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
</dependencies>
</project>
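
pom中新增了`junit`依赖,据此可以为公共模块补充单元测试。下面是一个最小的测试示意(假设使用JUnit 4,被测对象为下文`Result`类改动中新增的`buildFailure`与`failed()`方法):

```java
import org.junit.Assert;
import org.junit.Test;

import com.xiaojukeji.kafka.manager.common.entity.Result;

public class ResultTest {
    @Test
    public void buildFailureShouldBeFailed() {
        // buildFailure使用FAIL状态码, failed()应返回true
        Result<String> result = Result.buildFailure("mock error");
        Assert.assertTrue(result.failed());
    }
}
```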

View File

@@ -6,8 +6,6 @@ package com.xiaojukeji.kafka.manager.common.bizenum;
*/
public enum IDCEnum {
CN("cn", "国内"),
US("us", "美东"),
RU("ru", "俄罗斯"),
;
private String idc;

View File

@@ -21,6 +21,8 @@ public enum ModuleEnum {
PARTITION(5, "分区"),
GATEWAY_CONFIG(6, "Gateway配置"),
UNKNOWN(-1, "未知")
;
ModuleEnum(int code, String message) {

View File

@@ -10,6 +10,7 @@ public enum RebalanceDimensionEnum {
REGION(1, "Region维度"),
BROKER(2, "Broker维度"),
TOPIC(3, "Topic维度"),
PARTITION(4, "Partition维度"),
;
private Integer code;

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* 过期Topic状态
* @author zengqiao
* @date 21/01/25
*/
public enum TopicExpiredStatusEnum {
ALREADY_NOTIFIED_AND_DELETED(-2, "已通知, 已下线"),
ALREADY_NOTIFIED_AND_CAN_DELETE(-1, "已通知, 可下线"),
ALREADY_EXPIRED_AND_WAIT_NOTIFY(0, "已过期, 待通知"),
ALREADY_NOTIFIED_AND_WAIT_RESPONSE(1, "已通知, 待反馈"),
;
private int status;
private String message;
TopicExpiredStatusEnum(int status, String message) {
this.status = status;
this.message = message;
}
public int getStatus() {
return status;
}
public String getMessage() {
return message;
}
}
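
若需按`status`数值反查该枚举(例如从DB读出过期状态后),可以参考下面的示意写法(非源码,模式与下文`GatewayConfigKeyEnum.getByConfigType`一致):

```java
// 示意: 按status反查枚举, 未匹配时返回null
public static TopicExpiredStatusEnum getByStatus(int status) {
    for (TopicExpiredStatusEnum statusEnum : TopicExpiredStatusEnum.values()) {
        if (statusEnum.getStatus() == status) {
            return statusEnum;
        }
    }
    return null;
}
```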

View File

@@ -45,4 +45,13 @@ public enum GatewayConfigKeyEnum {
", configName='" + configName + '\'' +
'}';
}
public static GatewayConfigKeyEnum getByConfigType(String configType) {
for (GatewayConfigKeyEnum configKeyEnum: GatewayConfigKeyEnum.values()) {
if (configKeyEnum.getConfigType().equals(configType)) {
return configKeyEnum;
}
}
return null;
}
}
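
新增的`getByConfigType`按配置类型反查枚举,未匹配时返回`null`,调用侧需要判空。用法示意如下(非源码,`configType`取值仅为假设):

```java
// 示意: configType来自gateway_config表的type字段
GatewayConfigKeyEnum configKeyEnum = GatewayConfigKeyEnum.getByConfigType(configType);
if (configKeyEnum == null) {
    throw new IllegalArgumentException("unknown configType: " + configType);
}
```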

View File

@@ -7,6 +7,8 @@ package com.xiaojukeji.kafka.manager.common.constant;
public class KafkaConstant {
public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets";
public static final String TRANSACTION_TOPIC_NAME = "__transaction_state";
public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com";
public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1";

View File

@@ -12,11 +12,6 @@ public class TopicCreationConstant {
*/
public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG";
/**
* 治理平台创建Topic配置KEY
*/
public static final String CHORUS_CREATE_TOPIC_CONFIG_KEY_NAME = "CHORUS_CREATE_TOPIC_CONFIG";
/**
* 内部创建Topic配置KEY
*/
@@ -30,6 +25,8 @@ public class TopicCreationConstant {
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L;
public static Properties createNewProperties(Long retentionTime) {
Properties properties = new Properties();
properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime));

View File

@@ -3,7 +3,6 @@ package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
@@ -16,17 +15,12 @@ public class ConsumerMetadata {
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
private Map<String, List<String>> consumerGroupAppMap = new ConcurrentHashMap<>();
public ConsumerMetadata(Set<String> consumerGroupSet,
Map<String, Set<String>> topicNameConsumerGroupMap,
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap,
Map<String, List<String>> consumerGroupAppMap) {
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
this.consumerGroupSet = consumerGroupSet;
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
this.consumerGroupAppMap = consumerGroupAppMap;
}
public Set<String> getConsumerGroupSet() {
@@ -40,8 +34,4 @@ public class ConsumerMetadata {
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
return consumerGroupSummaryMap;
}
public Map<String, List<String>> getConsumerGroupAppMap() {
return consumerGroupAppMap;
}
}

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import java.io.Serializable;
@@ -96,7 +97,7 @@ public class Result<T> implements Serializable {
return result;
}
public static <T> Result<T> buildFailure(String message) {
public static <T> Result<T> buildGatewayFailure(String message) {
Result<T> result = new Result<T>();
result.setCode(ResultStatus.GATEWAY_INVALID_REQUEST.getCode());
result.setMessage(message);
@@ -104,6 +105,14 @@ public class Result<T> implements Serializable {
return result;
}
public static <T> Result<T> buildFailure(String message) {
Result<T> result = new Result<T>();
result.setCode(ResultStatus.FAIL.getCode());
result.setMessage(message);
result.setData(null);
return result;
}
public static Result buildFrom(ResultStatus resultStatus) {
Result result = new Result();
result.setCode(resultStatus.getCode());
@@ -118,4 +127,9 @@ public class Result<T> implements Serializable {
result.setData(data);
return result;
}
public boolean failed() {
return !Constant.SUCCESS.equals(code);
}
}
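This hunk renames the old buildFailure (which set the gateway-specific error code) to buildGatewayFailure, reintroduces buildFailure with the generic FAIL code, and adds a failed() convenience check against Constant.SUCCESS. A small sketch of the resulting call sites, assuming the Result&lt;T&gt; class as shown:

```java
public class ResultDemo {
    public static void main(String[] args) {
        // gateway-facing failure: code = ResultStatus.GATEWAY_INVALID_REQUEST
        Result<String> gateway = Result.buildGatewayFailure("invalid gateway request");
        // generic failure: code = ResultStatus.FAIL
        Result<String> generic = Result.buildFailure("operation failed");

        // failed() is simply "code != Constant.SUCCESS", so both report true
        System.out.println(gateway.failed());
        System.out.println(generic.failed());
    }
}
```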

View File

@@ -12,125 +12,101 @@ public enum ResultStatus {
SUCCESS(Constant.SUCCESS, "success"),
LOGIN_FAILED(1, "login failed, please check username and password"),
FAIL(1, "操作失败"),
/**
* 内部依赖错误, [1000, 1200)
* 操作错误[1000, 2000)
* ------------------------------------------------------------------------------------------
*/
MYSQL_ERROR(1000, "operate database failed"),
CONNECT_ZOOKEEPER_FAILED(1000, "connect zookeeper failed"),
READ_ZOOKEEPER_FAILED(1000, "read zookeeper failed"),
READ_JMX_FAILED(1000, "read jmx failed"),
// 内部依赖错误 —— Kafka特定错误, [1000, 1100)
BROKER_NUM_NOT_ENOUGH(1000, "broker not enough"),
CONTROLLER_NOT_ALIVE(1000, "controller not alive"),
CLUSTER_METADATA_ERROR(1000, "cluster metadata error"),
TOPIC_CONFIG_ERROR(1000, "topic config error"),
/**
* 外部依赖错误, [1200, 1400)
* ------------------------------------------------------------------------------------------
*/
CALL_CLUSTER_TASK_AGENT_FAILED(1000, " call cluster task agent failed"),
CALL_MONITOR_SYSTEM_ERROR(1000, " call monitor-system failed"),
/**
* 外部用户操作错误, [1400, 1600)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(1400, "param illegal"),
OPERATION_FAILED(1401, "operation failed"),
OPERATION_FORBIDDEN(1402, "operation forbidden"),
API_CALL_EXCEED_LIMIT(1403, "api call exceed limit"),
USER_WITHOUT_AUTHORITY(1404, "user without authority"),
CHANGE_ZOOKEEPER_FORBIDDEN(1405, "change zookeeper forbidden"),
// 资源不存在
CLUSTER_NOT_EXIST(10000, "cluster not exist"),
BROKER_NOT_EXIST(10000, "broker not exist"),
TOPIC_NOT_EXIST(10000, "topic not exist"),
PARTITION_NOT_EXIST(10000, "partition not exist"),
ACCOUNT_NOT_EXIST(10000, "account not exist"),
APP_NOT_EXIST(1000, "app not exist"),
ORDER_NOT_EXIST(1000, "order not exist"),
CONFIG_NOT_EXIST(1000, "config not exist"),
IDC_NOT_EXIST(1000, "idc not exist"),
TASK_NOT_EXIST(1110, "task not exist"),
TOPIC_OPERATION_PARAM_NULL_POINTER(1450, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(1451, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(1452, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(1453, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(1454, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(1455, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(1456, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(1457, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(1458, "未知错误"),
AUTHORITY_NOT_EXIST(1000, "authority not exist"),
/**
* 参数错误[2000, 3000)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(2000, "param illegal"),
CG_LOCATION_ILLEGAL(2001, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(2002, "order already handled"),
APP_ID_OR_PASSWORD_ILLEGAL(2003, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(2004, "system code illegal"),
CLUSTER_TASK_HOST_LIST_ILLEGAL(2005, "主机列表错误,请检查主机列表"),
JSON_PARSER_ERROR(2006, "json parser error"),
MONITOR_NOT_EXIST(1110, "monitor not exist"),
BROKER_NUM_NOT_ENOUGH(2050, "broker not enough"),
CONTROLLER_NOT_ALIVE(2051, "controller not alive"),
CLUSTER_METADATA_ERROR(2052, "cluster metadata error"),
TOPIC_CONFIG_ERROR(2053, "topic config error"),
QUOTA_NOT_EXIST(1000, "quota not exist, please check clusterId, topicName and appId"),
/**
* 参数错误 - 资源检查错误
* 因为外部系统的问题, 操作时引起的错误, [7000, 8000)
* ------------------------------------------------------------------------------------------
*/
RESOURCE_NOT_EXIST(7100, "资源不存在"),
CLUSTER_NOT_EXIST(7101, "cluster not exist"),
BROKER_NOT_EXIST(7102, "broker not exist"),
TOPIC_NOT_EXIST(7103, "topic not exist"),
PARTITION_NOT_EXIST(7104, "partition not exist"),
ACCOUNT_NOT_EXIST(7105, "account not exist"),
APP_NOT_EXIST(7106, "app not exist"),
ORDER_NOT_EXIST(7107, "order not exist"),
CONFIG_NOT_EXIST(7108, "config not exist"),
IDC_NOT_EXIST(7109, "idc not exist"),
TASK_NOT_EXIST(7110, "task not exist"),
AUTHORITY_NOT_EXIST(7111, "authority not exist"),
MONITOR_NOT_EXIST(7112, "monitor not exist"),
QUOTA_NOT_EXIST(7113, "quota not exist, please check clusterId, topicName and appId"),
CONSUMER_GROUP_NOT_EXIST(7114, "consumerGroup not exist"),
TOPIC_BIZ_DATA_NOT_EXIST(7115, "topic biz data not exist, please sync topic to db"),
// 资源不存在, 已存在, 已被使用
RESOURCE_NOT_EXIST(1200, "资源存在"),
RESOURCE_ALREADY_EXISTED(1200, "资源已经存在"),
RESOURCE_NAME_DUPLICATED(1200, "资源名称重复"),
RESOURCE_ALREADY_USED(1000, "资源早已被使用"),
// 资源已存在
RESOURCE_ALREADY_EXISTED(7200, "资源已经存在"),
TOPIC_ALREADY_EXIST(7201, "topic already existed"),
// 资源重名
RESOURCE_NAME_DUPLICATED(7300, "资源名称重复"),
// 资源已被使用
RESOURCE_ALREADY_USED(7400, "资源早已被使用"),
/**
* 资源参数错误
* 因为外部系统的问题, 操作时引起的错误, [8000, 9000)
* ------------------------------------------------------------------------------------------
*/
CG_LOCATION_ILLEGAL(10000, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(1000, "order already handled"),
MYSQL_ERROR(8010, "operate database failed"),
APP_ID_OR_PASSWORD_ILLEGAL(1000, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(1000, "system code illegal"),
ZOOKEEPER_CONNECT_FAILED(8020, "zookeeper connect failed"),
ZOOKEEPER_READ_FAILED(8021, "zookeeper read failed"),
ZOOKEEPER_WRITE_FAILED(8022, "zookeeper write failed"),
ZOOKEEPER_DELETE_FAILED(8023, "zookeeper delete failed"),
CLUSTER_TASK_HOST_LIST_ILLEGAL(1000, "主机列表错误,请检查主机列表"),
// 调用集群任务里面的agent失败
CALL_CLUSTER_TASK_AGENT_FAILED(8030, " call cluster task agent failed"),
// 调用监控系统失败
CALL_MONITOR_SYSTEM_ERROR(8040, " call monitor-system failed"),
// 存储相关的调用失败
STORAGE_UPLOAD_FILE_FAILED(8050, "upload file failed"),
STORAGE_FILE_TYPE_NOT_SUPPORT(8051, "File type not support"),
STORAGE_DOWNLOAD_FILE_FAILED(8052, "download file failed"),
///////////////////////////////////////////////////////////////
USER_WITHOUT_AUTHORITY(1000, "user without authority"),
JSON_PARSER_ERROR(1000, "json parser error"),
TOPIC_OPERATION_PARAM_NULL_POINTER(2, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(3, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(6, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(9, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(10, "未知错误"),
TOPIC_EXIST_CONNECT_CANNOT_DELETE(10, "topic exist connect cannot delete"),
EXIST_TOPIC_CANNOT_DELETE(10, "exist topic cannot delete"),
/**
* 工单
*/
CHANGE_ZOOKEEPER_FORBIDEN(100, "change zookeeper forbiden"),
// APP_EXIST_TOPIC_AUTHORITY_CANNOT_DELETE(1000, "app exist topic authority cannot delete"),
UPLOAD_FILE_FAIL(1000, "upload file fail"),
FILE_TYPE_NOT_SUPPORT(1000, "File type not support"),
DOWNLOAD_FILE_FAIL(1000, "download file fail"),
TOPIC_ALREADY_EXIST(17400, "topic already existed"),
CONSUMER_GROUP_NOT_EXIST(17411, "consumerGroup not exist"),
;
private int code;

View File

@@ -23,6 +23,8 @@ public class ClusterDetailDTO {
private String securityProperties;
private String jmxProperties;
private Integer status;
private Date gmtCreate;
@@ -103,6 +105,14 @@ public class ClusterDetailDTO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
@@ -176,8 +186,9 @@ public class ClusterDetailDTO {
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", mode=" + mode +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
public class ControllerPreferredCandidate {
private Integer brokerId;
private String host;
private Long startTime;
private Integer status;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
@Override
public String toString() {
return "ControllerPreferredBroker{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", startTime=" + startTime +
", status=" + status +
'}';
}
}

View File

@@ -9,6 +9,8 @@ public class LogicalCluster {
private String logicalClusterName;
private String logicalClusterIdentification;
private Integer mode;
private Integer topicNum;
@@ -41,6 +43,14 @@ public class LogicalCluster {
this.logicalClusterName = logicalClusterName;
}
public String getLogicalClusterIdentification() {
return logicalClusterIdentification;
}
public void setLogicalClusterIdentification(String logicalClusterIdentification) {
this.logicalClusterIdentification = logicalClusterIdentification;
}
public Integer getMode() {
return mode;
}
@@ -81,6 +91,14 @@ public class LogicalCluster {
this.bootstrapServers = bootstrapServers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getGmtCreate() {
return gmtCreate;
}
@@ -97,19 +115,12 @@ public class LogicalCluster {
this.gmtModify = gmtModify;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicalCluster{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", logicalClusterIdentification='" + logicalClusterIdentification + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +

View File

@@ -1,57 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
/**
* @author zengqiao
* @date 20/9/7
*/
public class SinkTopicRequestTimeMetricsConfig {
private Long clusterId;
private String topicName;
private Long startId;
private Long step;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getStartId() {
return startId;
}
public void setStartId(Long startId) {
this.startId = startId;
}
public Long getStep() {
return step;
}
public void setStep(Long step) {
this.step = step;
}
@Override
public String toString() {
return "SinkTopicRequestTimeMetricsConfig{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", startId=" + startId +
", step=" + step +
'}';
}
}

View File

@@ -2,30 +2,18 @@ package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
import java.util.Objects;
/**
* 消费组信息
* @author zengqiao
* @date 19/4/18
*/
public class ConsumerGroupDTO {
public class ConsumerGroup {
private Long clusterId;
private String consumerGroup;
private List<String> appIdList;
private OffsetLocationEnum offsetStoreLocation;
public ConsumerGroupDTO(Long clusterId,
String consumerGroup,
List<String> appIdList,
OffsetLocationEnum offsetStoreLocation) {
public ConsumerGroup(Long clusterId, String consumerGroup, OffsetLocationEnum offsetStoreLocation) {
this.clusterId = clusterId;
this.consumerGroup = consumerGroup;
this.appIdList = appIdList;
this.offsetStoreLocation = offsetStoreLocation;
}
@@ -45,14 +33,6 @@ public class ConsumerGroupDTO {
this.consumerGroup = consumerGroup;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
@@ -63,10 +43,9 @@ public class ConsumerGroupDTO {
@Override
public String toString() {
return "ConsumerGroupDTO{" +
return "ConsumerGroup{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", appIdList=" + appIdList +
", offsetStoreLocation=" + offsetStoreLocation +
'}';
}
@@ -79,7 +58,7 @@ public class ConsumerGroupDTO {
if (o == null || getClass() != o.getClass()) {
return false;
}
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
ConsumerGroup that = (ConsumerGroup) o;
return clusterId.equals(that.clusterId)
&& consumerGroup.equals(that.consumerGroup)
&& offsetStoreLocation == that.offsetStoreLocation;

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
public class ConsumerGroupSummary {
private Long clusterId;
private String consumerGroup;
private OffsetLocationEnum offsetStoreLocation;
private List<String> appIdList;
private String state;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) {
this.offsetStoreLocation = offsetStoreLocation;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
@Override
public String toString() {
return "ConsumerGroupSummary{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", offsetStoreLocation=" + offsetStoreLocation +
", appIdList=" + appIdList +
", state='" + state + '\'' +
'}';
}
}

View File

@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.op;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.List;
/**
* @author zengqiao
* @date 21/01/24
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@ApiModel(description="优选为Controller的候选者")
public class ControllerPreferredCandidateDTO {
@ApiModelProperty(value="集群ID")
private Long clusterId;
@ApiModelProperty(value="优选为controller的BrokerId")
private List<Integer> brokerIdList;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public List<Integer> getBrokerIdList() {
return brokerIdList;
}
public void setBrokerIdList(List<Integer> brokerIdList) {
this.brokerIdList = brokerIdList;
}
@Override
public String toString() {
return "ControllerPreferredCandidateDTO{" +
"clusterId=" + clusterId +
", brokerIdList=" + brokerIdList +
'}';
}
}

View File

@@ -25,7 +25,10 @@ public class RebalanceDTO {
@ApiModelProperty(value = "TopicName")
private String topicName;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度]")
@ApiModelProperty(value = "分区ID")
private Integer partitionId;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度, 4:Partition纬度]")
private Integer dimension;
public Long getClusterId() {
@@ -60,6 +63,14 @@ public class RebalanceDTO {
this.topicName = topicName;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Integer getDimension() {
return dimension;
}
@@ -68,22 +79,12 @@ public class RebalanceDTO {
this.dimension = dimension;
}
@Override
public String toString() {
return "RebalanceDTO{" +
"clusterId=" + clusterId +
", regionId=" + regionId +
", brokerId=" + brokerId +
", topicName='" + topicName + '\'' +
", dimension=" + dimension +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId)
|| RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId)
|| RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName) ) {
|| (RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId))
|| (RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId))
|| (RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName))
|| (RebalanceDimensionEnum.PARTITION.getCode().equals(dimension) && (ValidateUtils.isNull(topicName) || ValidateUtils.isNull(partitionId))) ) {
return false;
}
return true;
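With the new PARTITION dimension, paramLegal accepts a partition-level rebalance only when both topicName and partitionId are present. A sketch of a valid request, assuming the usual setters exist for the fields shown above:

```java
public class RebalanceParamDemo {
    public static void main(String[] args) {
        RebalanceDTO dto = new RebalanceDTO();
        dto.setClusterId(1L);
        dto.setDimension(4);             // 4 = Partition dimension
        dto.setTopicName("demo-topic");  // required for TOPIC and PARTITION
        dto.setPartitionId(0);           // additionally required for PARTITION
        System.out.println(dto.paramLegal()); // true; drop either field and it is false
    }
}
```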

View File

@@ -27,9 +27,12 @@ public class ClusterDTO {
@ApiModelProperty(value="数据中心")
private String idc;
@ApiModelProperty(value="安全配置参数")
@ApiModelProperty(value="Kafka安全配置")
private String securityProperties;
@ApiModelProperty(value="Jmx配置")
private String jmxProperties;
public Long getClusterId() {
return clusterId;
}
@@ -78,6 +81,14 @@ public class ClusterDTO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
@Override
public String toString() {
return "ClusterDTO{" +
@@ -87,15 +98,15 @@ public class ClusterDTO {
", bootstrapServers='" + bootstrapServers + '\'' +
", idc='" + idc + '\'' +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
'}';
}
public Boolean legal() {
public boolean legal() {
if (ValidateUtils.isNull(clusterName)
|| ValidateUtils.isNull(zookeeper)
|| ValidateUtils.isNull(idc)
|| ValidateUtils.isNull(bootstrapServers)
) {
|| ValidateUtils.isNull(bootstrapServers)) {
return false;
}
return true;

View File

@@ -21,6 +21,9 @@ public class LogicalClusterDTO {
@ApiModelProperty(value = "名称")
private String name;
@ApiModelProperty(value = "集群标识, 用于告警的上报")
private String identification;
@ApiModelProperty(value = "集群模式")
private Integer mode;
@@ -52,6 +55,14 @@ public class LogicalClusterDTO {
this.name = name;
}
public String getIdentification() {
return identification;
}
public void setIdentification(String identification) {
this.identification = identification;
}
public Integer getMode() {
return mode;
}
@@ -97,6 +108,7 @@ public class LogicalClusterDTO {
return "LogicalClusterDTO{" +
"id=" + id +
", name='" + name + '\'' +
", identification='" + identification + '\'' +
", mode=" + mode +
", clusterId=" + clusterId +
", regionIdList=" + regionIdList +
@@ -117,6 +129,7 @@ public class LogicalClusterDTO {
}
appId = ValidateUtils.isNull(appId)? "": appId;
description = ValidateUtils.isNull(description)? "": description;
identification = ValidateUtils.isNull(identification)? name: identification;
return true;
}
}

View File

@@ -17,6 +17,8 @@ public class ClusterDO implements Comparable<ClusterDO> {
private String securityProperties;
private String jmxProperties;
private Integer status;
private Date gmtCreate;
@@ -31,30 +33,6 @@ public class ClusterDO implements Comparable<ClusterDO> {
this.id = id;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
public String getClusterName() {
return clusterName;
}
@@ -87,6 +65,38 @@ public class ClusterDO implements Comparable<ClusterDO> {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "ClusterDO{" +
@@ -95,6 +105,7 @@ public class ClusterDO implements Comparable<ClusterDO> {
", zookeeper='" + zookeeper + '\'' +
", bootstrapServers='" + bootstrapServers + '\'' +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +

View File

@@ -11,6 +11,8 @@ public class LogicalClusterDO {
private String name;
private String identification;
private Integer mode;
private String appId;
@@ -41,6 +43,14 @@ public class LogicalClusterDO {
this.name = name;
}
public String getIdentification() {
return identification;
}
public void setIdentification(String identification) {
this.identification = identification;
}
public Integer getMode() {
return mode;
}
@@ -102,6 +112,7 @@ public class LogicalClusterDO {
return "LogicalClusterDO{" +
"id=" + id +
", name='" + name + '\'' +
", identification='" + identification + '\'' +
", mode=" + mode +
", appId='" + appId + '\'' +
", clusterId=" + clusterId +

View File

@@ -17,6 +17,8 @@ public class GatewayConfigDO {
private Long version;
private String description;
private Date createTime;
private Date modifyTime;
@@ -61,6 +63,14 @@ public class GatewayConfigDO {
this.version = version;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Date getCreateTime() {
return createTime;
}
@@ -85,6 +95,7 @@ public class GatewayConfigDO {
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", description='" + description + '\'' +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';

View File

@@ -15,6 +15,9 @@ public class LogicClusterVO {
@ApiModelProperty(value="逻辑集群名称")
private String clusterName;
@ApiModelProperty(value="逻辑标识")
private String clusterIdentification;
@ApiModelProperty(value="逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群")
private Integer mode;
@@ -24,9 +27,6 @@ public class LogicClusterVO {
@ApiModelProperty(value="集群版本")
private String clusterVersion;
@ApiModelProperty(value="物理集群ID")
private Long physicalClusterId;
@ApiModelProperty(value="集群服务地址")
private String bootstrapServers;
@@ -55,6 +55,22 @@ public class LogicClusterVO {
this.clusterName = clusterName;
}
public String getClusterIdentification() {
return clusterIdentification;
}
public void setClusterIdentification(String clusterIdentification) {
this.clusterIdentification = clusterIdentification;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public Integer getTopicNum() {
return topicNum;
}
@@ -71,14 +87,6 @@ public class LogicClusterVO {
this.clusterVersion = clusterVersion;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public void setPhysicalClusterId(Long physicalClusterId) {
this.physicalClusterId = physicalClusterId;
}
public String getBootstrapServers() {
return bootstrapServers;
}
@@ -87,6 +95,14 @@ public class LogicClusterVO {
this.bootstrapServers = bootstrapServers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getGmtCreate() {
return gmtCreate;
}
@@ -103,32 +119,15 @@ public class LogicClusterVO {
this.gmtModify = gmtModify;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicClusterVO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", clusterIdentification='" + clusterIdentification + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +
", physicalClusterId=" + physicalClusterId +
", bootstrapServers='" + bootstrapServers + '\'' +
", description='" + description + '\'' +
", gmtCreate=" + gmtCreate +

View File

@@ -0,0 +1,67 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.List;
/**
* @author zengqiao
* @date 21/01/14
*/
@ApiModel(value = "Topic消费组概要信息")
public class ConsumerGroupSummaryVO {
@ApiModelProperty(value = "消费组名称")
private String consumerGroup;
@ApiModelProperty(value = "使用的AppID")
private String appIds;
@ApiModelProperty(value = "offset存储位置")
private String location;
@ApiModelProperty(value = "消费组状态")
private String state;
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public String getAppIds() {
return appIds;
}
public void setAppIds(String appIds) {
this.appIds = appIds;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
@Override
public String toString() {
return "ConsumerGroupSummaryVO{" +
"consumerGroup='" + consumerGroup + '\'' +
", appIds=" + appIds +
", location='" + location + '\'' +
", state='" + state + '\'' +
'}';
}
}

View File

@@ -28,7 +28,7 @@ public class ExpiredTopicVO {
@ApiModelProperty(value = "负责人")
private String principals;
@ApiModelProperty(value = "状态, -1:可下线, 0:过期待通知, 1+:已通知待反馈")
@ApiModelProperty(value = "状态, -1:已通知可下线, 0:过期待通知, 1+:已通知待反馈")
private Integer status;
public Long getClusterId() {

View File

@@ -0,0 +1,115 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.rd;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Date;
/**
* @author zengqiao
* @date 20/3/19
*/
@ApiModel(value = "GatewayConfigVO", description = "Gateway配置信息")
public class GatewayConfigVO {
@ApiModelProperty(value="ID")
private Long id;
@ApiModelProperty(value="配置类型")
private String type;
@ApiModelProperty(value="配置名称")
private String name;
@ApiModelProperty(value="配置值")
private String value;
@ApiModelProperty(value="版本")
private Long version;
@ApiModelProperty(value="描述说明")
private String description;
@ApiModelProperty(value="创建时间")
private Date createTime;
@ApiModelProperty(value="修改时间")
private Date modifyTime;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public Long getVersion() {
return version;
}
public void setVersion(Long version) {
this.version = version;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public Date getModifyTime() {
return modifyTime;
}
public void setModifyTime(Date modifyTime) {
this.modifyTime = modifyTime;
}
@Override
public String toString() {
return "GatewayConfigVO{" +
"id=" + id +
", type='" + type + '\'' +
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", description='" + description + '\'' +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';
}
}

View File

@@ -32,9 +32,12 @@ public class ClusterBaseVO {
@ApiModelProperty(value="集群类型")
private Integer mode;
@ApiModelProperty(value="安全配置参数")
@ApiModelProperty(value="Kafka安全配置")
private String securityProperties;
@ApiModelProperty(value="Jmx配置")
private String jmxProperties;
@ApiModelProperty(value="1:监控中, 0:暂停监控")
private Integer status;
@@ -108,6 +111,14 @@ public class ClusterBaseVO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
@@ -141,8 +152,9 @@ public class ClusterBaseVO {
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", mode=" + mode +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +

View File

@@ -0,0 +1,61 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
@ApiModel(description = "Broker基本信息")
public class ControllerPreferredCandidateVO {
@ApiModelProperty(value = "brokerId")
private Integer brokerId;
@ApiModelProperty(value = "主机名")
private String host;
@ApiModelProperty(value = "启动时间")
private Long startTime;
@ApiModelProperty(value = "broker状态[0:在线, -1:不在线]")
private Integer status;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
@Override
public String toString() {
return "ControllerPreferredBrokerVO{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", startTime=" + startTime +
", status=" + status +
'}';
}
}

View File

@@ -18,6 +18,9 @@ public class LogicalClusterVO {
@ApiModelProperty(value = "逻辑集群名称")
private String logicalClusterName;
@ApiModelProperty(value = "逻辑集群标识")
private String logicalClusterIdentification;
@ApiModelProperty(value = "物理集群ID")
private Long physicalClusterId;
@@ -55,6 +58,14 @@ public class LogicalClusterVO {
this.logicalClusterName = logicalClusterName;
}
public String getLogicalClusterIdentification() {
return logicalClusterIdentification;
}
public void setLogicalClusterIdentification(String logicalClusterIdentification) {
this.logicalClusterIdentification = logicalClusterIdentification;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
@@ -116,6 +127,7 @@ public class LogicalClusterVO {
return "LogicalClusterVO{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", logicalClusterIdentification='" + logicalClusterIdentification + '\'' +
", physicalClusterId=" + physicalClusterId +
", regionIdList=" + regionIdList +
", mode=" + mode +

View File

@@ -9,6 +9,7 @@ import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.TopicConnectionDO
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
@@ -52,7 +53,21 @@ public class JsonUtils {
return JSON.toJSONString(obj);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject) {
public static <T> T stringToObj(String src, Class<T> clazz) {
if (ValidateUtils.isBlank(src)) {
return null;
}
return JSON.parseObject(src, clazz);
}
public static <T> List<T> stringToArrObj(String src, Class<T> clazz) {
if (ValidateUtils.isBlank(src)) {
return null;
}
return JSON.parseArray(src, clazz);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
JSONObject topicObject = jsonObject.getJSONObject(clientType);
@@ -73,6 +88,7 @@ public class JsonUtils {
connectionDO.setClusterId(clusterId);
connectionDO.setTopicName(topicName);
connectionDO.setType(clientType);
connectionDO.setCreateTime(new Date(postTime));
connectionDOList.add(connectionDO);
}
}
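The two new helpers wrap fastjson's parseObject/parseArray and return null for blank input instead of throwing. A usage sketch, using the JmxConfig class added elsewhere in this change set as the target type (project imports for JsonUtils and JmxConfig are assumed):

```java
import java.util.List;

public class JsonUtilsDemo {
    public static void main(String[] args) {
        // blank-safe single-object parsing
        JmxConfig config = JsonUtils.stringToObj("{\"maxConn\":10,\"openSSL\":false}", JmxConfig.class);
        System.out.println(config);

        // typed array parsing
        List<Integer> brokerIds = JsonUtils.stringToArrObj("[1,2,3]", Integer.class);
        System.out.println(brokerIds);

        // null, "" and whitespace-only input all short-circuit to null
        System.out.println(JsonUtils.stringToObj("  ", JmxConfig.class));
    }
}
```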

View File

@@ -1,9 +1,8 @@
package com.xiaojukeji.kafka.manager.common.utils;
import com.xiaojukeji.kafka.manager.common.bizenum.IDCEnum;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import org.apache.commons.lang.StringUtils;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -13,6 +12,20 @@ import java.util.Set;
* @date 20/4/16
*/
public class ValidateUtils {
/**
* 任意一个为空, 则返回true
*/
public static boolean anyNull(Object... objects) {
return Arrays.stream(objects).anyMatch(ValidateUtils::isNull);
}
/**
* 是空字符串或者空
*/
public static boolean anyBlank(String... strings) {
return Arrays.stream(strings).anyMatch(StringUtils::isBlank);
}
/**
* 为空
*/
@@ -83,23 +96,4 @@ public class ValidateUtils {
public static boolean isNullOrLessThanZero(Double value) {
return value == null || value < 0;
}
public static boolean topicNameLegal(String idc, String topicName) {
if (ValidateUtils.isNull(idc) || ValidateUtils.isNull(topicName)) {
return false;
}
// 校验Topic的长度
if (topicName.length() >= TopicCreationConstant.TOPIC_NAME_MAX_LENGTH) {
return false;
}
// 校验前缀
if (IDCEnum.CN.getIdc().equals(idc) ||
(IDCEnum.US.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_US)) ||
(IDCEnum.RU.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_RU))) {
return true;
}
return false;
}
}
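The new anyNull and anyBlank helpers fold the common multi-argument guard clauses into a single stream-based check. A quick sketch:

```java
public class ValidateUtilsDemo {
    public static void main(String[] args) {
        // true: anyNull matches on the first null argument
        System.out.println(ValidateUtils.anyNull("a", null, "c"));

        // anyBlank delegates to commons-lang StringUtils.isBlank,
        // so null, "" and whitespace-only strings all count as blank
        System.out.println(ValidateUtils.anyBlank("a", " "));  // true
        System.out.println(ValidateUtils.anyBlank("a", "b"));  // false
    }
}
```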

View File

@@ -0,0 +1,65 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
public class JmxConfig {
/**
* 单台最大连接数
*/
private Integer maxConn;
/**
* 用户名
*/
private String username;
/**
* 密码
*/
private String password;
/**
* 开启SSL
*/
private Boolean openSSL;
public Integer getMaxConn() {
return maxConn;
}
public void setMaxConn(Integer maxConn) {
this.maxConn = maxConn;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public Boolean isOpenSSL() {
return openSSL;
}
public void setOpenSSL(Boolean openSSL) {
this.openSSL = openSSL;
}
@Override
public String toString() {
return "JmxConfig{" +
"maxConn=" + maxConn +
", username='" + username + '\'' +
", password='" + password + '\'' +
", openSSL=" + openSSL +
'}';
}
}

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -7,8 +8,14 @@ import javax.management.*;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;
import javax.naming.Context;
import javax.rmi.ssl.SslRMIClientSocketFactory;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
@@ -28,13 +35,19 @@ public class JmxConnectorWrap {
private AtomicInteger atomicInteger;
public JmxConnectorWrap(String host, int port, int maxConn) {
private JmxConfig jmxConfig;
public JmxConnectorWrap(String host, int port, JmxConfig jmxConfig) {
this.host = host;
this.port = port;
if (maxConn <= 0) {
maxConn = 1;
this.jmxConfig = jmxConfig;
if (ValidateUtils.isNull(this.jmxConfig)) {
this.jmxConfig = new JmxConfig();
}
this.atomicInteger = new AtomicInteger(maxConn);
if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) {
this.jmxConfig.setMaxConn(1);
}
this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn());
}
public boolean checkJmxConnectionAndInitIfNeed() {
@@ -64,8 +77,18 @@ public class JmxConnectorWrap {
}
String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
try {
JMXServiceURL url = new JMXServiceURL(jmxUrl);
jmxConnector = JMXConnectorFactory.connect(url, null);
Map<String, Object> environment = new HashMap<String, Object>();
if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) {
environment.put(JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
}
if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
environment.put(Context.SECURITY_PROTOCOL, "ssl");
SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory();
environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory);
environment.put("com.sun.jndi.rmi.factory.socket", clientSocketFactory);
}
jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment);
LOGGER.info("JMX connect success, host:{} port:{}.", host, port);
return true;
} catch (MalformedURLException e) {
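The rewritten connect path builds a JMX environment map from JmxConfig: credentials when a username/password pair is configured, and an SSL RMI socket factory when openSSL is enabled. Below is a standalone sketch of the same environment against a plain JMX endpoint (host, port, and credentials are made up). One caveat worth flagging: the stock JMX authenticator expects JMXConnector.CREDENTIALS to be a String[], so the sketch uses that form rather than the List the diff passes:

```java
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;
import javax.naming.Context;
import javax.rmi.ssl.SslRMIClientSocketFactory;
import java.util.HashMap;
import java.util.Map;

public class JmxConnectDemo {
    public static void main(String[] args) throws Exception {
        String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", "broker-1", 9999);

        Map<String, Object> environment = new HashMap<>();
        // username/password authentication
        environment.put(JMXConnector.CREDENTIALS, new String[]{"user", "pass"});
        // SSL-protected RMI transport, mirroring the openSSL branch in the diff
        SslRMIClientSocketFactory csf = new SslRMIClientSocketFactory();
        environment.put(Context.SECURITY_PROTOCOL, "ssl");
        environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, csf);
        environment.put("com.sun.jndi.rmi.factory.socket", csf);

        try (JMXConnector connector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment)) {
            System.out.println("connected: " + connector.getConnectionId());
        }
    }
}
```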

View File

@@ -18,6 +18,8 @@ public class ZkPathUtil {
public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
/**
* config
*/
@@ -27,11 +29,13 @@ public class ZkPathUtil {
public static final String CONFIG_CLIENTS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "clients";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
public static final String D_CONFIG_EXTENSION_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension";
public static final String D_CONTROLLER_CANDIDATES = D_CONFIG_EXTENSION_ROOT_NODE + ZOOKEEPER_SEPARATOR + "candidates";
public static String getBrokerIdNodePath(Integer brokerId) {
return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
@@ -109,6 +113,10 @@ public class ZkPathUtil {
}
public static String getKafkaExtraMetricsPath(Integer brokerId) {
return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + brokerId;
}
public static String getControllerCandidatePath(Integer brokerId) {
return D_CONTROLLER_CANDIDATES + ZOOKEEPER_SEPARATOR + brokerId;
}
}
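The new D_CONTROLLER_CANDIDATES node hangs off the config extension subtree, with one child znode per candidate broker. A small illustration; the "/config" root is an assumption based on Kafka's standard config znode, since CONFIG_ROOT_NODE's value is not shown in this hunk:

```java
public class ZkPathDemo {
    public static void main(String[] args) {
        // expected: /config/extension/candidates/1 (assuming CONFIG_ROOT_NODE = "/config")
        System.out.println(ZkPathUtil.getControllerCandidatePath(1));
    }
}
```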

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.kafka.manager.common.utils;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
public class JsonUtilsTest {
@Test
public void testMapToJsonString() {
Map<String, Object> map = new HashMap<>();
map.put("key", "value");
map.put("int", 1);
String expectRes = "{\"key\":\"value\",\"int\":1}";
Assert.assertEquals(expectRes, JsonUtils.toJSONString(map));
}
}

View File

@@ -8,7 +8,7 @@
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
</parent>
<build>

View File

@@ -94,6 +94,9 @@ import 'antd/es/divider/style';
import Upload from 'antd/es/upload';
import 'antd/es/upload/style';
import Transfer from 'antd/es/transfer';
import 'antd/es/transfer/style';
import TimePicker from 'antd/es/time-picker';
import 'antd/es/time-picker/style';
@@ -142,5 +145,6 @@ export {
TimePicker,
RangePickerValue,
Badge,
Popover
Popover,
Transfer
};

View File

@@ -25,7 +25,7 @@
.editor{
height: 100%;
position: absolute;
left: -14%;
left: -12%;
width: 120%;
}
}

View File

@@ -21,24 +21,12 @@ class Monacoeditor extends React.Component<IEditorProps> {
public state = {
placeholder: '',
};
// public arr = '{"clusterId":95,"startId":37397856,"step":100,"topicName":"kmo_topic_metrics_tempory_zq"}';
// public Ars(a: string) {
// const obj = JSON.parse(a);
// const newobj: any = {};
// for (const item in obj) {
// if (typeof obj[item] === 'object') {
// this.Ars(obj[item]);
// } else {
// newobj[item] = obj[item];
// }
// }
// return JSON.stringify(newobj);
// }
public async componentDidMount() {
const { value, onChange } = this.props;
const format: any = await format2json(value);
this.editor = monaco.editor.create(this.ref, {
value: format.result,
value: format.result || value,
language: 'json',
lineNumbers: 'off',
scrollBeyondLastLine: false,
@@ -48,7 +36,7 @@ class Monacoeditor extends React.Component<IEditorProps> {
minimap: {
enabled: false,
},
// automaticLayout: true, // 自动布局
automaticLayout: true, // 自动布局
glyphMargin: true, // 字形边缘 {},[]
// useTabStops: false,
// formatOnPaste: true,

View File

@@ -2,6 +2,7 @@ import * as React from 'react';
import { Select, Input, InputNumber, Form, Switch, Checkbox, DatePicker, Radio, Upload, Button, Icon, Tooltip } from 'component/antd';
import Monacoeditor from 'component/editor/monacoEditor';
import { searchProps } from 'constants/table';
import { version } from 'store/version';
import './index.less';
const TextArea = Input.TextArea;
@@ -129,6 +130,8 @@ class XForm extends React.Component<IXFormProps> {
this.renderFormItem(formItem),
)}
{formItem.renderExtraElement ? formItem.renderExtraElement() : null}
{/* 添加保存时间提示文案 */}
{formItem.attrs?.prompttype ? <span style={{ color: "#cccccc", fontSize: '12px', lineHeight: '20px', display: 'block' }}>{formItem.attrs.prompttype}</span> : null}
</Form.Item>
);
})}
@@ -189,7 +192,7 @@ class XForm extends React.Component<IXFormProps> {
case FormItemType.upload:
return (
<Upload beforeUpload={(file: any) => false} {...item.attrs}>
<Button><Icon type="upload" /></Button>
<Button><Icon type="upload" /></Button>{version.fileSuffix && <span style={{ color: '#fb3939', padding: '0 0 0 10px' }}>{`请上传${version.fileSuffix}文件`}</span>}
</Upload>
);
}

View File

@@ -67,7 +67,7 @@ export const timeMonthStr = 'YYYY/MM';
// tslint:disable-next-line:max-line-length
export const indexUrl ={
indexUrl:'https://github.com/didi/kafka-manager',
indexUrl:'https://github.com/didi/Logi-KafkaManager/blob/master/docs/user_guide/kafka_metrics_desc.md', // 指标说明
cagUrl:'https://github.com/didi/Logi-KafkaManager/blob/master/docs/user_guide/add_cluster/add_cluster.md', // 集群接入指南 Cluster access Guide
}

View File

@@ -19,7 +19,7 @@ export const cellStyle = {
overflow: 'hidden',
whiteSpace: 'nowrap',
textOverflow: 'ellipsis',
cursor: 'pointer',
// cursor: 'pointer',
};
export const searchProps = {

View File

@@ -38,7 +38,7 @@ export class ClusterConsumer extends SearchAndFilterContainer {
key: 'operation',
width: '10%',
render: (t: string, item: IOffset) => {
return (<a onClick={() => this.getConsumeDetails(item)}></a>);
},
}];
private xFormModal: IXFormWrapper;
@@ -100,7 +100,7 @@ export class ClusterConsumer extends SearchAndFilterContainer {
<div className="k-row">
<ul className="k-tab">
<li>{this.props.tab}</li>
{this.renderSearch()}
{this.renderSearch('', '请输入消费组名称')}
</ul>
<Table
columns={this.columns}
@@ -110,7 +110,7 @@ export class ClusterConsumer extends SearchAndFilterContainer {
/>
</div>
<Modal
title="消费的Topic"
title="消费详情"
visible={this.state.detailsVisible}
onOk={() => this.handleDetailsOk()}
onCancel={() => this.handleDetailsCancel()}

View File

@@ -2,7 +2,8 @@
import * as React from 'react';
import { SearchAndFilterContainer } from 'container/search-filter';
import { Table } from 'component/antd';
import { Table, Button, Popconfirm, Modal, Transfer, notification } from 'component/antd';
// import { Transfer } from 'antd'
import { observer } from 'mobx-react';
import { pagination } from 'constants/table';
import Url from 'lib/url-parser';
@@ -16,8 +17,12 @@ import { timeFormat } from 'constants/strategy';
export class ClusterController extends SearchAndFilterContainer {
public clusterId: number;
public state = {
public state: any = {
searchKey: '',
searchCandidateKey: '',
isCandidateModel: false,
mockData: [],
targetKeys: [],
};
constructor(props: any) {
@@ -37,6 +42,94 @@ export class ClusterController extends SearchAndFilterContainer {
return data;
}
public getCandidateData<T extends IController>(origin: T[]) {
let data: T[] = origin;
let { searchCandidateKey } = this.state;
searchCandidateKey = (searchCandidateKey + '').trim().toLowerCase();
data = searchCandidateKey ? origin.filter((item: IController) =>
(item.host !== undefined && item.host !== null) && item.host.toLowerCase().includes(searchCandidateKey as string),
) : origin;
return data;
}
// 候选controller
public renderCandidateController() {
const columns = [
{
title: 'BrokerId',
dataIndex: 'brokerId',
key: 'brokerId',
width: '20%',
sorter: (a: IController, b: IController) => b.brokerId - a.brokerId,
render: (r: string, t: IController) => {
return (
<a href={`${this.urlPrefix}/admin/broker-detail?clusterId=${this.clusterId}&brokerId=${t.brokerId}`}>{r}
</a>
);
},
},
{
title: 'BrokerHost',
key: 'host',
dataIndex: 'host',
width: '20%',
// render: (r: string, t: IController) => {
// return (
// <a href={`${this.urlPrefix}/admin/broker-detail?clusterId=${this.clusterId}&brokerId=${t.brokerId}`}>{r}
// </a>
// );
// },
},
{
title: 'Broker状态',
key: 'status',
dataIndex: 'status',
width: '20%',
render: (r: number, t: IController) => {
return (
<span>{r === 1 ? '不在线' : '在线'}</span>
);
},
},
{
title: '创建时间',
dataIndex: 'startTime',
key: 'startTime',
width: '25%',
sorter: (a: IController, b: IController) => b.timestamp - a.timestamp,
render: (t: number) => moment(t).format(timeFormat),
},
{
title: '操作',
dataIndex: 'operation',
key: 'operation',
width: '15%',
render: (r: string, t: IController) => {
return (
<Popconfirm
title="确定删除?"
onConfirm={() => this.deleteCandidateCancel(t)}
cancelText="取消"
okText="确认"
>
<a></a>
</Popconfirm>
);
},
},
];
return (
<Table
columns={columns}
dataSource={this.getCandidateData(admin.controllerCandidate)}
pagination={pagination}
rowKey="key"
/>
);
}
public renderController() {
const columns = [
@@ -58,12 +151,6 @@ export class ClusterController extends SearchAndFilterContainer {
key: 'host',
dataIndex: 'host',
width: '30%',
// render: (r: string, t: IController) => {
// return (
// <a href={`${this.urlPrefix}/admin/broker-detail?clusterId=${this.clusterId}&brokerId=${t.brokerId}`}>{r}
// </a>
// );
// },
},
{
title: '变更时间',
@@ -87,16 +174,104 @@ export class ClusterController extends SearchAndFilterContainer {
public componentDidMount() {
admin.getControllerHistory(this.clusterId);
admin.getCandidateController(this.clusterId);
admin.getBrokersMetadata(this.clusterId);
}
public addController = () => {
this.setState({ isCandidateModel: true, targetKeys: [] })
}
public addCandidateChange = (targetKeys: any) => {
this.setState({ targetKeys })
}
public handleCandidateCancel = () => {
this.setState({ isCandidateModel: false });
}
public handleCandidateOk = () => {
let brokerIdList = this.state.targetKeys.map((item: any) => {
return admin.brokersMetadata[item].brokerId
})
admin.addCandidateController(this.clusterId, brokerIdList).then(data => {
notification.success({ message: '新增成功' });
admin.getCandidateController(this.clusterId);
}).catch(err => {
notification.error({ message: '新增失败' });
})
this.setState({ isCandidateModel: false, targetKeys: [] });
}
public deleteCandidateCancel = (target: any) => {
admin.deleteCandidateCancel(this.clusterId, [target.brokerId]).then(() => {
notification.success({ message: '删除成功' });
});
this.setState({ isCandidateModel: false });
}
public renderAddCandidateController() {
let filterControllerCandidate = admin.brokersMetadata.filter((item: any) => {
return !admin.filtercontrollerCandidate.includes(item.brokerId)
})
return (
<Modal
title="新增候选Controller"
visible={this.state.isCandidateModel}
// okText="确认"
// cancelText="取消"
maskClosable={false}
// onOk={() => this.handleCandidateOk()}
onCancel={() => this.handleCandidateCancel()}
footer={<>
<Button style={{ width: '60px' }} onClick={() => this.handleCandidateCancel()}></Button>
<Button disabled={this.state.targetKeys.length > 0 ? false : true} style={{ width: '60px' }} type="primary" onClick={() => this.handleCandidateOk()}></Button>
</>
}
>
<Transfer
dataSource={filterControllerCandidate}
targetKeys={this.state.targetKeys}
render={item => item.host}
onChange={(targetKeys) => this.addCandidateChange(targetKeys)}
titles={['未选', '已选']}
locale={{
itemUnit: '项',
itemsUnit: '项',
}}
listStyle={{
width: "45%",
}}
/>
</Modal>
);
}
public render() {
return (
<div className="k-row">
<ul className="k-tab">
<li>
<span>Controller</span>
<span style={{ display: 'inline-block', color: "#a7a8a9", fontSize: '12px', marginLeft: '15px' }}>Controller将会优先从以下Broker中选举</span>
</li>
<div style={{ display: 'flex' }}>
<div style={{ marginRight: '15px' }}>
<Button onClick={() => this.addController()} type='primary'>Controller</Button>
</div>
{this.renderSearch('', '请查找Host', 'searchCandidateKey')}
</div>
</ul>
{this.renderCandidateController()}
<ul className="k-tab" style={{ marginTop: '10px' }}>
<li>{this.props.tab}</li>
{this.renderSearch('', '请输入Host')}
</ul>
{this.renderController()}
{this.renderAddCandidateController()}
</div>
);
}

View File

@@ -2,7 +2,7 @@ import * as React from 'react';
import Url from 'lib/url-parser';
import { region } from 'store';
import { admin } from 'store/admin';
import { topic } from 'store/topic';
import { app } from 'store/app';
import { Table, notification, Tooltip, Popconfirm } from 'antd';
import { pagination, cellStyle } from 'constants/table';
import { observer } from 'mobx-react';
@@ -56,8 +56,6 @@ export class ClusterTopic extends SearchAndFilterContainer {
public expandPartition(item: IClusterTopics) {
// getTopicBasicInfo
admin.getTopicsBasicInfo(item.clusterId, item.topicName).then(data => {
console.log(admin.topicsBasic);
console.log(admin.basicInfo);
this.clusterTopicsFrom = item;
this.setState({
expandVisible: true,
@@ -114,6 +112,7 @@ export class ClusterTopic extends SearchAndFilterContainer {
public componentDidMount() {
admin.getClusterTopics(this.clusterId);
app.getAdminAppList()
}
public renderClusterTopicList() {

View File

@@ -159,7 +159,6 @@ export class ExclusiveCluster extends SearchAndFilterContainer {
public handleDeleteRegion = (record: IBrokersRegions) => {
const filterRegion = admin.logicalClusters.filter(item => item.regionIdList.includes(record.id));
if (!filterRegion) {
return;
}
@@ -335,6 +334,7 @@ export class ExclusiveCluster extends SearchAndFilterContainer {
{this.renderSearch('', '请输入Region名称broker ID')}
</ul>
{this.renderRegion()}
{this.renderDeleteRegionModal()}
</div >
);
}

View File

@@ -94,4 +94,10 @@
.region-prompt{
font-weight: bold;
text-align: center;
}
.asd{
display: flex;
justify-content: space-around;
align-items: center;
}

View File

@@ -32,9 +32,9 @@ export class ClusterDetail extends React.Component {
}
public render() {
let content = {} as IMetaData;
content = admin.basicInfo ? admin.basicInfo : content;
return (
<>
<PageHeader
className="detail topic-detail-header"
@@ -46,7 +46,7 @@ export class ClusterDetail extends React.Component {
<ClusterOverview basicInfo={content} />
</TabPane>
<TabPane tab="Topic信息" key="2">
<ClusterTopic tab={'Topic信息'}/>
<ClusterTopic tab={'Topic信息'} />
</TabPane>
<TabPane tab="Broker信息" key="3">
<ClusterBroker tab={'Broker信息'} basicInfo={content} />
@@ -60,11 +60,11 @@ export class ClusterDetail extends React.Component {
<TabPane tab="逻辑集群信息" key="6">
<LogicalCluster tab={'逻辑集群信息'} basicInfo={content} />
</TabPane>
<TabPane tab="Controller变更历史" key="7">
<TabPane tab="Controller信息" key="7">
<ClusterController tab={'Controller变更历史'} />
</TabPane>
<TabPane tab="限流信息" key="8">
<CurrentLimiting tab={'限流信息'}/>
<TabPane tab="限流信息" key="8">
<CurrentLimiting tab={'限流信息'} />
</TabPane>
</Tabs>
</>

View File

@@ -40,15 +40,15 @@ export class LogicalCluster extends SearchAndFilterContainer {
key: 'logicalClusterId',
},
{
title: '逻辑集群中文名称',
title: '逻辑集群名称',
dataIndex: 'logicalClusterName',
key: 'logicalClusterName',
width: '150px'
},
{
title: '逻辑集群英文名称',
dataIndex: 'logicalClusterName',
key: 'logicalClusterName1',
title: '逻辑集群标识',
dataIndex: 'logicalClusterIdentification',
key: 'logicalClusterIdentification',
width: '150px'
},
{

View File

@@ -1,5 +1,5 @@
import * as React from 'react';
import { Modal, Table, Button, notification, message, Tooltip, Icon, Popconfirm, Alert } from 'component/antd';
import { Modal, Table, Button, notification, message, Tooltip, Icon, Popconfirm, Alert, Popover } from 'component/antd';
import { wrapper } from 'store';
import { observer } from 'mobx-react';
import { IXFormWrapper, IMetaData, IRegister } from 'types/base-type';
@@ -12,6 +12,7 @@ import { urlPrefix } from 'constants/left-menu';
import { indexUrl } from 'constants/strategy'
import { region } from 'store';
import './index.less';
import Monacoeditor from 'component/editor/monacoEditor';
import { getAdminClusterColumns } from '../config';
const { confirm } = Modal;
@@ -58,7 +59,7 @@ export class ClusterList extends SearchAndFilterContainer {
message: '请输入zookeeper地址',
}],
attrs: {
placeholder: '请输入zookeeper地址',
placeholder: '请输入zookeeper地址例如192.168.0.1:2181,192.168.0.2:2181/logi-kafka',
rows: 2,
disabled: item ? true : false,
},
@@ -72,7 +73,7 @@ export class ClusterList extends SearchAndFilterContainer {
message: '请输入bootstrapServers',
}],
attrs: {
placeholder: '请输入bootstrapServers',
placeholder: '请输入bootstrapServers例如192.168.1.1:9092,192.168.1.2:9092',
rows: 2,
disabled: item ? true : false,
},
@@ -131,7 +132,26 @@ export class ClusterList extends SearchAndFilterContainer {
{
"security.protocol": "SASL_PLAINTEXT",
"sasl.mechanism": "PLAIN",
"sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xxxxxx\" password=\"xxxxxx\";"
"sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\\"xxxxxx\\" password=\\"xxxxxx\\";"
}`,
rows: 8,
},
},
{
key: 'jmxProperties',
label: 'JMX认证',
type: 'text_area',
rules: [{
required: false,
message: '请输入JMX认证',
}],
attrs: {
placeholder: `请输入JMX认证,例如:
{
"maxConn": 10, #KM对单台Broker的最大连接数
"username": "xxxxx", #用户名
"password": "xxxxx", #密码
"openSSL": true #开启SSL,true表示开启SSL,false表示关闭
}`,
rows: 8,
},
@@ -271,11 +291,13 @@ export class ClusterList extends SearchAndFilterContainer {
cancelText="取消"
okText="确认"
>
- <a
-   className="action-button"
- >
-   {item.status === 1 ? '暂停监控' : '开始监控'}
- </a>
+ <Tooltip title="暂停监控将无法正常监控指标信息,建议开启监控">
+   <a
+     className="action-button"
+   >
+     {item.status === 1 ? '暂停监控' : '开始监控'}
+   </a>
+ </Tooltip>
</Popconfirm>
<a onClick={this.showMonitor.bind(this, item)}>
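Two notes on the new placeholders in this file. First, the jmxProperties field expects a JSON blob like the hint above; a typed sketch of that shape, assuming the backend parses exactly these four keys (field names taken from the placeholder, not from a published schema):

// Hypothetical interface mirroring the JMX auth placeholder.
interface IJmxProperties {
  maxConn: number;   // max JMX connections KM opens per broker
  username: string;  // JMX username
  password: string;  // JMX password
  openSSL: boolean;  // true to connect to JMX over SSL
}

Second, the sasl.jaas.config escaping change is about template-literal semantics: inside a backtick string, \\" produces the two characters \" in the rendered hint, whereas \" collapses to a bare quote, which is why the placeholder needed the doubled backslashes:

// In a template literal, \\" renders as \" so the user sees valid JAAS escaping.
const saslHint = `"sasl.jaas.config": "... username=\\"xxxxxx\\" password=\\"xxxxxx\\";"`;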

View File

@@ -1,8 +1,8 @@
import * as React from 'react';
- import { IUser, IUploadFile, IConfigure, IMetaData, IBrokersPartitions } from 'types/base-type';
+ import { IUser, IUploadFile, IConfigure, IConfigGateway, IMetaData } from 'types/base-type';
import { users } from 'store/users';
import { version } from 'store/version';
- import { showApplyModal, showModifyModal, showConfigureModal } from 'container/modal/admin';
+ import { showApplyModal, showApplyModalModifyPassword, showModifyModal, showConfigureModal, showConfigGatewayModal } from 'container/modal/admin';
import { Popconfirm, Tooltip } from 'component/antd';
import { admin } from 'store/admin';
import { cellStyle } from 'constants/table';
@@ -27,6 +27,7 @@ export const getUserColumns = () => {
return (
<span className="table-operation">
<a onClick={() => showApplyModal(record)}></a>
<a onClick={() => showApplyModalModifyPassword(record)}></a>
<Popconfirm
title="确定删除?"
onConfirm={() => users.deleteUser(record.username)}
@@ -184,6 +185,87 @@ export const getConfigureColumns = () => {
return columns;
};
// Gateway configuration columns
export const getConfigColumns = () => {
const columns = [
{
title: '配置类型',
dataIndex: 'type',
key: 'type',
width: '25%',
ellipsis: true,
sorter: (a: IConfigGateway, b: IConfigGateway) => a.type.charCodeAt(0) - b.type.charCodeAt(0),
},
{
title: '配置键',
dataIndex: 'name',
key: 'name',
width: '15%',
ellipsis: true,
sorter: (a: IConfigGateway, b: IConfigGateway) => a.name.charCodeAt(0) - b.name.charCodeAt(0),
},
{
title: '配置值',
dataIndex: 'value',
key: 'value',
width: '20%',
ellipsis: true,
sorter: (a: IConfigGateway, b: IConfigGateway) => a.value.charCodeAt(0) - b.value.charCodeAt(0),
render: (t: string) => {
return t.substr(0, 1) === '{' && t.slice(-1) === '}' ? JSON.stringify(JSON.parse(t), null, 4) : t;
},
},
{
title: '修改时间',
dataIndex: 'modifyTime',
key: 'modifyTime',
width: '15%',
sorter: (a: IConfigGateway, b: IConfigGateway) => b.modifyTime - a.modifyTime,
render: (t: number) => moment(t).format(timeFormat),
},
{
title: '版本号',
dataIndex: 'version',
key: 'version',
width: '10%',
ellipsis: true,
sorter: (a: IConfigGateway, b: IConfigGateway) => b.version.charCodeAt(0) - a.version.charCodeAt(0),
},
{
title: '描述信息',
dataIndex: 'description',
key: 'description',
width: '20%',
ellipsis: true,
onCell: () => ({
style: {
maxWidth: 180,
...cellStyle,
},
}),
},
{
title: '操作',
width: '10%',
render: (text: string, record: IConfigGateway) => {
return (
<span className="table-operation">
<a onClick={() => showConfigGatewayModal(record)}></a>
<Popconfirm
title="确定删除?"
onConfirm={() => admin.deleteConfigGateway({ id: record.id })}
cancelText="取消"
okText="确认"
>
<a></a>
</Popconfirm>
</span>);
},
},
];
return columns;
};
const renderClusterHref = (value: number | string, item: IMetaData, key: number) => {
return ( // 0: monitoring paused, link disabled; 1: monitoring active, link clickable
<>
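The value renderer above pretty-prints JSON-looking strings; as originally written, t.substr(0, -1) always returned '' so the branch never fired, and even with the end check corrected to t.slice(-1), JSON.parse is unguarded and will throw on a value that merely looks like JSON. A defensive sketch:

// Sketch: pretty-print only when the value actually parses as JSON.
const renderConfigValue = (t: string): string => {
  if (t && t.startsWith('{') && t.endsWith('}')) {
    try {
      return JSON.stringify(JSON.parse(t), null, 4);
    } catch {
      return t; // not valid JSON after all; show the raw value
    }
  }
  return t;
};

Separately, the charCodeAt(0) sorters order rows by first character only; a.type.localeCompare(b.type) would sort on the full string if that is the intent.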

View File

@@ -3,11 +3,11 @@ import { SearchAndFilterContainer } from 'container/search-filter';
import { Table, Button, Spin } from 'component/antd';
import { admin } from 'store/admin';
import { observer } from 'mobx-react';
- import { IConfigure } from 'types/base-type';
+ import { IConfigure, IConfigGateway } from 'types/base-type';
import { users } from 'store/users';
import { pagination } from 'constants/table';
- import { getConfigureColumns } from './config';
- import { showConfigureModal } from 'container/modal/admin';
+ import { getConfigureColumns, getConfigColumns } from './config';
+ import { showConfigureModal, showConfigGatewayModal } from 'container/modal/admin';
@observer
export class ConfigureManagement extends SearchAndFilterContainer {
@@ -17,7 +17,12 @@ export class ConfigureManagement extends SearchAndFilterContainer {
};
public componentDidMount() {
- admin.getConfigure();
+ if (this.props.isShow) {
+   admin.getGatewayList();
+   admin.getGatewayType();
+ } else {
+   admin.getConfigure();
+ }
}
public getData<T extends IConfigure>(origin: T[]) {
@@ -34,15 +39,34 @@ export class ConfigureManagement extends SearchAndFilterContainer {
return data;
}
public getGatewayData<T extends IConfigGateway>(origin: T[]) {
let data: T[] = origin;
let { searchKey } = this.state;
searchKey = (searchKey + '').trim().toLowerCase();
data = searchKey ? origin.filter((item: IConfigGateway) =>
((item.name !== undefined && item.name !== null) && item.name.toLowerCase().includes(searchKey as string))
|| ((item.value !== undefined && item.value !== null) && item.value.toLowerCase().includes(searchKey as string))
|| ((item.description !== undefined && item.description !== null) &&
item.description.toLowerCase().includes(searchKey as string)),
) : origin;
return data;
}
public renderTable() {
return (
<Spin spinning={users.loading}>
- <Table
+ {this.props.isShow ? <Table
+   rowKey="key"
+   columns={getConfigColumns()}
+   dataSource={this.getGatewayData(admin.configGatewayList)}
+   pagination={pagination}
+ /> : <Table
    rowKey="key"
    columns={getConfigureColumns()}
    dataSource={this.getData(admin.configureList)}
    pagination={pagination}
- />
+ />}
</Spin>
);
@@ -53,7 +77,7 @@ export class ConfigureManagement extends SearchAndFilterContainer {
<ul>
{this.renderSearch('', '请输入配置键、值或描述')}
<li className="right-btn-1">
<Button type="primary" onClick={() => showConfigureModal()}></Button>
<Button type="primary" onClick={() => this.props.isShow ? showConfigGatewayModal() : showConfigureModal()}></Button>
</li>
</ul>
);
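ConfigureManagement now renders either the gateway table or the platform table depending on a single isShow prop. A minimal sketch of the prop contract and its two call sites, assuming the prop stays a plain boolean (a more descriptive name such as isGateway would read better, but isShow is what the code uses):

// Hypothetical prop typing for the dual-purpose component.
interface IConfigureManagementProps {
  isShow: boolean; // true: gateway config table; false: platform config table
}

// Usage, as wired up in PlatformManagement further down:
const tabsBody = (
  <>
    <ConfigureManagement isShow={false} /> {/* 平台配置 */}
    <ConfigureManagement isShow={true} />  {/* 网关配置 */}
  </>
);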

View File

@@ -6,6 +6,7 @@ import { curveKeys, CURVE_KEY_MAP, PERIOD_RADIO_MAP, PERIOD_RADIO } from './conf
import moment = require('moment');
import { observer } from 'mobx-react';
import { timeStampStr } from 'constants/strategy';
import { adminMonitor } from 'store/admin-monitor';
@observer
export class DataCurveFilter extends React.Component {
@@ -21,6 +22,7 @@ export class DataCurveFilter extends React.Component {
}
public refreshAll = () => {
adminMonitor.setRequestId(null);
Object.keys(curveKeys).forEach((c: curveKeys) => {
const { typeInfo, curveInfo: option } = CURVE_KEY_MAP.get(c);
const { parser } = typeInfo;
@@ -32,7 +34,7 @@ export class DataCurveFilter extends React.Component {
return (
<>
<Radio.Group onChange={this.radioChange} defaultValue={curveInfo.periodKey}>
{PERIOD_RADIO.map(p => <Radio.Button key={p.key} value={p.key}>{p.label}</Radio.Button>)}
</Radio.Group>
<DatePicker.RangePicker
format={timeStampStr}
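refreshAll now clears the store's request id before refetching every curve. The adminMonitor store itself is not shown in this diff; a sketch of what setRequestId presumably looks like, assuming a MobX action whose null value forces the next round of chart fetches to start a fresh request cycle:

// Hypothetical fragment of store/admin-monitor; names assumed from the call site.
import { observable, action } from 'mobx';

class AdminMonitor {
  @observable public requestId: string | null = null;

  @action.bound
  public setRequestId(id: string | null) {
    this.requestId = id; // null discards any in-flight request grouping
  }
}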

View File

@@ -79,7 +79,7 @@ export class IndividualBill extends React.Component {
}
public renderTableList() {
- const adminUrl=`${urlPrefix}/admin/bill-detail`
+ const adminUrl = `${urlPrefix}/admin/bill-detail`
return (
<Table
rowKey="key"
@@ -89,11 +89,11 @@ export class IndividualBill extends React.Component {
/>
);
}
public renderChart() {
return (
<div className="chart-box">
- <BarChartComponet ref={(ref) => this.chart = ref } getChartData={this.getData.bind(this, null)} />
+ <BarChartComponet ref={(ref) => this.chart = ref} getChartData={this.getData.bind(this, null)} />
</div>
);
}
@@ -132,7 +132,7 @@ export class IndividualBill extends React.Component {
<>
<div className="container">
<Tabs defaultActiveKey="1" type="card">
<TabPane
tab={<>
<span></span>&nbsp;
<a
@@ -142,7 +142,7 @@ export class IndividualBill extends React.Component {
>
<Icon type="question-circle" />
</a>
</>}
key="1"
>
{this.renderDatePick()}

View File

@@ -13,17 +13,20 @@ export class PlatformManagement extends React.Component {
public render() {
return (
<>
- <Tabs activeKey={location.hash.substr(1) || '1'} type="card" onChange={handleTabKey}>
-   <TabPane tab="应用管理" key="1">
-     <AdminAppList />
-   </TabPane>
-   <TabPane tab="用户管理" key="2">
-     <UserManagement />
-   </TabPane>
-   <TabPane tab="配置管理" key="3">
-     <ConfigureManagement />
-   </TabPane>
- </Tabs>
+ <Tabs activeKey={location.hash.substr(1) || '1'} type="card" onChange={handleTabKey}>
+   <TabPane tab="应用管理" key="1">
+     <AdminAppList />
+   </TabPane>
+   <TabPane tab="用户管理" key="2">
+     <UserManagement />
+   </TabPane>
+   <TabPane tab="平台配置" key="3">
+     <ConfigureManagement isShow={false} />
+   </TabPane>
+   <TabPane tab="网关配置" key="4">
+     <ConfigureManagement isShow={true} />
+   </TabPane>
+ </Tabs>
</>
);
}
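Tab state here is driven by the URL fragment (activeKey={location.hash.substr(1) || '1'}), so handleTabKey presumably writes the selected key back into the hash; it is imported from elsewhere and not shown in this diff. A sketch under that assumption:

// Hypothetical implementation of the imported handleTabKey helper.
const handleTabKey = (key: string) => {
  location.hash = key; // keeps the active tab bookmarkable and stable across refreshes
};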

View File

@@ -29,7 +29,7 @@ export class UserManagement extends SearchAndFilterContainer {
searchKey = (searchKey + '').trim().toLowerCase();
data = searchKey ? origin.filter((item: IUser) =>
- (item.username !== undefined && item.username !== null) && item.username.toLowerCase().includes(searchKey as string)) : origin ;
+ (item.username !== undefined && item.username !== null) && item.username.toLowerCase().includes(searchKey as string)) : origin;
return data;
}

View File

@@ -1,7 +1,7 @@
import * as React from 'react';
import { alarm } from 'store/alarm';
import { IMonitorGroups } from 'types/base-type';
- import { getValueFromLocalStorage, setValueToLocalStorage } from 'lib/local-storage';
+ import { getValueFromLocalStorage, setValueToLocalStorage, deleteValueFromLocalStorage } from 'lib/local-storage';
import { VirtualScrollSelect } from '../../../component/virtual-scroll-select';
interface IAlarmSelectProps {
@@ -36,6 +36,10 @@ export class AlarmSelect extends React.Component<IAlarmSelectProps> {
onChange && onChange(params);
}
public componentWillUnmount() {
deleteValueFromLocalStorage('monitorGroups');
}
public render() {
const { value, isDisabled } = this.props;
return (
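The new componentWillUnmount drops the cached monitorGroups entry so a stale group list does not leak into the next mount. deleteValueFromLocalStorage comes from lib/local-storage and its body is not shown here; a minimal sketch, assuming it wraps window.localStorage.removeItem:

// Hypothetical shape of the lib/local-storage helper used above.
export const deleteValueFromLocalStorage = (key: string): void => {
  try {
    window.localStorage.removeItem(key);
  } catch {
    // localStorage can throw in private browsing; failing silently is fine for a cache key
  }
};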

Some files were not shown because too many files have changed in this diff.