Compare commits

...

983 Commits

Author SHA1 Message Date
EricZeng
045f65204b Merge pull request #633 from didi/master
合并主分支
2022-09-29 13:09:19 +08:00
EricZeng
f414b47a78 1、补充升级至v3.0.0信息;2、增加v3.0.0变更内容;(#636)
1、补充升级至v3.0.0信息;2、增加v3.0.0变更内容;(#636)
2022-09-29 13:08:32 +08:00
EricZeng
44f4e2f0f9 Merge pull request #635 from didi/dev
合并前端调整的内容
2022-09-29 11:50:25 +08:00
zengqiao
2361008bdf Merge branch 'dev' of github.com:didi/KnowStreaming into dev 2022-09-29 11:49:00 +08:00
zengqiao
7377ef3ec5 增加v3.0.0变更内容 2022-09-29 11:45:29 +08:00
lucasun
a28d064b7a Merge pull request #634 from GraceWalk/dev
前端 bug 修复 & 问题优化
2022-09-29 11:23:25 +08:00
GraceWalk
e2e57e8575 fix: 依赖版本更新 2022-09-29 11:15:47 +08:00
zengqiao
9d90bd2835 补充升级至v3.0.0信息 2022-09-29 11:04:49 +08:00
EricZeng
7445e68df4 Merge pull request #632 from didi/master
合并主分支
2022-09-29 10:53:54 +08:00
GraceWalk
ab42625ad2 fix: 数字展示格式化 2022-09-29 10:52:31 +08:00
GraceWalk
18789a0a53 fix: IconFont 组件改为从独立包引入 2022-09-29 10:51:52 +08:00
zengqiao
68a37bb56a Merge branch 'master' of github.com:didi/KnowStreaming 2022-09-29 10:49:46 +08:00
GraceWalk
3b33652c47 fix: Rebalance 卡片 icon 颜色调整 2022-09-29 10:48:52 +08:00
GraceWalk
1e0c4c3904 feat: Topic 详情消息 Value 列支持复制 2022-09-29 10:48:09 +08:00
zengqiao
04e223de16 修改协议文案 2022-09-29 10:48:00 +08:00
GraceWalk
c4a691aa8a fix: 多集群列表兼容集群无 ZK 情况 2022-09-29 10:44:28 +08:00
GraceWalk
ff9dde163a feat: 图表支持存储拖拽排序 & 补点逻辑优化 2022-09-29 10:42:44 +08:00
EricZeng
eb7efbd1a5 增加字段校验注解(#631)
增加字段校验注解(#631)
2022-09-29 09:59:02 +08:00
zengqiao
8c8c362c54 Merge branch 'dev' of github.com:didi/KnowStreaming into dev 2022-09-28 20:19:35 +08:00
zengqiao
66e119ad5d 增加字段校验注解 2022-09-28 20:16:06 +08:00
EricZeng
6dedc04a05 Merge pull request #630 from didi/dev
合并开发分支
2022-09-28 20:14:51 +08:00
EricZeng
0cf8bad0df Merge pull request #629 from didi/master
合并主分支
2022-09-28 20:06:26 +08:00
zengqiao
95c9582d8b 优化消费组详情指标为实时获取 2022-09-28 20:03:23 +08:00
EricZeng
7815126ff5 1、修复Group指标防重复不生效问题;2、修复自动创建ES索引模版失败问题; (#628)
* 修复自动创建ES索引模版失败问题

* 修复Group指标防重复不生效问题

Co-authored-by: zengqiao <zengqiao@didiglobal.com>
2022-09-28 19:55:30 +08:00
zengqiao
a5fa9de54b 修复Group指标防重复不生效问题 2022-09-28 19:52:11 +08:00
zengqiao
95f1a2c630 修复自动创建ES索引模版失败问题 2022-09-28 19:46:07 +08:00
zengqiao
1e256ae1fd 修复自动创建ES索引模版失败问题 2022-09-28 19:44:33 +08:00
zengqiao
9fc9c54fa1 bump version to 3.0.0 2022-09-28 11:20:16 +08:00
zengqiao
1b362b1e02 Merge branch 'master' of github.com:didi/KnowStreaming 2022-09-28 11:16:54 +08:00
EricZeng
04e3172cca [ISSUE-624]过滤掉不存在的Topic(#625)
[ISSUE-624]过滤掉不存在的Topic(#625)
2022-09-28 11:13:15 +08:00
EricZeng
1caab7f3f7 [ISSUE-624]过滤掉不存在的Topic(#624)
[ISSUE-624]过滤掉不存在的Topic(#624)
2022-09-28 10:41:39 +08:00
zengqiao
9d33c725ad [ISSUE-624]过滤掉不存在的Topic(#624)
同步Group元信息时,如果Topic已经不存在了,则过滤掉该Group+Topic信息
2022-09-28 10:39:33 +08:00
EricZeng
6ed1d38106 [ISSUE-598]Fix start_time not set when create reassign job in MySQL-8 (#623)
[ISSUE-598]Fix start_time not set when create reassign job in MySQL-8 (#623)
2022-09-28 10:26:56 +08:00
zengqiao
0f07ddedaf [ISSUE-598]Fix start_time not set when create reassign job in MySQL-8 2022-09-28 10:24:32 +08:00
EricZeng
289945b471 Merge pull request #622 from didi/dev
后端补充Kafka集群运行模式字段信息
2022-09-28 10:08:17 +08:00
zengqiao
f331a6d144 后端补充Kafka集群运行模式字段信息 2022-09-27 18:43:22 +08:00
EricZeng
0c8c12a651 Merge pull request #621 from didi/dev
指标发送ES类按照指标类别拆分
2022-09-27 18:38:05 +08:00
zengqiao
028c3bb2fa 指标发送ES类按照指标类别拆分 2022-09-27 10:19:18 +08:00
EricZeng
d7a5a0d405 健康巡检任务按照类型进行拆分
健康巡检任务按照类型进行拆分
2022-09-27 10:17:12 +08:00
zengqiao
5ef5f6e531 健康巡检任务按照类型进行拆分 2022-09-26 20:10:49 +08:00
EricZeng
1d205734b3 Merge pull request #619 from didi/dev
集群信息中,补充ZK配置字段
2022-09-26 19:50:26 +08:00
Peng
5edd43884f Update README.md 2022-09-26 18:43:25 +08:00
zengqiao
c1992373bc 集群信息中,补充ZK配置字段 2022-09-26 11:10:38 +08:00
EricZeng
ed562f9c8a Merge pull request #618 from didi/dev
DB中Group信息的更新方式,由replace调整为insert或update
2022-09-26 10:02:24 +08:00
zengqiao
b4d44ef8c7 DB中Group信息的更新方式,由replace调整为insert或update 2022-09-23 17:02:25 +08:00
EricZeng
ad0c16a1b4 升级Helm版本及增加Docker相关文件
升级Helm版本及增加Docker相关文件
2022-09-23 16:17:00 +08:00
wangdongfang-aden
7eabe66853 Merge pull request #616 from wangdongfang-aden/dev
添加docker-compose部署和更新helm
2022-09-23 14:50:22 +08:00
wangdongfang-aden
3983d73695 Update Chart.yaml 2022-09-23 14:47:40 +08:00
wangdongfang-aden
161d4c4562 Update 单机部署手册.md 2022-09-23 14:46:27 +08:00
wangdongfang-aden
9a1e89564e Update 单机部署手册.md 2022-09-23 14:44:49 +08:00
wangdongfang-aden
0c18c5b4f6 Update 单机部署手册.md 2022-09-23 14:43:23 +08:00
wangdongfang-aden
3e12ba34f7 Update docker-compose.yml 2022-09-23 14:33:05 +08:00
wangdongfang-aden
e71e29391b Delete ks-start.sh 2022-09-23 14:26:24 +08:00
wangdongfang-aden
9b7b9a7af0 Delete es_template_create.sh 2022-09-23 14:26:16 +08:00
wangdongfang-aden
a23819c308 Create ks-start.sh 2022-09-23 14:19:35 +08:00
wangdongfang-aden
6cb1825d96 Create es_template_create.sh 2022-09-23 14:19:10 +08:00
wangdongfang-aden
77b8c758dc Create initsql 2022-09-23 14:18:17 +08:00
wangdongfang-aden
e5a582cfad Create my.cnf 2022-09-23 14:17:25 +08:00
wangdongfang-aden
ec83db267e Create init.sh 2022-09-23 14:17:02 +08:00
wangdongfang-aden
bfd026cae7 Create dockerfile 2022-09-23 14:16:28 +08:00
wangdongfang-aden
35f1dd8082 Create dockerfile 2022-09-23 14:14:47 +08:00
wangdongfang-aden
7ed0e7dd23 Create dockerfile 2022-09-23 14:14:02 +08:00
wangdongfang-aden
1a3cbf7a9d Create knowstreaming.conf 2022-09-23 14:07:04 +08:00
wangdongfang-aden
d9e4abc3de Create ks-start.sh 2022-09-23 14:05:59 +08:00
wangdongfang-aden
a4186085d3 Create es_template_create.sh 2022-09-23 14:05:05 +08:00
wangdongfang-aden
26b1846bb4 Create docker-compose.yml 2022-09-23 14:03:14 +08:00
wangdongfang-aden
1aa89527a6 helm update 3.0.0-beta.3 2022-09-23 11:36:46 +08:00
wangdongfang-aden
eac76d7ad0 helm update 3.0.0-beta.3 2022-09-23 11:36:01 +08:00
wangdongfang-aden
cea0cd56f6 Merge pull request #607 from haoqi123/dev
[单机部署手册.md]docker-compose部署方式添加注释描述
2022-09-23 10:27:04 +08:00
EricZeng
c4b897f282 bump version to 3.0.0-beta.4
bump version to 3.0.0-beta.4
2022-09-23 10:24:52 +08:00
zengqiao
47389dbabb bump version to 3.0.0-beta.4 2022-09-23 10:17:58 +08:00
haoqi
a2f8b1a851 1. [单机部署手册.md]docker-compose部署方式添加注释描述 2022-09-22 19:46:21 +08:00
EricZeng
feac0a058f Merge pull request #613 from didi/dev
补充v3.0.0-beta.2变更信息
2022-09-22 17:30:35 +08:00
zengqiao
27eeac9fd4 补充v3.0.0-beta.2变更信息 2022-09-22 17:28:51 +08:00
EricZeng
a14db4b194 Merge pull request #612 from didi/dev
合并开发分支
2022-09-22 17:28:09 +08:00
lucasun
54ee271a47 Merge pull request #611 from GraceWalk/dev
修复前端bug和体验问题
2022-09-22 15:51:46 +08:00
GraceWalk
a3a9be4f7f fix: 更正前端本地环境接口代理地址 2022-09-22 15:37:24 +08:00
GraceWalk
d4f0a832f3 fix: 样式更新 2022-09-22 15:31:52 +08:00
GraceWalk
7dc533372c fix: 更正文件引用路径 2022-09-22 15:31:34 +08:00
GraceWalk
1737d87713 fix: 修复配置无法删除的问题 2022-09-22 15:31:13 +08:00
GraceWalk
dbb98dea11 fix: 更新登录页图片 2022-09-22 15:21:04 +08:00
GraceWalk
802b382b36 fix: Topic Messages 详情提示优化 2022-09-22 15:20:31 +08:00
GraceWalk
fc82999d45 fix: 消费测试 Message 限制最大值 2022-09-22 15:19:56 +08:00
GraceWalk
08aa000c07 refactor: 接入/编辑集群优化 2022-09-22 15:19:03 +08:00
GraceWalk
39015b5100 feat: 多集群管理列表页增加手动刷新功能 2022-09-22 15:18:13 +08:00
GraceWalk
0d635ad419 refactor: webpack 配置结构调整 2022-09-22 15:13:25 +08:00
EricZeng
9133205915 Merge pull request #610 from didi/dev
合并开发分支
2022-09-22 14:51:23 +08:00
zengqiao
725ac10c3d 1、调整KafkaZKDao位置;2、offset信息获取时,过滤掉无leader分区;3、调整验证ZK是否合法时的session超时时间 2022-09-22 11:30:46 +08:00
zengqiao
2b76358c8f Overview页面,后端增加排序信息 2022-09-22 11:24:13 +08:00
zengqiao
833c360698 bump oshi-core version to 5.6.1 2022-09-22 11:17:59 +08:00
zengqiao
7da1e67b01 FAQ补充权限识别失败问题说明 2022-09-22 11:13:54 +08:00
GraceWalk
7eb86a47dd fix: 部分依赖更新 2022-09-21 16:22:45 +08:00
GraceWalk
d67e383c28 feat: 系统管理列表增加手动刷新功能 2022-09-21 16:21:57 +08:00
GraceWalk
8749d3e1f5 fix: config 子应用 axios 配置错误兼容 2022-09-21 16:21:07 +08:00
GraceWalk
30fba21c48 fix: 生产测试单次发送消息数限制为 0~1000 2022-09-21 16:15:19 +08:00
GraceWalk
d83d35aee9 fix: 样式 & 文案优化 2022-09-21 16:12:13 +08:00
GraceWalk
1d3caeea7d feat: Cluster 图表去掉放大功能 2022-09-21 16:11:14 +08:00
haoqi
26916f6632 1. [单机部署手册.md]docker-compose部署方式添加注释描述
2. 更改docker-compose中ui对外访问port为80
2022-09-21 12:55:43 +08:00
EricZeng
fbfa0d2d2a Merge pull request #600 from haoqi123/dev
docker-compose addition
2022-09-21 10:49:08 +08:00
haoqi
e626b99090 1. 删除km-dist/docker文件夹,以[单机部署手册.md]为准 2022-09-20 19:30:20 +08:00
haoqi123
203859b71b Merge branch 'didi:dev' into dev 2022-09-20 19:25:12 +08:00
haoqi
9a25c22f3a 1. 调整docker-compose.yml中各个服务的镜像
2. 经过@wangdongfang-aden大哥的调试将helm与docker镜像合二为一,于是删减掉各个镜像的Dockerfile与启动脚本,后续也不需要额外维护
2022-09-20 19:23:18 +08:00
zengqiao
0a03f41a7c 后端增加指标摆放顺序功能 2022-09-20 14:42:22 +08:00
zengqiao
56191939c8 Merge branch 'dev' of github.com:didi/KnowStreaming into dev 2022-09-20 14:23:09 +08:00
zengqiao
beb754aaaa 修复JMX连接被关闭,抛出IOException后,未进行连接重建的问题 2022-09-20 14:22:06 +08:00
EricZeng
f234f740ca Merge pull request #603 from didi/dev
合并开发分支
2022-09-20 10:51:39 +08:00
EricZeng
e14679694c Merge pull request #602 from f1558/dev
fix issue
2022-09-20 10:31:16 +08:00
zengqiao
e06712397e 修复因DB中Broker信息不存在导致TotalLogSize指标获取时抛空指针问题 2022-09-20 10:27:30 +08:00
Richard
b6c6df7ffc fix issue
* SQL specification comments to avoid direct operation failure
2022-09-20 09:42:42 +08:00
zengqiao
375c6f56c9 修改GroupOffsetResetEnum类名为OffsetTypeEnum 2022-09-19 13:55:59 +08:00
EricZeng
0bf85c97b5 Merge pull request #555 from superspeedone/dev
Dev
2022-09-19 11:18:28 +08:00
EricZeng
630e582321 Merge pull request #593 from Mengqi777/mengqi-dev
fix: adjust os judgment method with uname
2022-09-19 10:34:16 +08:00
EricZeng
a89fe23bdd Merge pull request #597 from WYAOBO/dev
文档更新
2022-09-19 10:15:38 +08:00
haoqi
a7a5fa9a31 1. 调整docker-compose.yml中networks配置
2. ks-manager添加健康检查
3. 更新单机部署手册
2022-09-18 19:10:22 +08:00
_haoqi
c73a7eee2f 1. 调整docker-compose服务,容器名称 2022-09-16 20:03:58 +08:00
_haoqi
121f8468d5 1. 调整文件格式LF
2. 调整docker-compose服务,容器名称
2022-09-16 17:33:19 +08:00
haoqi
7b0b6936e0 1. 调整docker-compose.yml中容器名称 2022-09-16 15:54:34 +08:00
Peng
597ea04a96 Update README.md 2022-09-16 15:20:04 +08:00
Peng
f7f90aeaaa Update README.md 2022-09-16 15:18:29 +08:00
_haoqi
227479f695 1. 修改dockerfile
2. 删除无用配置文件
2022-09-16 15:13:18 +08:00
WYAOBO
6477fb3fe0 Merge branch 'didi:dev' into dev 2022-09-16 14:50:13 +08:00
wangdongfang-aden
4223f4f3c4 Merge pull request #596 from wangdongfang-aden/dev
helm update 3.0.0-beta.2
2022-09-16 14:45:43 +08:00
wangdongfang-aden
7288874d72 helm update 3.0.0-beta.2 2022-09-16 14:44:14 +08:00
wangdongfang-aden
68f76f2daf helm update 3.0.0-beta.2 2022-09-16 14:42:34 +08:00
wyb
fe6ddebc49 文档更新 2022-09-16 14:41:45 +08:00
wangdongfang-aden
12b5acd073 helm update 3.0.0-beta.2 2022-09-16 14:41:40 +08:00
wangdongfang-aden
a6f1fe07b3 helm update 3.0.0-beta.2 2022-09-16 14:41:02 +08:00
wangdongfang-aden
85e3f2a946 helm update 3.0.0-beta.2 2022-09-16 14:40:34 +08:00
pokemeng
d4f416de14 fix: adjust os judgment method with uname 2022-09-16 11:34:03 +08:00
haoqi
0d9a6702c1 1. 更改es初始化脚本输出追加为重定向 2022-09-15 17:13:58 +08:00
haoqi
d11285cdbf Merge branch 'master' into dev
# Conflicts:
#	km-dist/init/sql/ddl-logi-security.sql
2022-09-15 17:01:39 +08:00
EricZeng
5f1f33d2b9 Merge pull request #591 from didi/master
合并主分支
2022-09-15 16:59:11 +08:00
zengqiao
474daf752d bump version to 3.0.0-beta.3 2022-09-15 16:54:52 +08:00
haoqi
27d1b92690 1. 添加init容器,只用于初始化es索引 2022-09-15 16:22:51 +08:00
zengqiao
993afa4c19 默认用户名密码调整说明 2022-09-15 16:20:13 +08:00
EricZeng
028d891c32 Merge pull request #588 from didi/dev_v3.0.0-beta.2
合并v3.0.0 beta.2
2022-09-15 15:46:58 +08:00
zengqiao
0df55ec22d 更新3.0.0-beta.2升级手册 2022-09-15 15:23:29 +08:00
zengqiao
579f64774d 更新3.0.0-beta.2变更说明 2022-09-15 15:20:50 +08:00
haoqi
792f8d939d 1. 更改Dockerfile 2022-09-15 15:06:19 +08:00
EricZeng
e4fb02fcda Merge pull request #587 from didi/dev
合并开发分支
2022-09-15 14:35:00 +08:00
haoqi
0c14c641d0 1. 添加docker-compose部署方式
2. 更改manage服务初始化方式
3. 更改es初始化方式
2022-09-15 14:26:45 +08:00
EricZeng
dba671fd1e Merge pull request #586 from GraceWalk/dev
Dev
2022-09-15 13:49:04 +08:00
GraceWalk
80d1693722 fix: 修复单集群详情引导步骤定位错误的问题 2022-09-15 13:39:09 +08:00
GraceWalk
26014a11b2 feat: 补充前端打包构建部分文档说明 2022-09-15 13:36:09 +08:00
GraceWalk
848fddd55a fix: 切换依赖安装源为 taobao 镜像 2022-09-15 13:34:15 +08:00
EricZeng
97f5f05f1a Merge pull request #585 from didi/dev
更新单机部署文档
2022-09-15 13:05:14 +08:00
zengqiao
25b82810f2 更新单机部署文档 2022-09-15 13:01:33 +08:00
EricZeng
9b1e506fa7 Merge pull request #584 from didi/dev
修复日志表字段过短问题
2022-09-15 12:56:49 +08:00
zengqiao
7a42996e97 修复日志表字段过短问题 2022-09-15 12:55:06 +08:00
EricZeng
dbfcebcf67 Merge pull request #583 from didi/dev
合并开发分支
2022-09-15 12:38:11 +08:00
zengqiao
37c3f69a28 修复类型转化失败问题 2022-09-15 11:32:44 +08:00
zengqiao
5d412890b4 调整超时时间配置 2022-09-15 11:31:25 +08:00
zengqiao
1e318a4c40 修改默认的用户名密码 2022-09-15 11:31:03 +08:00
EricZeng
d4549176ec Merge pull request #566 from lomodays207/master
解决 java.lang.NumberFormatException: For input string: "{"value":0,"relation":"eq"}" 问题
2022-09-15 10:05:26 +08:00
haoqi
61efdf492f 添加docker-compose部署方式 2022-09-13 23:20:41 +08:00
lucasun
67ea4d44c8 Merge pull request #575 from GraceWalk/dev
同步前端代码
2022-09-13 15:13:02 +08:00
GraceWalk
fdae05a4aa fix: 登录页文案修改 2022-09-13 14:46:42 +08:00
GraceWalk
5efb837ee8 fix: 单集群详情样式优化 2022-09-13 14:46:29 +08:00
GraceWalk
584b626d93 fix: 修复 Broker Card 返回数据后依旧展示加载态的问题 2022-09-13 14:45:56 +08:00
GraceWalk
de25a4ed8e fix: 修复 Broker Card 返回数据后依旧展示加载态的问题 2022-09-13 14:45:27 +08:00
GraceWalk
2e852e5ca6 fix: 修复用户登出后回退还可以访问系统的问题 2022-09-13 14:44:18 +08:00
GraceWalk
b11000715a 修复 Topic Config 编辑表单不能正确回显当前值的问题 2022-09-13 14:43:35 +08:00
GraceWalk
b3f8b46f0f fix: 修复扩缩/迁移副本无法选中默认 Topic 的问题 & 迁移副本 Topic 迁移时间单位支持分钟粒度 2022-09-13 14:42:21 +08:00
GraceWalk
8d22a0664a fix: Broker 列表标识当前 Controller 2022-09-13 14:37:20 +08:00
GraceWalk
20756a3453 fix: 重置 Offset 部分 partitionId 修改为 Select & Offset 数值限制 2022-09-13 14:35:23 +08:00
GraceWalk
c9b4d45a64 fix: 修复 Job 扩缩副本任务明细错误的问题 2022-09-13 14:31:45 +08:00
GraceWalk
83f7f5468b fix: 均衡历史列表样式重构 & 周期均衡场景化 & 立即均衡默认带入周期均衡参数 2022-09-13 14:30:03 +08:00
GraceWalk
59c042ad67 fix: Topic 列表趋势图优化 & 相关文案调整 2022-09-13 14:26:12 +08:00
GraceWalk
d550fc5068 fix: 修复 Consume 点击 Stop 后未停止请求发送的问题 2022-09-13 14:24:30 +08:00
GraceWalk
6effba69a0 feat: 补充 ReBalance 和 Topic 部分权限项 2022-09-13 14:22:50 +08:00
GraceWalk
9b46956259 fix: Topic 详情 Partition Tab 卡片模式展示优化 2022-09-13 14:18:17 +08:00
GraceWalk
b5a4a732da fix: 健康分设置问题修复 2022-09-13 14:15:15 +08:00
GraceWalk
487862367e feat: 多集群列表支持编辑 & 代码结构优化 2022-09-13 14:14:15 +08:00
GraceWalk
5b63b9ce67 feat: 左侧栏内容调整 2022-09-13 14:12:34 +08:00
GraceWalk
afbcd3e1df fix: Broker/Topic 图表详情 bugfix & 体验优化 2022-09-13 14:09:57 +08:00
GraceWalk
12b82c1395 fix: 图表展示 bugfix & 优化 2022-09-13 14:09:03 +08:00
GraceWalk
863b765e0d feat: 新增 RenderEmpty 组件 2022-09-13 14:04:55 +08:00
GraceWalk
731429c51c fix: 系统管理子应用补充返回 code 码拦截逻辑 2022-09-13 11:44:47 +08:00
GraceWalk
66f3bc61fe fix: 创建/编辑角色优化 2022-09-13 11:44:08 +08:00
GraceWalk
4efe35dd51 fix: 项目打包构建流程优化 & 补充说明 2022-09-13 11:43:30 +08:00
EricZeng
c92461ef93 Merge pull request #565 from didi/dev
合并开发分支
2022-09-12 05:53:34 +08:00
superspeedone
405e6e0c1d Topic消息查询支持Timestamp排序,接口支持按指定日期查询 2022-09-09 18:56:45 +08:00
superspeedone
0d227aef49 Topic消息查询支持Timestamp排序,接口支持按指定日期查询 2022-09-09 17:29:22 +08:00
superspeedone
0e49002f42 Topic消息查询支持Timestamp排序,接口支持按指定日期查询 2022-09-09 15:45:31 +08:00
wangdongfang-aden
2e016800e0 Merge pull request #568 from wangdongfang-aden/dev
使用3.0.0-beta.1镜像
2022-09-09 15:18:36 +08:00
wangdongfang-aden
09f317b991 使用3.0.0-beta.1镜像 2022-09-09 15:17:02 +08:00
wangdongfang-aden
5a48cb1547 使用3.0.0-beta.1镜像 2022-09-09 15:16:33 +08:00
wangdongfang-aden
f632febf33 Update Chart.yaml 2022-09-09 15:15:56 +08:00
wangdongfang-aden
3c53467943 使用3.0.0-beta.1镜像 2022-09-09 15:15:24 +08:00
qiubo
d358c0f4f7 修复ES total 查询转换异常问题 2022-09-09 10:22:26 +08:00
zengqiao
de977a5b32 加快添加集群后的信息获取的速度 2022-09-08 14:21:26 +08:00
zengqiao
703d685d59 Task任务分为metrics,common,metadata三类,每一类任务的执行对应一个线程池,减少对Job模块线程池的依赖 2022-09-08 14:17:15 +08:00
zengqiao
31a5f17408 修复旧副本数为NULL的问题 2022-09-08 13:53:41 +08:00
zengqiao
c40ae3c455 增加副本变更任务结束后,进行优先副本选举的操作 2022-09-08 13:52:51 +08:00
zengqiao
b71a34279e 调整默认的权限 2022-09-08 13:50:08 +08:00
zengqiao
8f8c0c4eda 删除无效文件 2022-09-08 13:49:07 +08:00
zengqiao
3a384f0e34 优化重置Offset时的错误信息 2022-09-08 13:47:21 +08:00
zengqiao
cf7bc11cbd 增加登录系统对接文档 2022-09-08 13:46:45 +08:00
EricZeng
be60ae8399 Merge pull request #560 from didi/dev
合并开发分支
2022-09-07 14:20:04 +08:00
superspeedone
8e50d145d5 Topic消息查询支持Timestamp排序,支持查询最新消息或最早消息 #534 2022-09-07 11:17:59 +08:00
zengqiao
7a3d15525c 支持Ldap登录认证 2022-09-06 15:25:27 +08:00
zengqiao
64f32d8b24 bump logi-security version to 2.10.13 and logi-elasticsearch-client version to 1.0.24 2022-09-06 15:24:05 +08:00
zengqiao
949d6ba605 集群Broker列表,增加Controller角色信息 2022-09-06 15:22:57 +08:00
zengqiao
ceb8db09f4 优化查询Topic信息,Topic不存在时的错误提示 2022-09-06 15:21:53 +08:00
zengqiao
ed05a0ebb8 修复集群Group列表搜索反馈结果错误问题 2022-09-06 15:20:50 +08:00
zengqiao
a7cbb76655 修复Offset单位错误问题 2022-09-06 15:19:29 +08:00
zengqiao
93cbfa0b1f 后端增加页面权限点 2022-09-06 15:18:54 +08:00
zengqiao
6120613a98 Merge branch 'dev' of github.com:didi/KnowStreaming into dev 2022-09-06 15:15:14 +08:00
EricZeng
dbd00db159 Merge pull request #559 from didi/master
合并主分支
2022-09-06 15:14:18 +08:00
zengqiao
befde952f5 补充KS连接特定JMX IP的说明 2022-09-06 15:13:03 +08:00
zengqiao
1aa759e5be bump version to 3.0.0-beta.2 2022-09-06 10:25:14 +08:00
EricZeng
2de27719c1 Merge pull request #557 from didi/dev
合并开发分支
2022-09-05 17:07:51 +08:00
EricZeng
21db57b537 Merge pull request #556 from GraceWalk/dev
fix: 锁定 tree-changes 版本
2022-09-05 16:55:22 +08:00
GraceWalk
dfe8d09477 fix: 锁定 tree-changes 版本 2022-09-05 16:53:49 +08:00
EricZeng
90dfa22c64 Merge pull request #554 from WYAOBO/master
修复jmx,ip错误
2022-09-05 16:08:03 +08:00
superspeedone
0f35427645 Merge branch 'dev' of https://github.com/superspeedone/KnowStreaming into dev 2022-09-05 15:41:08 +08:00
wangyaobo
7909f60ff8 修复jmx ip选择错误 2022-09-05 15:40:01 +08:00
WYAOBO
9a1a8a4c30 Merge pull request #1 from didi/dev
Dev
2022-09-05 15:31:34 +08:00
yanweiwen
fa7ad64140 Topic消息查询支持Timestamp排序,支持查询最新消息或最早消息 #534 2022-09-05 14:46:40 +08:00
EricZeng
8a0c23339d Merge pull request #551 from didi/dev
合并开发分支
2022-09-05 11:31:40 +08:00
zengqiao
e7ab3aff16 增加升级至v3.0.0-beta.1的内容 2022-09-05 11:29:38 +08:00
zengqiao
d0948797b9 增加v3.0.0-beta.1信息 2022-09-05 11:28:51 +08:00
EricZeng
04a5e17451 Merge pull request #550 from didi/dev
合并开发分支
2022-09-03 08:50:34 +08:00
zengqiao
47065c8042 文档名修改 2022-09-03 08:49:47 +08:00
EricZeng
488c778736 Merge pull request #548 from WYAOBO/master
新增task使用文档
2022-09-03 08:46:43 +08:00
EricZeng
d10a7bcc75 Merge pull request #549 from didi/dev
合并开发分支
2022-09-03 08:42:57 +08:00
zengqiao
afe44a2537 自动创建ES索引 & 主动填补指标历史曲线缺少的点 2022-09-03 08:34:32 +08:00
zengqiao
9eadafe850 健康巡检交由KS线程进行执行 2022-09-03 08:32:33 +08:00
zengqiao
dab3eefcc0 调整超时时间 2022-09-03 08:31:46 +08:00
zengqiao
2b9a6b28d8 消费组信息获取时不包含认证信息 2022-09-03 08:31:06 +08:00
zengqiao
465f98ca2b 日志错误信息中补充Topic名称信息 2022-09-03 08:28:38 +08:00
zengqiao
a0312be4fd Jmx连接的主机IP支持可选择 2022-09-03 08:26:50 +08:00
WYAOBO
4a5161372b Update KnowStreaming Task模块简介.md 2022-09-02 18:06:06 +08:00
WYAOBO
4c9921f752 Update KnowStreaming Task模块简介.md 2022-09-02 18:05:10 +08:00
WYAOBO
6dd72d40ee Update KnowStreaming Task模块简介.md 2022-09-02 18:04:14 +08:00
WYAOBO
db49c234bb Update KnowStreaming Task模块简介.md 2022-09-02 18:03:09 +08:00
WYAOBO
4a9df0c4d9 Update KnowStreaming Task模块简介.md 2022-09-02 18:02:21 +08:00
wangyaobo
461573c2ba task文档 2022-09-02 17:57:56 +08:00
wangyaobo
291992753f 文档更新 2022-09-02 17:28:18 +08:00
wangyaobo
fcefe7ac38 文档 2022-09-02 17:19:44 +08:00
zengqiao
7da712fcff 文案错误订正 2022-09-02 14:56:11 +08:00
zengqiao
2fd8687624 补充数据库错误说明 2022-09-02 14:55:02 +08:00
EricZeng
639b1f8336 Merge pull request #542 from didi/dev
前端优化
2022-09-02 12:46:04 +08:00
EricZeng
ab3b83e42a Merge pull request #539 from ruanliang-hualun/dev
备注与实现不一致,优先使用EXTERNAL_KEY
2022-09-02 12:42:30 +08:00
lucasun
4818629c40 Merge pull request #541 from GraceWalk/dev
前端代码打包优化 & bug 修复
2022-09-02 11:08:05 +08:00
ruanliang01
61784c860a 备注与实现不一致,优先使用EXTERNAL_KEY 2022-09-01 21:07:15 +08:00
GraceWalk
d5667254f2 fix: 样式 & 文案细节优化 2022-09-01 19:51:27 +08:00
GraceWalk
af2b93983f fix: 修正均衡卡片展示状态 2022-09-01 19:50:36 +08:00
GraceWalk
8281301cbd fix: 修复 Consumer 点击 Stop 不发停止检索问题修复 2022-09-01 19:50:04 +08:00
GraceWalk
0043ab8371 feat: 优化前端打包体积 2022-09-01 19:46:50 +08:00
GraceWalk
500eaace82 feat: 前端打包增加分包策略 2022-09-01 19:44:37 +08:00
GraceWalk
28e8540c78 fix: 修复创建/编辑角色报错的问题 2022-09-01 19:43:33 +08:00
EricZeng
69adf682e2 Merge pull request #537 from didi/dev
开发合入主分支
2022-09-01 17:06:32 +08:00
EricZeng
69cd1ff6e1 Merge pull request #536 from didi/master
合并主分支
2022-09-01 17:02:32 +08:00
EricZeng
415d67cc32 Merge pull request #535 from chaixiaoxue/dev
Fix page does not take effect
2022-09-01 16:51:32 +08:00
TinoC
46a2fec79b Fix page does not take effect 2022-09-01 16:35:30 +08:00
EricZeng
560b322fca Merge pull request #532 from Strangevy/dev_v3.0.0
Modify the default value of logi_security_oplog.operation_methods
2022-09-01 13:34:09 +08:00
Strangevy
effe17ac85 logi_security_oplog.operation_methods default '' 2022-09-01 10:28:00 +08:00
EricZeng
7699acfc1b Merge pull request #531 from didi/dev_v3.0.0
1、后端补充leader选举能力;2、图片链接调整;3、健康检查文案调整;4、版本列表增加排序;5、指标采集缓存时间调整;
2022-08-31 19:29:46 +08:00
zengqiao
6e058240b3 指标采集缓存时间调整 2022-08-31 17:15:49 +08:00
zengqiao
f005c6bc44 版本列表增加排序 2022-08-31 17:14:56 +08:00
zengqiao
7be462599f 健康检查文案调整 2022-08-31 17:13:18 +08:00
zengqiao
271ab432d9 图片链接调整 2022-08-31 17:12:34 +08:00
zengqiao
4114777a4e 补充leader选举能力 2022-08-31 17:11:12 +08:00
Peng
9189a54442 Update README.md 2022-08-30 21:56:43 +08:00
EricZeng
b95ee762e3 Merge pull request #529 from didi/dev_v3.0.0
解决raft集群controller信息不断记录问题
2022-08-30 19:42:06 +08:00
zengqiao
9e3c4dc06b 解决raft集群controller信息不断记录问题 2022-08-30 19:39:15 +08:00
EricZeng
1891a3ac86 Merge pull request #526 from yzydtc/dev_v3.0.0
[KnowStreaming-524] fix one markdown link 404 not found
2022-08-30 14:53:22 +08:00
zhiyuan.yang
9ecdcac06d [KnowStreaming-524] fix one markdown link 404 not found 2022-08-30 14:47:54 +08:00
EricZeng
790cb6a2e1 Merge pull request #521 from didi/dev_v3.0.0
1、bump version;2、ES客户端数可配置化;3、采样优化;4、文档修改;
2022-08-30 13:36:36 +08:00
EricZeng
4a98e5f025 Merge pull request #520 from didi/dev_v3.0.0
1、删除无效字段;2、日志优化;
2022-08-29 20:33:55 +08:00
zengqiao
507abc1d84 bump version 2022-08-29 20:32:41 +08:00
zengqiao
9b732fbbad ES客户端数可配置化 2022-08-29 20:32:01 +08:00
zengqiao
220f1c6fc3 采样优化 2022-08-29 20:31:34 +08:00
zengqiao
7a950c67b6 文档修改 2022-08-29 20:30:47 +08:00
zengqiao
78f625dc8c 日志优化 2022-08-29 16:45:48 +08:00
zengqiao
211d26a3ed 删除无效字段 2022-08-29 16:44:53 +08:00
EricZeng
dce2bc6326 Merge pull request #519 from didi/master
合并主分支
2022-08-29 16:24:02 +08:00
EricZeng
90e5d7f6f0 Merge pull request #516 from didi/v3.0.0-beta
V3.0.0 beta
2022-08-28 14:07:14 +08:00
Peng
71d4e0f9e6 Update README.md 2022-08-28 06:12:13 +08:00
Peng
580b4534e0 Update README.md 2022-08-28 06:12:03 +08:00
Peng
fc835e09c6 Update README.md 2022-08-28 06:10:24 +08:00
Peng
c6e782a637 Update README.md 2022-08-28 06:09:59 +08:00
Peng
1ddfbfc833 Update README.md 2022-08-28 06:09:17 +08:00
EricZeng
dbf637fe0f Merge pull request #515 from didi/master
merge master
2022-08-26 18:45:25 +08:00
EricZeng
110e129622 Merge pull request #514 from didi/dev
Dev
2022-08-26 18:44:36 +08:00
zengqiao
677e9d1b54 Helm调整 2022-08-26 18:43:50 +08:00
EricZeng
ad2adb905e Merge pull request #513 from GraceWalk/master
knowdesign 依赖版本锁定为 1.3.7
2022-08-26 17:44:28 +08:00
GraceWalk
5e9de7ac14 knowdesign 依赖版本锁定为 1.3.7 2022-08-26 17:42:09 +08:00
EricZeng
c63fb8380c Merge pull request #512 from didi/master
合并主分支
2022-08-26 17:23:37 +08:00
EricZeng
2d39acc224 Merge pull request #511 from didi/dev_v3.0.0
删除无关脚本
2022-08-26 11:30:56 +08:00
zengqiao
e68358e05f 删除无关脚本 2022-08-26 11:29:30 +08:00
EricZeng
a96f10edf0 Merge pull request #510 from didi/dev_v3.0.0
v3.0.0-beta版本
2022-08-26 11:24:36 +08:00
zengqiao
f03d94935b 均衡设置修复 2022-08-26 10:51:37 +08:00
zengqiao
9c1320cd95 SQL调整 2022-08-26 10:51:12 +08:00
zengqiao
4f2ae588a5 补充3.0.0-beta的信息 2022-08-26 10:50:54 +08:00
zengqiao
eff51034b7 调整链接及文案 2022-08-26 10:49:17 +08:00
zengqiao
18832dc448 前端调整 2022-08-25 20:32:49 +08:00
zengqiao
5262ae8907 采样调整 2022-08-25 20:30:15 +08:00
zengqiao
7f251679fa Job模块创建任务失败修复 2022-08-25 19:50:50 +08:00
zengqiao
5f5920b427 分页工具类优化 2022-08-25 19:50:08 +08:00
zengqiao
65a16d058a 增加返回Kafka内部Topic 2022-08-25 19:49:47 +08:00
zengqiao
a73484d23a 免登录接口调用说明修改 2022-08-25 19:48:58 +08:00
zengqiao
47887a20c6 前端优化 2022-08-24 21:29:57 +08:00
zengqiao
9465c6f198 删除无效说明 2022-08-24 21:28:24 +08:00
zengqiao
c09872c8c2 调整默认配置 2022-08-24 21:28:04 +08:00
zengqiao
b0501cc80d 修复db字段缺少默认值导致写入数据失败问题 2022-08-24 21:27:43 +08:00
zengqiao
f0792db6b3 兼容0.10.2版本offset信息的获取 2022-08-24 21:27:04 +08:00
zengqiao
e1514c901b 同步代码 2022-08-23 19:01:53 +08:00
zengqiao
e90c5003ae 官网路径调整 2022-08-23 18:30:05 +08:00
zengqiao
92a0d5d52c README调整 2022-08-22 18:40:23 +08:00
zengqiao
8912cb5323 README调整 2022-08-22 18:30:45 +08:00
zengqiao
d008c19149 文档更新 & 问题修复 2022-08-22 18:27:53 +08:00
EricZeng
e844b6444a Merge pull request #509 from Liubey/dev_v3.0.0
fix wrong link in readme
2022-08-19 10:14:50 +08:00
Liubey
02606cdce2 fix wrong link 2022-08-19 09:34:03 +08:00
Liubey
0081720f0e fix wrong link
Signed-off-by: Liubey <liubey1214@gmail.com>
2022-08-19 09:30:16 +08:00
Peng
cca1e92868 Update README.md 2022-08-18 18:03:00 +08:00
Peng
69b774a074 Update README.md 2022-08-18 17:51:41 +08:00
zengqiao
5656b03fb4 合并主分支 2022-08-18 17:42:18 +08:00
Peng
02d0dcbb7f Update README.md 2022-08-18 17:33:01 +08:00
zengqiao
7b2e06df12 change license 2022-08-18 17:32:07 +08:00
zengqiao
4259ae63d7 change license 2022-08-18 17:25:02 +08:00
zengqiao
d7b11803bc bump version to 3.0.0-beta 2022-08-18 17:21:43 +08:00
Peng
fed298a6d4 Update README.md 2022-08-18 17:10:00 +08:00
zengqiao
51832385b1 初始化3.0.0版本 2022-08-18 17:04:05 +08:00
Peng
462303fca0 Update README.md 2022-08-11 14:49:04 +08:00
Peng
4405703e42 Update README.md 2022-08-09 11:11:03 +08:00
Peng
23e398e121 Update README.md 2022-08-09 10:05:24 +08:00
Peng
b17bb89d04 Update README.md 2022-08-09 09:56:35 +08:00
Peng
5590cebf8f Update README.md 2022-08-09 09:54:44 +08:00
Peng
1fa043f09d Update README.md 2022-08-09 09:52:30 +08:00
Peng
3bd0af1451 Update README.md 2022-08-09 09:49:09 +08:00
Peng
1545962745 Update README.md
添加star趋势
2022-07-29 16:05:16 +08:00
EricZeng
d032571681 Merge pull request #503 from didi/dev
补充FutureUtil类
2022-07-06 16:21:24 +08:00
zengqiao
33fb0acc7e 补充FutureUtil类 2022-07-06 15:18:53 +08:00
EricZeng
1ec68a91e2 Merge pull request #499 from gzldc/master
关于logback的版本漏洞修复 #488
2022-07-01 08:57:11 +08:00
shishuai
a23c113a46 关于logback的版本漏洞修复 #488 2022-06-29 22:06:33 +08:00
Peng
371ae2c0a5 Update README.md 2022-06-28 10:19:18 +08:00
Peng
8f8f6ffa27 Update README.md 2022-06-28 10:18:03 +08:00
EricZeng
475fe0d91f Merge pull request #496 from didi/dev
删除application中无效的版本信息配置
2022-06-23 10:31:53 +08:00
zengqiao
3d74e60d03 删除application中无效的版本信息配置 2022-06-23 10:31:07 +08:00
EricZeng
83ac83bb28 Merge pull request #495 from didi/master
合并主分支
2022-06-23 10:29:22 +08:00
EricZeng
8478fb857c Merge pull request #494 from didi/dev
1、打包时自动生成版本信息及git提交信息;2、优化swagger对版本信息的获取;
2022-06-23 10:23:10 +08:00
zengqiao
7074bdaa9f 1、打包时自动生成版本信息及git提交信息;2、优化swagger对版本信息的获取 2022-06-23 10:17:36 +08:00
EricZeng
58164294cc Update README.md 2022-03-17 10:02:10 +08:00
EricZeng
7c0e9df156 Merge pull request #479 from didi/dev
集成测试&单元测试补充
2022-03-15 13:49:33 +08:00
EricZeng
bd62212ecb Merge pull request #472 from didi/dev_v2.5.0_addtest
LogiKM增加单元测试和集成测试
2022-03-07 17:29:20 +08:00
EricZeng
2292039b42 Merge pull request #474 from houxiufeng/dev_v2.5.0_addtest
modify AbstractSingleSignOnTest error
2022-03-07 17:28:39 +08:00
houxiufeng
73f8da8d5a modify AbstractSingleSignOnTest error 2022-03-07 17:10:15 +08:00
EricZeng
e51dbe0ca7 Merge pull request #473 from didi/dev
Dev
2022-03-07 14:49:52 +08:00
xuguang
482a375e31 Merge branch 'v2.5.0' of github.com:didi/LogiKM into dev_v2.5.0_addtest 2022-03-04 16:13:54 +08:00
xuguang
689c5ce455 增加单元测试和集成测试文档 & 问题修改 2022-03-04 16:04:36 +08:00
EricZeng
734a020ecc Merge pull request #470 from didi/dev
修复删除指标时,数据越界问题
2022-03-03 19:04:03 +08:00
zengqiao
44d537f78c 修复删除指标时,数据越界问题 2022-03-03 11:26:50 +08:00
zengqiao
b4c60eb910 增加过滤掉Broker的连接信息时,增加请求类型的判断 2022-02-28 12:07:50 +08:00
xuguang
e120b32375 剩余controller接口集成测试 2022-02-25 17:04:46 +08:00
xuguang
de54966d30 NormalJmxController注释修改 2022-02-24 16:20:37 +08:00
xuguang
39a6302c18 部分controller接口集成测试 2022-02-21 10:43:54 +08:00
xuguang
05ceeea4b0 bugfix: LogicalClusterDTO和RegionDTO校验参数冗余 & RdLogicalClusterController解释修正 2022-02-21 10:41:50 +08:00
EricZeng
9f8e3373a8 Merge pull request #465 from hailanxin/master
增加jmx连接失败的一个情况和解决方法
2022-02-17 21:08:21 +08:00
hailanxin
42521cbae4 Update connect_jmx_failed.md 2022-02-17 14:02:43 +08:00
hailanxin
b23c35197e 增加jmx连接失败的一个情况和解决方法 2022-02-17 14:02:13 +08:00
EricZeng
70f28d9ac4 Merge pull request #461 from zzzhangqi/rainbond
add rainbond installation
2022-02-16 10:18:15 +08:00
zhangqi
912d73d98a add rainbond installation 2022-02-15 18:34:49 +08:00
EricZeng
2a720fce6f Merge pull request #451 from zzzhangqi/master
Support docker source code construction
2022-02-15 18:05:46 +08:00
zhangqi
e4534c359f Support docker source code construction 2022-02-15 10:43:11 +08:00
zengqiao
b91bec15f2 bump version to v2.6.1 2022-01-26 15:33:08 +08:00
EricZeng
67ad5cacb7 Merge pull request #453 from didi/dev
1.设置版本为2.6.0; 2.补充v2.6.0的release notes
2022-01-24 13:32:13 +08:00
zengqiao
b4a739476a 补充v2.6.0的release notes 2022-01-24 13:30:48 +08:00
zengqiao
a7bf2085db 设置版本为2.6.0 2022-01-24 13:30:05 +08:00
zengqiao
c3802cf48b 去除@project.version@说明 2022-01-24 13:27:10 +08:00
EricZeng
54711c4491 Merge pull request #452 from didi/dev
允许配置文件中不进行配置
2022-01-24 13:22:48 +08:00
zengqiao
fcb52a69c0 允许配置文件中不进行配置 2022-01-24 13:16:30 +08:00
zengqiao
1b632f9754 调整文章顺序 2022-01-22 13:24:07 +08:00
EricZeng
73d7a0ecdc Merge pull request #450 from didi/dev
调整Task模块日志及Api请求统计日志的输出
2022-01-21 19:25:01 +08:00
EricZeng
08943593b3 Merge pull request #449 from kingdomrushing/dev_v2.6.0
增加对定时任务的说明
2022-01-21 15:27:17 +08:00
xuguang
c949a88f20 周期任务说明文档内容补充 2022-01-21 13:59:46 +08:00
xuguang
a49c11f655 统一定时任务cron格式 2022-01-21 13:14:31 +08:00
xuguang
a66aed4a88 增加对定时任务的注释和说明文档 2022-01-21 13:12:58 +08:00
xuguang
0045c953a0 Merge branch 'dev' of github.com:kingdomrushing/LogiKM into dev_v2.6.0 2022-01-21 10:59:38 +08:00
xuguang
fdce41b451 增加对定时任务的说明 2022-01-21 10:50:29 +08:00
xuguang
4d5e4d0f00 去掉单元测试敏感信息 2022-01-21 10:08:15 +08:00
didi
82c9b6481e 真实环境配置定义在配置文件中 2022-01-20 22:33:52 +08:00
EricZeng
053d4dcb18 Merge pull request #447 from didi/dev
补充KCM使用文档
2022-01-20 14:29:18 +08:00
zengqiao
e1b2c442aa 调整Task模块日志及Api请求统计日志的输出 2022-01-20 14:28:39 +08:00
zengqiao
0ed8ba8ca4 补充KCM使用文档 2022-01-20 11:47:55 +08:00
xuguang
f195847c68 Merge branch 'dev_v2.5.0_addtest' of github.com:didi/LogiKM into dev_v2.5.0_addtest 2022-01-20 10:24:10 +08:00
xuguang
5beb13b17e NormalAppControllerTest,OpAuthorityControllerTest 2022-01-20 10:22:20 +08:00
EricZeng
7d9ec05062 Merge pull request #445 from didi/dev
优化cron表达式解析失败后退出无任何日志提示问题
2022-01-20 10:22:16 +08:00
xuguang
fc604a9eaf 集成测试:物理集群的增删改查 2022-01-20 10:15:42 +08:00
zengqiao
4f3c1ad9b6 优化cron表达式解析失败后退出无任何日志提示问题 2022-01-19 20:23:18 +08:00
EricZeng
6d45ed586c Merge pull request #444 from didi/dev
1. 完善搜索用户时可以显示用户的其他元信息(完善chineseName和department); 2. 升级至v2.6.0说明
2022-01-19 15:25:11 +08:00
didi
1afb633b4f Merge branch 'dev_v2.5.0_addtest' of github.com:didi/LogiKM into dev_v2.5.0_addtest 2022-01-18 17:08:49 +08:00
didi
34d9f9174b 所有单测重新测试 2022-01-18 17:07:21 +08:00
zengqiao
3b0c208eff 补充v2.6.0升级说明及修复新增account提示mysql错误问题 2022-01-18 15:48:05 +08:00
EricZeng
05022f8db4 Merge pull request #442 from Huyueeer/devLookUserName
LDAP忽略大小写 & 认证携带必要元信息(姓名、部门、邮箱)
2022-01-18 15:26:32 +08:00
EricZeng
3336de457a Merge pull request #441 from didi/dev
bump springboot version to 2.1.18 and ignore springframework version …
2022-01-18 14:42:18 +08:00
EricZeng
10a27bc29c Merge pull request #443 from lucasun/master
V2.6.0 FE
2022-01-18 14:24:17 +08:00
Hu.Yue
542e5d3c2d Merge branch 'didi-dev' into devLookUserName 2022-01-18 14:09:05 +08:00
lucasun
7372617b14 Merge branch 'didi:master' into master 2022-01-18 14:07:44 +08:00
Hu.Yue
89735a130b Merge branch 'dev' of https://github.com/didi/LogiKM into didi-dev 2022-01-18 14:07:34 +08:00
孙超
859cf74bd6 2.6.0问题修复 2022-01-18 14:05:32 +08:00
zengqiao
e2744ab399 bump version to 2.6.0 2022-01-18 14:00:20 +08:00
Hu.Yue
16bd065098 Merge branch 'dev' of https://github.com/didi/LogiKM into didi-dev 2022-01-18 13:58:16 +08:00
zengqiao
71c52e6dd7 bump springboot version to 2.1.18 and ignore springframework version config 2022-01-17 21:18:06 +08:00
EricZeng
a7f8c3ced3 Merge pull request #440 from didi/dev
1.安装部署脚本LogiKM可配置; 2.增加网关接口及第三方接口可直接调用的开关;
2022-01-17 21:08:55 +08:00
zengqiao
f3f0432c65 1.安装部署脚本LogiKM可配置; 2.增加网关接口及第三方接口可直接调用的开关; 2022-01-17 20:41:53 +08:00
EricZeng
426ba2d150 Merge pull request #439 from didi/dev
1、修复swagger抛出的NumberFormatException问题; 2、Swagger提示版本和POM中版本通过配置保持一致; 3、梳理Task模块任务-BrokerMetrics任务梳理;
2022-01-17 20:39:10 +08:00
zengqiao
2790099efa 梳理Task模块任务-BrokerMetrics任务梳理 2022-01-17 15:28:36 +08:00
zengqiao
f6ba8bc95e Swagger提示版本和POM中版本通过配置保持一致 2022-01-17 13:17:07 +08:00
zengqiao
d6181522c0 修复swagger抛出的NumberFormatException问题 2022-01-17 11:42:30 +08:00
EricZeng
04cf071ca6 Merge pull request #437 from didi/dev
增加简单回退工具类,增加Jmx连接失败回退功能机制,优化Jmx连接失败日志
2022-01-15 13:56:44 +08:00
zengqiao
e4371b5d02 修正注释 2022-01-14 14:28:45 +08:00
zengqiao
52c52b2a0d 增加简单回退工具类,增加Jmx连接失败回退功能机制,优化Jmx连接失败日志 2022-01-14 14:24:04 +08:00
EricZeng
8f40f10575 Merge pull request #436 from didi/dev
1. 增加对BrokerMetadata中endpoints为internal|External方式的解析; 2. 增加线程池、客户端池可配置; 3. fix config incorrectly comment
2022-01-13 16:28:34 +08:00
zengqiao
fe0f6fcd0b fix config incorrectly comment 2022-01-13 16:02:33 +08:00
EricZeng
31b1ad8bb4 Merge pull request #435 from kingdomrushing/dev_v2.6.0
增加线程池、客户端池可配置 & 增加对BrokerMetadata中endpoints为internal|External方式的解析
2022-01-13 15:51:54 +08:00
xuguang
373680d854 线程池 & BrokerMetadata 问题修复 2022-01-13 15:39:39 +08:00
xuguang
9e3bc80495 线程池 & BrokerMetadata 问题修复 2022-01-13 15:35:11 +08:00
EricZeng
89405fe003 Merge pull request #434 from didi/fix_2.5.0
修复console模块关闭问题及前端文件名错误问题
2022-01-13 14:00:01 +08:00
shirenchuang
b9ea3865a5 升级到2.5版本
(cherry picked from commit 5bc6eb6774)
2022-01-13 13:47:21 +08:00
孙超
b5bd643814 修复图片名称大小写问题
(cherry picked from commit ada2718b5e)
2022-01-13 13:46:06 +08:00
xuguang
52ccaeffd5 解决依赖冲突 2022-01-13 11:48:43 +08:00
kingdomrushing
18136c12fd Merge branch 'didi:master' into dev_v2.6.0 2022-01-13 11:41:20 +08:00
EricZeng
dec3f9e75e Merge pull request #429 from didi/dev
ldap config add default value
2022-01-13 11:40:46 +08:00
EricZeng
ccc0ee4d18 Merge pull request #432 from didi/master
merge master to dev
2022-01-13 11:40:17 +08:00
kingdomrushing
69e9708080 Merge branch 'dev' into dev_v2.6.0 2022-01-13 11:39:50 +08:00
lucasun
5944ba099a Merge pull request #431 from lucasun/hotfix/2.5.0_fe
修复图片名称大小写问题
2022-01-13 11:17:25 +08:00
孙超
ada2718b5e 修复图片名称大小写问题 2022-01-13 11:13:26 +08:00
lucasun
1f87bd63e7 Merge pull request #416 from potaaaaaato/master
update echarts from v4 to v5
2022-01-13 10:55:34 +08:00
xuguang
c0f3259cf6 增加线程池、客户端池可配置 2022-01-12 19:56:37 +08:00
EricZeng
e1d5749a40 Merge pull request #428 from didi/dev
Dockerfile mv application.yml to fix start failed problem
2022-01-12 18:14:20 +08:00
zengqiao
a8d7eb27d9 ldap config add default value 2022-01-12 18:03:16 +08:00
zengqiao
1eecdf3829 Dockerfile mv application.yml to fix start failed problem 2022-01-12 17:45:40 +08:00
EricZeng
be8b345889 Merge pull request #427 from didi/dev
del unused jmx_prometheus_javaagent-0.14.0.jar
2022-01-12 17:40:48 +08:00
zengqiao
074da389b3 del unused jmx_prometheus_javaagent-0.14.0.jar 2022-01-12 17:32:13 +08:00
xuguang
4df2dc09fe 增加对BrokerMetadata中endpoints为internal|External方式的解析 2022-01-12 16:15:46 +08:00
EricZeng
e8d42ba074 Merge pull request #425 from didi/dev
1. optimize reassign task-name; 2. ignore read kafka-controller data when znode not exist; 3.add region created event and handle it to cal region capacity immediately;
2022-01-12 11:44:22 +08:00
zengqiao
c036483680 add region created event and handle it to cal region capacity immediately 2022-01-11 17:49:17 +08:00
zengqiao
2818584db6 ignore read kafka-controller data when znode not exist 2022-01-11 17:19:14 +08:00
zengqiao
37585f760d optimize reassign task-name 2022-01-11 16:57:24 +08:00
EricZeng
f5477a03a1 Merge pull request #364 from Huyueeer/devConfig
平台配置EXPIRED_TOPIC_CONFIG新增正则过滤Topic
2022-01-11 16:33:21 +08:00
EricZeng
50388425b2 Merge pull request #424 from didi/dev
1.add lombok; 2.support change delete metrics rate;
2022-01-11 16:29:43 +08:00
zengqiao
725c59eab0 support change delete metrics rate 2022-01-11 16:03:22 +08:00
zengqiao
7bf1de29a4 add lombok 2022-01-11 14:04:48 +08:00
EricZeng
d90c3fc7dd Merge pull request #423 from didi/dev
1. bump swagger version; 2. fix NPE when flush logical cluster and physical cluster not in cache or not exist; 3. JmxConnectorWrap's log add cluster and broker info;
2022-01-11 13:16:46 +08:00
zengqiao
80785ce072 JmxConnectorWrap's log add cluster and broker info 2022-01-11 11:45:28 +08:00
zengqiao
44ea896de8 fix NPE when flush logical cluster and physical cluster not in cache or not exist 2022-01-11 11:37:23 +08:00
zengqiao
d30cb8a0f0 bump swagger version 2022-01-11 11:24:43 +08:00
EricZeng
6c7b333b34 Merge pull request #422 from didi/dev
网关增加配置及修改配置时,version不变化问题修复
2022-01-10 10:26:49 +08:00
石臻臻的杂货铺
6d34a00e77 Update README.md 2022-01-08 15:02:07 +08:00
xuguang
1f353e10ce application.yml修改 2022-01-07 15:58:12 +08:00
zengqiao
4e10f8d1c5 网关增加配置及修改配置时,version不变化问题修复 2022-01-07 15:35:14 +08:00
EricZeng
a22cd853fc Merge pull request #421 from didi/dev
修复批量往DB写入空指标数组时报SQL语法异常的问题
2022-01-07 14:16:07 +08:00
zengqiao
354e0d6a87 修复批量往DB写入空指标数组时报SQL语法异常的问题 2022-01-07 13:55:42 +08:00
EricZeng
dfabe28645 Merge pull request #420 from didi/master
合并主分支
2022-01-07 13:38:10 +08:00
EricZeng
fce230da48 Merge pull request #414 from didi/dev
设置Log4j 2的版本为2.16.0
2022-01-07 13:36:50 +08:00
didi
055ba9bda6 Merge branch 'dev_v2.5.0_addtest' of github.com:didi/LogiKM into dev_v2.5.0_addtest
 Conflicts:
	kafka-manager-core/src/test/java/com/xiaojukeji/kafka/manager/service/service/ExpertServiceTest.java
2022-01-07 11:50:15 +08:00
didi
ec19c3b4dd monitor、openapi、account模块下的单元测试 2022-01-07 11:43:31 +08:00
xuguang
37aa526404 单元测试:ClusterHostTaskServiceTest,ClusterRoleTaskServiceTest,ClusterTaskServiceTest,KafkaFileServiceTest,TopicCommandsTest,TopicReassignUtilsTest 2022-01-06 20:00:25 +08:00
xuguang
86c1faa40f bugfix: Result类问题修改 2022-01-06 11:33:17 +08:00
xuguang
8dcf15d0f9 kafka-manager-bpm包单元测试的编写 & bugfix 2022-01-04 17:29:28 +08:00
EricZeng
6835e1e680 Merge pull request #419 from ZHAOYINRUI/patch-6
更新视频教程地址
2022-01-04 11:07:52 +08:00
ZHAOYINRUI
d8f89b8f67 更新视频教程地址 2022-01-04 11:06:51 +08:00
eilenexuzhe
ec28eba781 feat: move webpack-dev-server to scripts 2021-12-27 17:12:51 +08:00
eilenexuzhe
5ef8fff5bc feat: update echarts v4 to v5 2021-12-27 17:12:02 +08:00
xuguang
4f317b76fa SpringTool.getUserName()方法中获取requestAttributes可能为null, 增加为null判断 2021-12-27 16:35:22 +08:00
didi
61672637dc Merge branch 'dev_v2.5.0_addtest' of github.com:didi/LogiKM into dev_v2.5.0_addtest 2021-12-27 14:56:48 +08:00
didi
ecf6e8f664 ConfigService,OperateRecordService,RegionService,ThrottleService,TopicExpiredService,TopicManagerService接口下的单元测试方法 2021-12-27 14:55:35 +08:00
xuguang
4115975320 kafka-manager-account, kafka-manager-bpm, kafka-manager-kcm, kafka-manager-monitor单元测试模块添加 2021-12-27 10:28:08 +08:00
didi
21904a8609 `TopicManagerServiceImpl的addAuthority中使用的是getId,应该是getAppId 2021-12-24 14:41:39 +08:00
Peng
10b0a3dabb Update README.md 2021-12-24 11:54:52 +08:00
xuguang
b2091e9aed 单元测试:AnalysisServiceTest && ConsumerServiceTest && JmxServiceTest &&
LogicalClusterServiceTest && ReassignServiceTest && TopicServiceTest
2021-12-23 18:17:47 +08:00
xuguang
f2cb5bd77c bugfix: TopicServiceImpl && JmxServiceImpl && ConsumerService && ConsumerServiceImpl 2021-12-23 18:15:40 +08:00
xuguang
19c61c52e6 bugfix: TopicService && TopicServiceImpl && ZookeeperServiceImpl 2021-12-22 16:04:06 +08:00
didi
b327359183 TopicManagerServiceImpl的modifyTopicByOp没有return ResultStatus.SUCCESS; 2021-12-21 18:06:23 +08:00
xuguang
9e9bb72e17 BrokerServiceTest && KafkaBillService && LogicalClusterServiceTest && AbstractAllocateQuotaStrategy && AbstractHealthScoreStrategy 单元测试 2021-12-20 10:26:43 +08:00
zengqiao
a23907e009 开启前端包 2021-12-17 15:45:45 +08:00
xuguang
ad131f5a2c bugfix: DidiHealthScoreStrategy.calTopicHealthScore计算topic健康分时,获取topic的brokerId异常 2021-12-17 14:41:04 +08:00
zengqiao
dbeae4ca68 设置log4j2版本为2.16.0以修复相关漏洞 2021-12-17 11:44:50 +08:00
zengqiao
0fb0e94848 Merge branch 'master' into dev 2021-12-16 22:29:45 +08:00
石臻臻的杂货铺
95d2a82d35 Merge pull request #412 from didi/v2.5.0
升级到2.5版本
2021-12-16 18:34:30 +08:00
shirenchuang
5bc6eb6774 升级到2.5版本 2021-12-16 18:28:51 +08:00
石臻臻的杂货铺
3ba81e9aaa Merge pull request #411 from didi/v2.5.0
升级到2.5版本
2021-12-16 15:28:34 +08:00
shirenchuang
329a9b59c1 升级到2.5版本 2021-12-16 15:08:54 +08:00
xuguang
39cccd568e DiDiHealthScoreStrategy类中某些变量开头字母改成小写 2021-12-16 14:17:45 +08:00
didi
19b7f6ad8c RegionService下的updateRegion的重载方法含义错误;应该是根据regionId更新region,参数应该是regionId,不是clusterId 2021-12-14 18:32:14 +08:00
xuguang
41c000cf47 AuthorityServiceTest && SecurityServiceTest && TopicReportServiceTest && ClusterServiceTest && ZookeeperServiceTest单元测试 2021-12-14 18:30:12 +08:00
xuguang
1b8ea61e87 openTopicJmx方法的参数jmxSwitch需要判空 2021-12-13 18:16:23 +08:00
EricZeng
22c26e24b1 Merge pull request #410 from lucasun/hotfix/2.5.0_fe
修复顶导V2.5.0版本
2021-12-11 14:30:22 +08:00
孙超
396045177c 修复顶导V2.5.0版本 2021-12-11 14:25:43 +08:00
xuguang
4538593236 实现core包下TopicReportService接口单元测试 & TopicReportDao.xml中字段和关键字错误修改 2021-12-08 15:50:53 +08:00
xuguang
8086ef355b 实现core包下AppService,AuthorityService,QuotaService接口单元测试 & TopicQuotaData中bug修改 2021-12-07 14:08:09 +08:00
xuguang
60d038fe46 实现core包下AppService接口单元测试 2021-12-06 14:46:11 +08:00
huqidong
ff0f4463be Logi-KM testng 测试环境搭建 & springboot 集成 & mock 测试用例编写. 2021-12-03 18:04:20 +08:00
EricZeng
820571d993 Merge pull request #408 from didi/master
merge master
2021-12-02 19:39:41 +08:00
EricZeng
e311d3767c Merge pull request #407 from didi/dev_v2.5.0
merge dev_v2.5.0 to master
2021-12-01 19:42:03 +08:00
EricZeng
24d7b80244 Merge pull request #406 from kingdomrushing/dev_v2.5.0
我的申请-审批中-审批时间置为空
2021-12-01 13:31:08 +08:00
xuguang
61f99e4d2e 我的申请-审批中-审批时间置为空 2021-12-01 12:54:02 +08:00
EricZeng
d5348bcf49 Merge pull request #405 from lucasun/dev_v2.5.0_fe
我的申请-审批列表列-申请时间、审批时间增加无数据判断
2021-12-01 11:27:58 +08:00
孙超
5d31d66365 我的申请-审批列表列-申请时间、审批时间增加无数据判断 2021-12-01 11:17:12 +08:00
EricZeng
29778a0154 Merge pull request #400 from lucasun/dev_v2.5.0_fe
Dev v2.5.0 fe
2021-11-30 15:01:49 +08:00
Peng
165c0a5866 Update README.md 2021-11-29 19:32:54 +08:00
EricZeng
588323961e Merge pull request #401 from didi/master
合并主分支到2.5开发分支
2021-11-23 18:50:27 +08:00
孙超
fd1c0b71c5 V2.5.0前端 更换二维码&前端bugfix 2021-11-23 17:48:10 +08:00
lucasun
54fbdcadf9 Merge branch 'didi:master' into master 2021-11-23 17:35:27 +08:00
石臻臻的杂货铺
69a30d0cf0 Merge pull request #399 from kingdomrushing/dev_v2.5.0
修复"新添加集群的时候,报watch的空指针异常"问题 & 修复"删除废弃Topic之后,Topic资源治理没有同步删除"问题
2021-11-22 17:35:36 +08:00
xuguang
b8f9b44f38 修复获取topic流量指标未按时间排序问题 2021-11-20 13:30:34 +08:00
xuguang
cbf17d4eb5 修复"新添加集群的时候,报watch的空指针异常"问题 & 修复"删除废弃Topic之后,Topic资源治理没有同步删除"问题 2021-11-19 19:27:19 +08:00
石臻臻的杂货铺
327e025262 Merge pull request #397 from kingdomrushing/dev_v2.5.0
Dev v2.5.0
2021-11-19 14:20:20 +08:00
xuguang
6b1e944bba 修复topic管理中topic编辑备注没有数据回显问题 2021-11-19 13:23:52 +08:00
EricZeng
668ed4d61b Merge pull request #396 from didi/dev_inc_monitor_indicators
补充新增上报监控系统指标说明文档
2021-11-16 22:32:20 +08:00
zengqiao
312c0584ed 补充新增上报监控系统指标说明文档 2021-11-16 22:20:35 +08:00
zengqiao
110d3acb58 补充新增上报监控系统指标说明文档 2021-11-16 22:16:35 +08:00
xuguang
ddbc60283b 将tomcat版本升级为8.5.72 & "我的审批"列表增加"通过时间"列,并支持按该列排序 & JMX连接关闭问题修复 2021-11-16 17:15:58 +08:00
shirenchuang
471bcecfd6 Merge branch 'v2.4.3' into dev_v2.5.0 2021-11-15 12:45:01 +08:00
shirenchuang
0245791b13 Merge remote-tracking branch 'origin/master' into dev_v2.5.0 2021-11-15 11:45:22 +08:00
shirenchuang
4794396ce8 Merge remote-tracking branch 'origin/master' into v2.4.3 2021-11-15 10:49:22 +08:00
EricZeng
c7088779d6 Merge pull request #395 from ZHAOYINRUI/patch-5
Update README.md
2021-11-11 15:21:11 +08:00
ZHAOYINRUI
672905da12 Update README.md 2021-11-11 15:19:29 +08:00
EricZeng
47172b13be Merge pull request #394 from ZHAOYINRUI/patch-4
Update README.md
2021-11-09 14:18:34 +08:00
ZHAOYINRUI
3668a10af6 Update README.md 2021-11-09 12:43:09 +08:00
EricZeng
a4e294c03f Merge pull request #393 from ZHAOYINRUI/patch-3
增加【Kafka中文社区】知识星球二维码
2021-11-08 17:44:15 +08:00
ZHAOYINRUI
3fd6f4003f 增加【Kafka中文社区】知识星球二维码 2021-11-08 17:39:08 +08:00
EricZeng
3eaf5cd530 Merge pull request #378 from didi/dev
白名单接口中仅保留登录接口
2021-09-21 11:09:36 +08:00
zengqiao
c344fd8ca4 白名单接口中仅保留登录接口 2021-09-21 11:00:33 +08:00
EricZeng
09639ca294 Merge pull request #377 from didi/dev
修复Sonar扫描问题
2021-09-21 10:58:36 +08:00
EricZeng
a81b6dca83 Merge pull request #376 from didi/master
merge master
2021-09-21 10:47:55 +08:00
mike.zhangliang
b74aefb08f Update README.md 2021-08-15 15:14:25 +08:00
huyueeer
fffc0c3add 完善搜索用户时可以显示用户的其他元信息(完善chineseName和department) 2021-08-12 15:28:23 +08:00
mike.zhangliang
757f90aa7a Update README.md 2021-08-11 09:08:33 +08:00
huyueeer
022f9eb551 更新EXPIRED_TOPIC_CONFIG文档描述 2021-08-06 16:00:16 +08:00
huyueeer
6e7b82cfcb 平台配置EXPIRED_TOPIC_CONFIG新增正则过滤Topic 2021-08-06 14:52:22 +08:00
huyueeer
b5fb24b360 本地认证或LDAP认证支持携带‘姓名’、‘部门’、‘邮箱’等用户元信息 2021-08-06 11:40:24 +08:00
huyueeer
b77345222c LDAP认证忽略大小写,修正判断顺序,相同LDAP用户反复REPLACE 2021-08-05 11:17:38 +08:00
huyueeer
793e81406e LDAP认证忽略大小写,修正从LDAP服务器返回值设置Username 2021-08-04 16:23:57 +08:00
huyueeer
cef1ec95d2 LDAP验证忽略账户大小写 2021-08-04 14:14:53 +08:00
EricZeng
7e1b3c552b Merge pull request #360 from ZHAOYINRUI/patch-1
Update 开源版与商业版特性对比.md
2021-08-03 10:02:02 +08:00
ZHAOYINRUI
69736a63b6 Update 开源版与商业版特性对比.md
补充优化
2021-08-02 22:10:15 +08:00
EricZeng
fb4a9f9056 删除多余的‘在’
删除多余的‘在’
2021-07-26 09:28:16 +08:00
zengqiao
387d89d3af optimize code format by sonar-lint 2021-07-13 10:39:28 +08:00
EricZeng
65d9ca9d39 Merge pull request #336 from fengxsong/master
feat: update dockerfile and helm chart
2021-07-10 10:47:57 +08:00
Peng
8c842af4ba Update README.md
更新小尺寸logo
2021-07-09 12:46:18 +08:00
shirenchuang
4faf9262c9 配置文件漏了 加上 2021-07-09 11:55:14 +08:00
shirenchuang
be7724c67d 2021-07-09 11:21:20 +08:00
Peng
48d26347f7 Update README.md
替换logo
2021-07-09 11:01:18 +08:00
shirenchuang
bdb01ec8b5 2021-07-07 13:28:55 +08:00
mike.zhangliang
9047815799 Update README.md 2021-07-06 17:36:23 +08:00
EricZeng
05bd94a2cc Merge pull request #344 from didi/dev
删除钉钉群二维码
2021-07-05 12:15:52 +08:00
zengqiao
c9f7da84d0 删除钉钉群二维码 2021-07-05 12:14:37 +08:00
EricZeng
bcc124e86a Merge pull request #343 from Hongten/master
修复Converts#convert2OrderDO() 出现重复赋值
2021-07-04 21:35:34 +08:00
Hongten
48d2733403 Merge pull request #2 from didi/master
sync code
2021-07-04 18:04:55 +08:00
hongtenzone@foxmail.com
31fc6e4e56 remove duplicate operation 2021-07-04 17:59:36 +08:00
hongtenzone@foxmail.com
fcdeef0146 remove duplicate operation 2021-07-04 17:55:54 +08:00
EricZeng
1cd524c0cc Merge pull request #341 from didi/dev
Topic基本信息中增加retention.bytes信息
2021-07-02 18:34:56 +08:00
zengqiao
0f746917a7 Topic基本信息中增加retention.bytes信息 2021-07-02 16:41:57 +08:00
EricZeng
a2228d0169 Merge pull request #335 from didi/dev
bump jackson-databind version to 2.9.10.8
2021-06-24 18:04:57 +08:00
shirenchuang
e8a679d34b Merge branch 'master' into v2.4.3 2021-06-24 17:18:50 +08:00
fengxusong
1912a42091 fix: default config 2021-06-24 14:00:29 +08:00
fengxusong
ca81f96635 feat: update dockerfile and charts 2021-06-24 12:13:29 +08:00
zengqiao
eb3b8c4b31 bump jackson-databind version to 2.9.10.8 2021-06-23 21:31:43 +08:00
EricZeng
6740d6d60b Merge pull request #332 from didi/dev
修复poll异常时, 超时时间不生效问题
2021-06-23 20:24:50 +08:00
zengqiao
c46c35b248 修复poll异常时, 超时时间不生效问题 2021-06-23 10:11:38 +08:00
EricZeng
0b2dcec4bc Merge pull request #323 from didi/dev
fix jmx credentials
2021-06-03 10:22:12 +08:00
shirenchuang
f8e2a4aff4 修改km的打包方式
增加启动/关闭脚本
2021-06-02 18:13:58 +08:00
zengqiao
7256db8c4e fix jmx credentials 2021-06-02 13:59:18 +08:00
shirenchuang
b14d5d9bee 修改km的打包方式
增加启动/关闭脚本
2021-06-01 20:20:40 +08:00
shirenchuang
12e15c3e4b Merge branch 'shirc_dev' into dev_v2.5.0 2021-06-01 20:19:53 +08:00
shirenchuang
51911bf272 add distribution 2021-06-01 20:17:54 +08:00
shirenchuang
6dc8061401 add distribution 2021-06-01 16:32:16 +08:00
EricZeng
b8fa4f8797 Merge pull request #319 from didi/dev
optimize n9e's default port
2021-05-31 19:44:38 +08:00
zengqiao
cc0bea7f45 optimize n9e's default port 2021-05-31 19:43:03 +08:00
EricZeng
4e9124b244 Merge pull request #316 from didi/dev
Topic账单配置说明
2021-05-29 13:46:18 +08:00
zengqiao
f0eabef7b0 Topic账单配置说明 2021-05-28 17:36:36 +08:00
EricZeng
23e5557958 Merge pull request #315 from didi/master
kafka-gateway相关功能说明
2021-05-28 17:13:29 +08:00
EricZeng
b1d02afa85 Merge pull request #312 from lucasun/master
修复clipboard 2.0.6 打包问题
2021-05-28 11:34:01 +08:00
孙超
2edc380f47 修改package.json增加内存修复和clipboard版本 2021-05-28 11:30:33 +08:00
孙超
cea8295c09 clipboard升级版本 2021-05-28 11:21:12 +08:00
EricZeng
244bfc993a Merge pull request #310 from ZHAOYINRUI/master
补充FAQ开源版和商业版特性对比
2021-05-27 14:50:52 +08:00
ZHAOYINRUI
3a272a4493 Update faq.md 2021-05-27 14:45:51 +08:00
ZHAOYINRUI
a3300db770 Update faq.md 2021-05-27 14:22:59 +08:00
ZHAOYINRUI
b0394ce261 Delete Logi-KafkaManager开源版和商业版特性对比总结.pdf 2021-05-27 14:22:03 +08:00
ZHAOYINRUI
3123089790 Create 开源版与商业版特性对比.md 2021-05-27 14:21:48 +08:00
ZHAOYINRUI
f13cf66676 Delete 开源版与商业版特性对比.md 2021-05-27 12:08:15 +08:00
ZHAOYINRUI
0c8c4d87fb Delete 开源版与商业版特性对比.md 2021-05-27 12:08:01 +08:00
ZHAOYINRUI
066088fdeb Update faq.md 2021-05-27 12:06:51 +08:00
ZHAOYINRUI
cf641e41c7 Update faq.md 2021-05-27 12:05:41 +08:00
ZHAOYINRUI
5b48322e1b Update faq.md 2021-05-27 12:04:58 +08:00
ZHAOYINRUI
9d3f680d58 Update faq.md 2021-05-27 12:04:04 +08:00
ZHAOYINRUI
bed28d57e6 Update 开源版与商业版特性对比.md 2021-05-27 12:02:09 +08:00
ZHAOYINRUI
2538525103 Update 开源版与商业版特性对比.md 2021-05-27 12:01:51 +08:00
ZHAOYINRUI
6ed798db8c Create 开源版与商业版特性对比.md 2021-05-27 12:01:15 +08:00
ZHAOYINRUI
8e9d966829 Update 开源版与商业版特性对比.md 2021-05-27 12:00:26 +08:00
ZHAOYINRUI
be16640f92 Update 开源版与商业版特性对比.md 2021-05-27 11:59:41 +08:00
ZHAOYINRUI
0e1376dd2e Create 开源版与商业版特性对比.md 2021-05-27 11:57:56 +08:00
ZHAOYINRUI
0494575aa7 Update faq.md 2021-05-27 11:32:08 +08:00
ZHAOYINRUI
bed57534e0 Add files via upload 2021-05-27 11:22:47 +08:00
EricZeng
1862d631d1 Merge pull request #305 from didi/dev
heartbeat表的数据更新时间从MySQL自动生成调整为Logi-KM的时间
2021-05-25 13:44:04 +08:00
zengqiao
c977ce5690 heartbeat表的数据更新时间从MySQL自动生成调整为Logi-KM的时间 2021-05-25 10:27:27 +08:00
zengqiao
84df377516 bump version to v2.4.2 and add release notes 2021-05-21 10:45:10 +08:00
EricZeng
4d9a284f6e Merge pull request #303 from didi/dev
bump tomcat version to 8.5.66
2021-05-21 10:21:16 +08:00
zengqiao
da7ad8b44a bump tomcat version to 8.5.66 2021-05-21 10:20:09 +08:00
EricZeng
4164046323 Merge pull request #301 from didi/dev
fix title version
2021-05-20 20:36:14 +08:00
zengqiao
72e743dfd1 fix title version 2021-05-20 20:35:23 +08:00
EricZeng
7eb7edaf0a Merge pull request #300 from didi/dev
bump tomcat version to 8.5.56
2021-05-20 20:33:59 +08:00
zengqiao
49368aaf76 bump tomcat version to 8.5.56 2021-05-20 18:17:30 +08:00
zengqiao
b8c07a966f bump version to v2.4.1 2021-05-18 21:00:20 +08:00
EricZeng
c6bcc0e3aa Merge pull request #297 from didi/dev
split op util controller to topic controller and leader controller, and add authority controller, quota controller
2021-05-18 20:21:18 +08:00
zengqiao
7719339f23 split op util controller to topic controller and leader controller, and add authority controller, quota controller 2021-05-18 20:18:08 +08:00
EricZeng
8ad64722ed Merge pull request #296 from tcf1207239873/dev
迭代优化2.4.1-配额调整
2021-05-18 19:37:58 +08:00
tangcongfa_v@didichuxing.com
611f8b8865 调整配额与权限 2021-05-18 18:25:03 +08:00
tangcongfa_v@didichuxing.com
38bdc173e8 Merge remote-tracking branch 'origin/dev' into dev 2021-05-18 16:07:40 +08:00
tangcongfa_v@didichuxing.com
52244325d9 权限调整 2021-05-18 16:03:24 +08:00
tcf1207239873
3fd3d99b8c Merge pull request #4 from didi/dev
Dev-0518-2
2021-05-18 15:45:55 +08:00
tangcongfa_v@didichuxing.com
d4ee5e91a2 配额调整 2021-05-18 14:49:00 +08:00
EricZeng
c2ad2d7238 Merge pull request #295 from didi/dev
简化sd配置枚举类的长度
2021-05-18 14:36:29 +08:00
zengqiao
892e195f0e code format 2021-05-18 14:33:02 +08:00
zengqiao
c5b1bed7dc 简化sd配置枚举类的长度 2021-05-18 14:13:23 +08:00
EricZeng
0e388d7aa7 Merge pull request #294 from didi/dev
增加登录绕过的接口调用方式
2021-05-18 11:48:55 +08:00
zengqiao
c3a0dbbe48 增加登录绕过的接口调用方式 2021-05-18 11:45:15 +08:00
tangcongfa_v@didichuxing.com
8b95b3ffc7 配额调整 2021-05-18 11:25:41 +08:00
tcf1207239873
42b78461cd Merge pull request #3 from didi/dev
Dev-0518
2021-05-18 10:30:57 +08:00
EricZeng
9190a41ca5 Merge pull request #293 from didi/dev
增加监控指标说明
2021-05-17 18:57:58 +08:00
zengqiao
28a7251319 增加监控指标说明 2021-05-17 15:31:08 +08:00
EricZeng
20565866ef Merge pull request #292 from didi/master
merge master
2021-05-17 15:20:50 +08:00
EricZeng
246f10aee5 Merge pull request #258 from Huyueeer/patch-2
更新Topic资源治理部分
2021-05-17 09:58:09 +08:00
EricZeng
960017280d 增加Topic资源治理的配置说明
增加Topic资源治理的配置说明
2021-05-17 09:57:56 +08:00
tangcongfa_v@didichuxing.com
7218aaf52e 权限调整 2021-05-13 17:49:42 +08:00
zengqiao
62050cc7b6 Merge branch 'master' of https://github.com/didi/Logi-KafkaManager 2021-05-13 16:22:16 +08:00
zengqiao
f88a14ac0a 补充说明为什么删除Topic成功之后, 又立马出现的原因 2021-05-13 16:22:05 +08:00
EricZeng
9286761c30 更新体验地址
更新体验地址
2021-05-13 10:26:07 +08:00
zengqiao
07c3273247 增加v2.4.0更新内容 2021-05-12 20:16:47 +08:00
tangcongfa_v@didichuxing.com
eb8fe77582 配额调整 2021-05-12 11:31:41 +08:00
tangcongfa_v@didichuxing.com
b68ba0bff6 配额调整 2021-05-11 16:58:44 +08:00
tangcongfa_v@didichuxing.com
696657c09e 处理冲突 2021-05-11 10:40:58 +08:00
tcf1207239873
12bea9b60a Merge pull request #1 from didi/dev
Dev
2021-05-11 10:28:02 +08:00
tangcongfa_v@didichuxing.com
9334e9552f 处理冲突 2021-05-11 10:25:25 +08:00
tangcongfa_v@didichuxing.com
a43b04a98b 处理冲突 2021-05-11 10:23:25 +08:00
zengqiao
f359ff995d faq中增加应用下线失败的说明 2021-05-11 08:41:37 +08:00
tangcongfa_v@didichuxing.com
9185d2646b 权限申请 2021-05-10 16:22:58 +08:00
EricZeng
33e61c762c Merge pull request #285 from didi/dev
bump commons-beanutils version to 1.9.4 & faq补充app的使用说明
2021-05-08 13:51:26 +08:00
zengqiao
e342e646ff 优化因DB时区不对导致周期任务不能被触发的日志 2021-05-08 13:50:04 +08:00
zengqiao
ed163a80e0 bump commons-beanutils version to 1.9.4 2021-05-08 11:45:27 +08:00
zengqiao
b390df08b5 faq补充app的使用说明 2021-05-08 11:28:52 +08:00
tangcongfa_v@didichuxing.com
f0b3b9f7f4 扩分区 2021-05-08 11:23:06 +08:00
EricZeng
a67d732507 Merge pull request #284 from didi/dev
faq中补充说明heartbeat表的作用及集群删除是否影响物理集群的说明
2021-05-08 11:20:51 +08:00
zengqiao
ca0ebe0d75 faq中补充说明heartbeat表的作用及集群删除是否影响物理集群的说明 2021-05-08 11:15:25 +08:00
EricZeng
94d113cbe0 Merge pull request #283 from didi/master
merge master
2021-05-08 11:07:53 +08:00
Hongten
25c3aeaa5f Merge pull request #1 from Hongten/optimize/migration_task_name
optimize the migration task name
2021-05-07 19:26:54 +08:00
Xiang Hong Wei
736d5a00b7 optimize the migration task name 2021-05-07 19:06:47 +08:00
zengqiao
f1627b214c FAQ:新建告警组说明优化 2021-05-07 14:03:09 +08:00
EricZeng
d9265ec7ea Merge pull request #280 from didi/dev
连接信息写DB优化为批量写入
2021-05-07 13:52:43 +08:00
zengqiao
663e871bed 连接信息写DB优化为批量写入 2021-05-07 13:51:34 +08:00
shirenchuang
5c5eaddef7 readme add 社区km文章 2021-05-07 11:34:32 +08:00
shirenchuang
edaec4f1ae readme add 社区km文章 2021-05-07 11:34:18 +08:00
EricZeng
6d19acaa6c 更新FAQ,增加磁盘信息没有数据的说明
更新FAQ,增加磁盘信息没有数据的说明
2021-05-06 18:39:30 +08:00
EricZeng
d29a619fbf 优化页面无数据的FAQ
优化页面无数据的FAQ
2021-05-06 17:54:46 +08:00
EricZeng
b17808dd91 Merge pull request #275 from Huyueeer/master
修改maxMetricsSaveDays字段为Long类型
2021-04-30 18:48:54 +08:00
HuYueeer
c5321a3667 修改maxMetricsSaveDays字段为Long类型 2021-04-30 13:47:49 +08:00
EricZeng
8836691510 Merge pull request #273 from didi/dev
bump jetty-util version to 9.4.39.v20210325
2021-04-29 17:26:40 +08:00
zengqiao
6568f6525d bump jetty-util version to 9.4.39.v20210325 2021-04-29 17:25:08 +08:00
tangcongfa_v@didichuxing.com
473fc27b49 创建topic 2021-04-28 18:05:58 +08:00
EricZeng
74aeb55acb Merge pull request #266 from Liu-XinYuan/responsecode-fix
In case of authentication failure, add a clear response code
2021-04-28 17:25:55 +08:00
Liu-XinYuan
8efcf0529f In case of authentication failure, add a clear response code 2021-04-28 17:14:03 +08:00
EricZeng
06071c2f9c Merge pull request #264 from didi/dev_2.4.0
修复Topic工单选择Broker方式进行审批时展示的还是Region的问题
2021-04-28 14:19:26 +08:00
EricZeng
5eb4eca487 Merge pull request #263 from lucasun/master
修复Topic工单审批时,切换到Broker时展示还是Region的问题
2021-04-28 14:17:11 +08:00
孙超
33f6153e12 V2.4 bugfix 2021-04-28 14:09:54 +08:00
tangcongfa_v@didichuxing.com
df3283f526 删除topic 2021-04-28 11:22:53 +08:00
tangcongfa_v@didichuxing.com
b5901a2819 创建topic 2021-04-28 11:08:01 +08:00
EricZeng
6d5f1402fe Merge pull request #262 from didi/dev
增加App与Topic自动化审批开关
2021-04-28 10:51:12 +08:00
zengqiao
65e3782b2e 增加App与Topic自动化审批开关 2021-04-28 10:48:17 +08:00
EricZeng
135981dd30 Merge pull request #261 from didi/dev
开放接口集群ID开始调整, op对应的是物理集群ID, normal对应的是逻辑集群ID, 第一步, util接口调整
2021-04-28 10:15:42 +08:00
zengqiao
fe5cf2d922 开放接口集群ID开始调整, op对应的是物理集群ID, normal对应的是逻辑集群ID, 第一步, util接口调整 2021-04-28 10:10:16 +08:00
EricZeng
e15425cc2e 修复错别字
修复错别字
2021-04-27 17:36:44 +08:00
EricZeng
c3cb0a4e33 优化FAQ中告警组创建的说明
优化FAQ中告警组创建的说明
2021-04-27 17:30:18 +08:00
lucasun
cc32976bdd Merge pull request #259 from JokerQueue/master
用户管理页面,前端限制用户删除自己
2021-04-27 14:08:31 +08:00
Joker
bc08318716 用户管理页面,前端限制用户删除自己 2021-04-26 21:15:27 +08:00
tangcongfa_v@didichuxing.com
ee1ab30c2c 创建topic 2021-04-26 17:48:32 +08:00
HuYueeer
7fa1a66f7e 更新Topic资源治理部分 2021-04-26 14:51:12 +08:00
EricZeng
946bf37406 Merge pull request #257 from Huyueeer/patch-1
更新提问:heartbeat表相关
2021-04-26 14:27:10 +08:00
HuYueeer
8706f6931a Update faq.md
更新提问:heartbeat表相关
2021-04-26 14:15:32 +08:00
tangcongfa_v@didichuxing.com
f551674860 创建topic 2021-04-26 11:16:53 +08:00
EricZeng
d90fe0ef07 Merge pull request #253 from didi/master
merge master
2021-04-26 10:01:19 +08:00
EricZeng
bf979fa3b3 Merge pull request #252 from didi/dev_2.4.0
Dev 2.4.0
2021-04-26 09:55:30 +08:00
EricZeng
b3b88891e9 Merge pull request #251 from lucasun/master
v2.4.0
2021-04-25 21:01:33 +08:00
lucasun
01c5de60dc Merge branch 'master' into master 2021-04-25 20:54:10 +08:00
孙超
47b8fe5022 V2.4.1 FE 2021-04-25 20:43:20 +08:00
zengqiao
324b37b875 v2.4.0 be code 2021-04-25 18:11:52 +08:00
zengqiao
76e7e192d8 bump version to 2.4.0 2021-04-25 17:40:47 +08:00
EricZeng
f9f3c4d923 Merge pull request #240 from yangvipguang/docker-dev
Docker容器镜像优化
2021-04-25 17:23:36 +08:00
杨光
a476476bd1 Update Dockerfile
添加进程管理器tini 防止僵尸应用
升级基础镜像到Java 16 alpine 
默认使用官方jar 包
默认开启JMX 监控
2021-04-23 14:10:40 +08:00
杨光
82a60a884a Add files via upload 2021-04-23 14:06:55 +08:00
杨光
f17727de18 Merge pull request #1 from didi/master
同步提交
2021-04-23 11:31:35 +08:00
shirenchuang
f1f33c79f4 Merge branch 'shirc_dev' into dev 2021-04-23 10:26:43 +08:00
shirenchuang
d52eaafdbb 修正一下 用户手册中的 共享集群和独享集群的概念 2021-04-23 10:25:36 +08:00
shirenchuang
e7a3e50ed1 Merge branch 'shirc_dev' into dev 2021-04-23 10:20:01 +08:00
shirenchuang
2e09a87baa Merge remote-tracking branch 'origin/master' into shirc_dev 2021-04-23 10:18:30 +08:00
shirenchuang
b92ae7e47e 修正一个注释 2021-04-23 10:18:18 +08:00
EricZeng
f98446e139 Merge pull request #239 from Liu-XinYuan/i238
fix create topic failure when peak_byte_in is not specified
2021-04-22 19:02:48 +08:00
Liu-XinYuan
57a48dadaa modify from obj ==null to ValidateUtils.isNull 2021-04-22 18:44:34 +08:00
Liu-XinYuan
c65ec68e46 fix create topic failed when not specify peak_byte_in 2021-04-22 18:30:23 +08:00
zengqiao
d6559be3fc 部分后台任务获取Topic列表时不走缓存 2021-04-22 16:06:37 +08:00
shirenchuang
6fbf67f9a9 Merge branch 'dev' into shirc_dev 2021-04-22 14:13:32 +08:00
zengqiao
59df5b24fe broker元信息中增加Rack信息 2021-04-20 19:28:36 +08:00
zengqiao
3e1544294b 删除无效代码 2021-04-20 17:22:26 +08:00
EricZeng
a12c398816 Merge pull request #232 from didi/dev
应用下线功能权限列表获取优化
2021-04-20 13:54:49 +08:00
EricZeng
0bd3e28348 Merge pull request #228 from PengShuaixin/dev
应用下线审批功能优化
2021-04-20 13:51:32 +08:00
PengShuaixin
ad4e39c088 应用下线功能权限列表获取优化 2021-04-20 11:22:11 +08:00
PengShuaixin
2668d96e6a Merge remote-tracking branch 'origin/dev' into dev
# Conflicts:
#	kafka-manager-extends/kafka-manager-bpm/src/main/java/com/xiaojukeji/kafka/manager/bpm/order/impl/DeleteAppOrder.java
2021-04-20 11:19:49 +08:00
shirenchuang
357c496aad 下线应用的时候 判断先下线topic 2021-04-20 11:18:07 +08:00
shirenchuang
22a513ba22 升级mysql驱动;支持Mysql 8.0+ 2021-04-20 11:18:07 +08:00
zengqiao
e6dd1119be 通过获取类的RequestMapping注解来判断当前请求是否需要登录 2021-04-20 11:18:07 +08:00
EricZeng
2dbe454e04 Merge pull request #231 from didi/master
merge master
2021-04-20 10:46:03 +08:00
zengqiao
e3a59b76eb 修复数据删空之后, 缓存不能被更新的BUG 2021-04-19 20:31:40 +08:00
shirenchuang
01008acfcd 本地sql配置 2021-04-19 15:35:48 +08:00
zengqiao
b67a162d3f bump version to v2.3.1 2021-04-19 14:13:48 +08:00
shirenchuang
8bfde9fbaf Merge branch 'shirc_dev' into dev 2021-04-19 10:19:11 +08:00
shirenchuang
1fdecf8def 下线应用的时候 判断先下线topic 2021-04-19 10:17:29 +08:00
zengqiao
1141d4b833 通过获取类的RequestMapping注解来判断当前请求是否有权限 2021-04-15 18:12:21 +08:00
EricZeng
cdac92ca7b Merge pull request #229 from didi/dev
通过获取类的RequestMapping注解来判断当前请求是否需要登录
2021-04-14 19:47:43 +08:00
zengqiao
2a57c260cc 通过获取类的RequestMapping注解来判断当前请求是否需要登录 2021-04-14 19:40:19 +08:00
PengShuaixin
f41e29ab3a 应用下线功能优化 2021-04-14 12:29:59 +08:00
zengqiao
8f10624073 add jmx prometheus jar 2021-04-12 17:58:24 +08:00
EricZeng
eb1f8be11e Merge pull request #224 from didi/master
merge master
2021-04-12 13:51:14 +08:00
EricZeng
3333501ab9 Merge pull request #222 from zwOvO/master
删除无用import、删除无用代码
2021-04-09 19:28:04 +08:00
zwOvO
0f40820315 删除无用import、删除无用代码 2021-04-09 11:41:06 +08:00
shirenchuang
5f1a839620 升级mysql驱动;支持Mysql 8.0+ 2021-04-06 12:09:52 +08:00
zengqiao
b9bb1c775d change uri filter rule 2021-04-06 10:26:21 +08:00
zengqiao
1059b7376b forbid requests when uri contains .. 2021-04-06 10:01:29 +08:00
EricZeng
f38ab4a9ce Merge pull request #217 from didi/dev
拒绝包含./或/连续过多的接口请求
2021-03-31 20:00:52 +08:00
zengqiao
9e7450c012 拒绝包含./或/连续过多的接口请求 2021-03-31 19:45:18 +08:00
EricZeng
99a3e360fe Merge pull request #216 from didi/dev
接口过滤策略由接口黑名单转成接口白名单
2021-03-30 12:56:19 +08:00
lucasun
d45f8f78d6 Merge pull request #215 from zhangfenhua/master
增加nginx配置:前后端分离&配置多个静态资源
2021-03-30 11:11:58 +08:00
zengqiao
648af61116 接口过滤策略由接口黑名单转成接口白名单 2021-03-29 21:21:23 +08:00
zhangfenhua
eebf1b89b1 nginx配置手册 2021-03-29 11:53:50 +08:00
EricZeng
f8094bb624 Merge pull request #211 from didi/dev
add expert config desc
2021-03-23 15:23:10 +08:00
zengqiao
ed13e0d2c2 add expert config desc 2021-03-23 15:21:48 +08:00
EricZeng
aa830589b4 Merge pull request #210 from didi/dev
fix monitor enable time illegal bug
2021-03-22 17:22:44 +08:00
zengqiao
999a2bd929 fix monitor enable time illegal bug 2021-03-22 17:21:12 +08:00
EricZeng
d69ee98450 Merge pull request #209 from didi/dev
add faq, kafka version supported & apply logical cluster and how to handle it
2021-03-22 13:43:14 +08:00
zengqiao
f6712c24ad merge master 2021-03-22 13:42:09 +08:00
zengqiao
89d2772194 add faq, kafka version supported & apply logical cluster and how to handle it 2021-03-22 13:38:23 +08:00
mike.zhangliang
03352142b6 Update README.md
微信加群方式补充
2021-03-16 14:46:38 +08:00
lucasun
73a51e0c00 Merge pull request #205 from ZQKC/master
add qa
2021-03-10 19:27:01 +08:00
zengqiao
2e26f8caa6 add qa 2021-03-10 19:23:29 +08:00
EricZeng
f9bcce9e43 Merge pull request #3 from didi/master
merge didi Logi-KM
2021-03-10 19:20:39 +08:00
EricZeng
2ecc877ba8 fix add_cluster.md path
fix add_cluster.md path
2021-03-10 15:45:48 +08:00
EricZeng
3f8a3c69e3 Merge pull request #201 from ZQKC/master
optimize ldap
2021-03-10 14:12:35 +08:00
zengqiao
67c37a0984 optimize ldap 2021-03-10 13:52:09 +08:00
EricZeng
a58a55d00d Merge pull request #203 from lucasun/hotfix/v2.3.1
clipbord版本锁定在2.0.6,升级2.0.7会引起ts打包报错
2021-03-09 18:11:02 +08:00
孙超
06d51dd0b8 clipbord版本锁定在2.0.6,升级2.0.7会引起ts打包报错 2021-03-09 18:07:42 +08:00
zengqiao
d5db028f57 optimize ldap 2021-03-09 15:13:55 +08:00
EricZeng
fcb85ff4be Merge pull request #2 from didi/master
merge didi logi-km
2021-03-09 11:07:17 +08:00
EricZeng
3695b4363d Merge pull request #200 from didi/dev
del ResultStatus which in vo
2021-03-09 11:02:46 +08:00
zengqiao
cb11e6437c del ResultStatus in vo 2021-03-09 11:01:21 +08:00
EricZeng
5127bd11ce Merge pull request #198 from didi/master
merge master
2021-03-09 10:42:28 +08:00
EricZeng
91f90aefa1 Merge pull request #195 from fanghanyun/v2.3.0_ldap
support AD LDAP
2021-03-09 10:40:42 +08:00
fanghanyun
0a067bce36 Support AD LDAP 2021-03-09 10:19:08 +08:00
fanghanyun
f0aba433bf Support AD LDAP 2021-03-08 20:31:15 +08:00
EricZeng
f06467a0e3 Merge pull request #197 from didi/dev
delete without used code
2021-03-05 16:12:27 +08:00
zengqiao
68bcd3c710 delete without used code 2021-03-05 16:05:58 +08:00
EricZeng
a645733cc5 Merge pull request #196 from didi/dev
add gateway config docs
2021-03-05 15:31:53 +08:00
zengqiao
49fe5baf94 add gateway config docs 2021-03-05 14:59:40 +08:00
fanghanyun
411ee55653 support AD LDAP 2021-03-05 14:45:54 +08:00
EricZeng
e351ce7411 Merge pull request #194 from didi/dev
reject req when uri contains ..
2021-03-04 17:52:56 +08:00
zengqiao
f33e585a71 reject req when uri contains .. 2021-03-04 17:51:35 +08:00
EricZeng
77f3096e0d Merge pull request #191 from didi/dev
Dev
2021-02-28 22:04:34 +08:00
EricZeng
9a5b18c4e6 Merge pull request #190 from JokerQueue/dev
bug fix:correct way to judge a user does not exist
2021-02-28 14:36:28 +08:00
Joker
0c7112869a bug fix:correct way to judge a user does not exist 2021-02-27 22:35:35 +08:00
EricZeng
f66a4d71ea Merge pull request #188 from JokerQueue/dev
bug fix: unexpected stop of the topic sync task
2021-02-26 22:46:54 +08:00
Joker
9b0ab878df bug fix: unexpected stop of the topic sync task 2021-02-26 19:47:03 +08:00
EricZeng
d30b90dfd0 Merge pull request #186 from ZHAOYINRUI/master
新增releases_notes、更新FAQ
2021-02-26 09:59:18 +08:00
ZHAOYINRUI
efd28f8c27 Update faq.md 2021-02-26 00:03:25 +08:00
ZHAOYINRUI
e05e722387 Add files via upload 2021-02-26 00:01:09 +08:00
EricZeng
748e81956d Update faq.md 2021-02-24 14:10:41 +08:00
EricZeng
c9a41febce Merge pull request #184 from didi/dev
reject illegal zk address
2021-02-23 17:32:20 +08:00
zengqiao
18e244b756 reject illegal zk address 2021-02-23 17:18:49 +08:00
mrazkong
47676139a3 Merge pull request #183 from didi/dev
support dynamic change cluster auth
2021-02-23 16:56:26 +08:00
zengqiao
1ed933b7ad support dynamic change auth 2021-02-23 16:34:21 +08:00
EricZeng
f6a343ccd6 Merge pull request #182 from didi/master
merge master
2021-02-23 15:47:28 +08:00
EricZeng
dd6cdc22e5 Merge pull request #178 from Observe-secretly/v2.2.1_ldap
新功能:增加了对LDAP登录的支持
2021-02-10 12:35:07 +08:00
李民
f70f4348b3 Merge branch 'master' into v2.2.1_ldap 2021-02-10 10:00:32 +08:00
EricZeng
ec7f801929 Merge pull request #180 from didi/dev_2.3.0
Dev 2.3.0
2021-02-09 22:06:51 +08:00
zengqiao
0f8aca382e bump version to 2.3.0 2021-02-09 21:47:56 +08:00
zengqiao
0270f77eaa add upgrade doc 2021-02-09 21:46:55 +08:00
EricZeng
dcba71ada4 Merge pull request #179 from lucasun/dev_2.3.0_fe
迭代V2.5, 修复broker监控问题,增加JMX认证支持等...
2021-02-09 18:48:42 +08:00
孙超
6080f76a9c 迭代V2.5, 修复broker监控问题,增加JMX认证支持等... 2021-02-09 15:26:47 +08:00
李民
e7349161f3 BUG FIX:修改LDAP登录重复注册用户的BUG 2021-02-09 15:22:26 +08:00
李民
2e2907ea09 修改LDAP获取UserDN的时候可能出错的问题 2021-02-09 14:33:53 +08:00
李民
25e84b2a6c 新功能:增加对LDAP的登录的支持 2021-02-09 11:33:54 +08:00
zengqiao
5efd424172 version 2.3.0 2021-02-09 11:20:56 +08:00
EricZeng
2672502c07 Merge pull request #174 from 17hao/issue-153-authority
Tracking delete account
2021-02-07 16:10:56 +08:00
EricZeng
83440cc3d9 Merge pull request #173 from 17hao/issue-153
Tracking changes applied to app
2021-02-07 16:10:01 +08:00
17hao
8e5f93be1c Tracking delete account 2021-02-07 15:54:41 +08:00
17hao
c1afc07955 Tracking changes applied to app 2021-02-07 15:16:26 +08:00
EricZeng
4a83e14878 Merge pull request #172 from 17hao/issue-153
Tracking changes applied to Kafka cluster
2021-02-07 14:38:38 +08:00
17hao
832320abc6 Improve code's cohesion && save jmx properties 2021-02-07 14:20:57 +08:00
17hao
70c237da72 Tracking changes applied to Kafka cluster 2021-02-07 13:23:22 +08:00
EricZeng
edfcc5c023 Merge pull request #169 from 17hao/issue-153
Record topic operation
2021-02-06 22:30:32 +08:00
17hao
0668debec6 Update pom.xml 2021-02-06 18:46:47 +08:00
17hao
02d6463faa Using JsonUtils instead of fastjson 2021-02-06 18:43:36 +08:00
17hao
1fdb85234c Record editting topic 2021-02-05 12:18:50 +08:00
EricZeng
44b7dd1808 Merge pull request #167 from ZHAOYINRUI/master
更新readme、集群接入手册
2021-02-04 19:21:59 +08:00
ZHAOYINRUI
e983ee3101 Update README.md 2021-02-04 19:10:11 +08:00
ZHAOYINRUI
75e7e81c05 Add files via upload 2021-02-04 19:09:02 +08:00
ZHAOYINRUI
31ce3b9c08 Update add_cluster.md 2021-02-04 19:08:28 +08:00
EricZeng
ed93c50fef modify without logical cluster desc 2021-02-04 16:54:42 +08:00
EricZeng
4845660eb5 Merge pull request #163 from 17hao/issue-160
Issue#160: Remove __consumer_offsets from topic list
2021-02-04 16:42:41 +08:00
17hao
c7919210a2 Fix topic list filter condition 2021-02-04 16:32:31 +08:00
17hao
9491418f3b Update if statements 2021-02-04 12:33:32 +08:00
17hao
e8de403286 Hide __transaction_state in topic list && fix logic error 2021-02-04 12:14:44 +08:00
17hao
dfb625377b Using existing topic name constant 2021-02-03 22:30:38 +08:00
EricZeng
2c0f2a8be6 Merge pull request #166 from ZHAOYINRUI/master
更新集群接入文章、资源申请文章
2021-02-03 21:02:38 +08:00
ZHAOYINRUI
787d3cb3e9 Update resource_apply.md 2021-02-03 20:52:44 +08:00
ZHAOYINRUI
96ca17d26c Add files via upload 2021-02-03 19:43:03 +08:00
ZHAOYINRUI
3dd0f7f2c3 Update add_cluster.md 2021-02-03 19:41:33 +08:00
ZHAOYINRUI
10ba0cf976 Update resource_apply.md 2021-02-03 18:18:02 +08:00
ZHAOYINRUI
276c15cc23 Delete docs/user_guide/resource_apply directory 2021-02-03 18:08:15 +08:00
ZHAOYINRUI
2584b848ad Update resource_apply.md 2021-02-03 18:07:34 +08:00
ZHAOYINRUI
6471efed5f Add files via upload 2021-02-03 18:04:40 +08:00
ZHAOYINRUI
5b7d7ad65d Create resource_apply.md 2021-02-03 18:01:42 +08:00
17hao
712851a8a5 Add braces 2021-02-03 16:06:16 +08:00
17hao
63d291cb47 Remove __consumer_offsets from topic list 2021-02-03 15:50:33 +08:00
EricZeng
f825c92111 Merge pull request #159 from didi/dev_2.2.1
storage support s3
2021-02-03 13:49:52 +08:00
EricZeng
419eb2ea41 Merge pull request #158 from didi/dev
change dockerfile and heml location
2021-02-03 10:09:43 +08:00
zengqiao
89b58dd64e storage support s3 2021-02-02 16:42:20 +08:00
zengqiao
6bc5f81440 change dockerfile and heml location 2021-02-02 15:58:46 +08:00
EricZeng
424f4b7b5e Merge pull request #157 from didi/master
merge master
2021-02-02 15:33:51 +08:00
mrazkong
9271a1caac Merge pull request #118 from yangvipguang/helm-dev
增加Dockerfile 和 简单Helm
2021-02-01 10:50:05 +08:00
杨光
0ee4df03f9 Update Dockerfile 2021-01-31 15:34:15 +08:00
杨光
8ac713ce32 Update Dockerfile 2021-01-31 15:30:18 +08:00
杨光
76b2489fe9 Delete docker-depends/agent/config directory 2021-01-31 15:29:50 +08:00
杨光
6786095154 Delete sources.list 2021-01-31 15:29:31 +08:00
杨光
2c5793ef37 Delete settings 2021-01-31 15:29:19 +08:00
杨光
d483f25b96 Add files via upload
add  jmx prometheus
2021-01-31 15:28:59 +08:00
EricZeng
7118368979 Merge pull request #136 from ZHAOYINRUI/patch-10
Create resource_apply.md
2021-01-29 10:54:11 +08:00
EricZeng
59256c2e80 modify jdbc url
modify jdbc url, add useSSL=false
2021-01-29 10:53:35 +08:00
EricZeng
1fb8a0db1e Merge pull request #146 from ZHAOYINRUI/patch-12
Update README.md
2021-01-29 10:03:48 +08:00
ZHAOYINRUI
07d0c8e8fa Update README.md 2021-01-28 22:02:49 +08:00
EricZeng
98452ead17 Merge pull request #145 from didi/dev
faq add how to resolve topic biz data not exist error desc
2021-01-28 16:20:42 +08:00
zengqiao
d8c9f40377 faq add how to resolve topic biz data not exist error desc 2021-01-28 15:50:31 +08:00
EricZeng
8148d5eec6 Merge pull request #144 from didi/dev
optimize result code
2021-01-28 14:11:00 +08:00
zengqiao
4c429ad604 optimize result code 2021-01-28 12:06:06 +08:00
EricZeng
a9c52de8d5 Merge pull request #143 from ZhaoXinlong/patch-1
correcting some typo
2021-01-27 20:28:32 +08:00
ZhaoXinlong
f648aa1f91 correcting some typo
修改文字错误
2021-01-27 16:31:42 +08:00
EricZeng
eaba388bdd Merge pull request #142 from didi/dev
add connect jmx failed desc
2021-01-27 16:06:39 +08:00
zengqiao
73e6afcbc6 add connect jmx failed desc 2021-01-27 16:01:18 +08:00
EricZeng
8c3b72adf2 Merge pull request #139 from didi/dev
optimize message when login failed
2021-01-26 19:57:50 +08:00
zengqiao
ae18ff4262 optimize login failed message 2021-01-26 16:15:08 +08:00
ZHAOYINRUI
1adc8af543 Create resource_apply.md 2021-01-25 19:27:31 +08:00
EricZeng
7413df6f1e Merge pull request #131 from ZHAOYINRUI/patch-9
Update alarm_rules.md
2021-01-25 18:36:06 +08:00
EricZeng
bda8559190 Merge pull request #135 from didi/master
merge master
2021-01-25 18:34:56 +08:00
EricZeng
b74612fa41 Merge pull request #134 from didi/dev_2.2.0
merge dev 2.2.0
2021-01-25 17:30:26 +08:00
EricZeng
22e0c20dcd Merge pull request #133 from lucasun/dev_2.2.0_fe
fix txt
2021-01-25 17:21:42 +08:00
孙超
08f92e1100 fix txt 2021-01-25 17:02:07 +08:00
zengqiao
bb12ece46e modify zk example 2021-01-25 17:01:54 +08:00
EricZeng
0065438305 Merge pull request #132 from lucasun/dev_2.2.0_fe
add fe page
2021-01-25 16:47:02 +08:00
孙超
7f115c1b3e add fe page 2021-01-25 15:34:07 +08:00
ZHAOYINRUI
4e0114ab0d Update alarm_rules.md 2021-01-25 13:24:01 +08:00
EricZeng
0ef64fa4bd Merge pull request #126 from ZHAOYINRUI/patch-8
Create alarm_rules.md
2021-01-25 11:09:21 +08:00
ZHAOYINRUI
84dbc17c22 Update alarm_rules.md 2021-01-25 11:04:30 +08:00
EricZeng
16e16e356d Merge pull request #130 from xuehaipeng/patch-1
Update faq.md
2021-01-25 10:35:12 +08:00
xuehaipeng
978ee885c4 Update faq.md 2021-01-24 20:06:29 +08:00
zengqiao
850d43df63 add v2.2.0 feature & fix 2021-01-23 13:19:29 +08:00
zengqiao
fc109fd1b1 bump version to 2.2.0 2021-01-23 12:41:38 +08:00
EricZeng
9aefc55534 Merge pull request #1 from didi/dev
merge didi dev
2021-01-23 11:16:35 +08:00
EricZeng
2829947b93 Merge pull request #129 from didi/master
merge master
2021-01-23 11:09:52 +08:00
EricZeng
0c2af89a1c Merge pull request #125 from ZHAOYINRUI/patch-7
create kafka_metrics_desc.md
2021-01-23 11:03:14 +08:00
EricZeng
14c2dc9624 update kafka_metrics.md 2021-01-23 11:01:44 +08:00
EricZeng
4f35d710a6 Update and rename metric.md to kafka_metrics_desc.md 2021-01-23 10:58:11 +08:00
EricZeng
fdb5e018e5 Merge pull request #122 from ZHAOYINRUI/patch-4
Update README.md
2021-01-23 10:51:26 +08:00
EricZeng
6001fde25c Update dynamic_config_manager.md 2021-01-23 10:21:47 +08:00
EricZeng
ae63c0adaf Merge pull request #128 from didi/dev
add sync topic to db doc
2021-01-23 10:20:27 +08:00
zengqiao
ad1539c8f6 add sync topic to db doc 2021-01-23 10:17:59 +08:00
EricZeng
634a0c8cd0 Update faq.md 2021-01-22 20:42:13 +08:00
ZHAOYINRUI
773f9a0c63 Create alarm_rules.md 2021-01-22 18:16:51 +08:00
ZHAOYINRUI
e4e320e9e3 Create metric.md 2021-01-22 18:06:35 +08:00
ZHAOYINRUI
3b4b400e6b Update README.md 2021-01-22 15:56:53 +08:00
杨光
a950be2d95 change password 2021-01-20 17:57:14 +08:00
杨光
ba6f5ab984 add helm and dockerfile 2021-01-20 17:49:56 +08:00
mike.zhangliang
f3a5e3f5ed Update README.md 2021-01-18 19:06:43 +08:00
mike.zhangliang
e685e621f3 Update README.md 2021-01-18 19:05:44 +08:00
EricZeng
2cd2be9b67 Merge pull request #112 from didi/dev
监控告警系统对接说明文档
2021-01-17 18:21:16 +08:00
zengqiao
e73d9e8a03 add monitor_system_integrate_with_self file 2021-01-17 18:18:07 +08:00
zengqiao
476f74a604 rename file 2021-01-17 16:49:02 +08:00
EricZeng
ab0d1d99e6 Merge pull request #111 from didi/dev
Dev
2021-01-17 16:11:08 +08:00
zengqiao
d5680ffd5d 增加Topic同步任务&Bug修复 2021-01-16 16:26:38 +08:00
EricZeng
3c091a88d4 Merge pull request #110 from didi/master
合并master分支上的改动
2021-01-16 13:37:31 +08:00
EricZeng
49b70b33de Merge pull request #108 from didi/dev
增加application.yml文件说明 & 修改版本
2021-01-16 13:34:07 +08:00
zengqiao
c5ff2716fb 优化build.sh & yaml 2021-01-16 12:39:56 +08:00
ZQKC
400fdf0896 修复图片地址错误问题
修复图片地址错误问题
2021-01-16 12:04:20 +08:00
ZQKC
cbb8c7323c Merge pull request #109 from ZHAOYINRUI/master
架构图更新、钉钉群ID更新
2021-01-16 09:33:19 +08:00
ZHAOYINRUI
60e79f8f77 Update README.md 2021-01-16 00:25:06 +08:00
ZHAOYINRUI
0e829d739a Add files via upload 2021-01-16 00:22:31 +08:00
ZQKC
62abb274e0 增加application.yml文件说明
增加application.yml文件说明
2021-01-15 19:14:48 +08:00
ZQKC
e4028785de Update README.md
change km address
2021-01-09 15:30:30 +08:00
mrazkong
2bb44bcb76 Update Intergration_n9e_monitor.md 2021-01-07 17:09:15 +08:00
mike.zhangliang
684599f81b Update README.md 2021-01-07 15:44:17 +08:00
mike.zhangliang
b56d28f5df Update README.md 2021-01-07 15:43:07 +08:00
ZHAOYINRUI
02b9ac04c8 Update user_guide_cn.md 2020-12-30 22:44:23 +08:00
zengqiao
2fc283990a bump version to 2.1.0 2020-12-19 01:53:46 +08:00
ZQKC
abb652ebd5 Merge pull request #104 from didi/dev
v2.1版本合并
2020-12-19 01:14:26 +08:00
zengqiao
55786cb7f7 修改node版本要求 2020-12-19 00:45:58 +08:00
zengqiao
447a575f4f v2.1 fe 2020-12-19 00:40:52 +08:00
zengqiao
49280a8617 v2.1版本更新 2020-12-19 00:27:16 +08:00
ZQKC
ff78a9cc35 Merge pull request #101 from didi/dev
use mysql 8
2020-12-11 11:49:06 +08:00
zengqiao
3fea5c9c8c use mysql 8 2020-12-11 10:48:03 +08:00
ZQKC
aea63cad52 Merge pull request #94 from didi/dev
增加FAQ
2020-11-22 21:49:48 +08:00
zengqiao
800abe9920 增加FAQ 2020-11-22 21:43:52 +08:00
ZQKC
dd6069e41a Merge pull request #93 from didi/dev
夜莺Mon集成配置说明
2020-11-22 20:09:34 +08:00
zengqiao
90d31aeff0 夜莺Mon集成配置说明 2020-11-22 20:07:14 +08:00
ZQKC
4d9a327b1f Merge pull request #92 from didi/dev
FIX N9e Mon
2020-11-22 18:15:49 +08:00
zengqiao
06a97ef076 FIX N9e Mon 2020-11-22 18:13:36 +08:00
ZQKC
76c2477387 Merge pull request #91 from didi/dev
修复上报夜莺功能
2020-11-22 17:00:39 +08:00
zengqiao
bc4dac9cad 删除无效代码 2020-11-22 16:58:43 +08:00
zengqiao
36e3d6c18a 修复上报夜莺功能 2020-11-22 16:56:22 +08:00
ZQKC
edfd84a8e3 Merge pull request #88 from didi/dev
增加build.sh
2020-11-15 17:02:26 +08:00
zengqiao
fb20cf6069 增加build.sh 2020-11-15 16:58:28 +08:00
ZQKC
abbe47f6b9 Merge pull request #87 from didi/dev
初始化SQL优化&KCM修复&连接信息修复
2020-11-15 16:55:42 +08:00
zengqiao
f84d250134 kcm修复&连接信息接口修复 2020-11-15 16:50:59 +08:00
zengqiao
3ffb4b8990 初始化SQL优化 2020-11-15 16:31:10 +08:00
ZQKC
f70cfabede Merge pull request #84 from didi/dev
fix 前端资源加载问题
2020-11-14 16:56:16 +08:00
potaaato
3a81783d77 Merge pull request #83 from Candieslove/master
fix: remove track.js && add font.css
2020-11-13 14:04:41 +08:00
eilenexuzhe
237a4a90ff fix: remove track.js && add font.css 2020-11-13 11:58:46 +08:00
ZQKC
99c7dfc98d Merge pull request #81 from didi/dev
修复Topic详情中服务地址不展示问题
2020-11-08 20:13:03 +08:00
zengqiao
48aba34370 修复Topic详情中服务地址不展示问题 2020-11-08 20:07:45 +08:00
ZQKC
29cca36f2c Merge pull request #80 from didi/dev
增加上报监控指标开关
2020-11-08 17:14:50 +08:00
zengqiao
0f5819f5c2 增加上报监控指标开关 2020-11-08 17:13:04 +08:00
ZQKC
373772de2d Merge pull request #79 from didi/dev
文案优化|服务发现接口修复
2020-11-08 16:11:10 +08:00
zengqiao
7f5bbe8b5f 优化 2020-11-08 16:00:15 +08:00
zengqiao
daee57167b 服务发现接口修复 2020-11-08 15:59:50 +08:00
zengqiao
03467196b9 POM文件优化 2020-11-08 15:59:27 +08:00
zengqiao
d3f3531cdb 文案优化 2020-11-08 15:43:42 +08:00
ZQKC
883b694592 Merge pull request #78 from didi/dev
文档更新
2020-11-07 22:21:52 +08:00
zengqiao
6c89d66af9 文档更新 2020-11-07 22:09:22 +08:00
ZQKC
fb0a76b418 Merge pull request #77 from didi/master
master合并到dev
2020-11-07 22:02:24 +08:00
ZQKC
64f77fca5b Merge pull request #71 from didi/dev_2.x
开放接口
2020-10-26 22:53:53 +08:00
zengqiao
b1fca2c5be 删除无效代码 2020-10-26 11:23:28 +08:00
zengqiao
108d705f09 删除无效代码 2020-10-26 11:20:34 +08:00
zengqiao
a77242e66c 开放接口&近期BUG修复 2020-10-26 11:17:45 +08:00
ZQKC
8b153113ff Merge pull request #70 from didi/master
merge master
2020-10-26 10:45:56 +08:00
zengqiao
6d0ec37135 增加格式PDF文档防止图裂 2020-10-22 09:32:58 +08:00
ZQKC
603dadff35 Merge pull request #62 from didi/dev
统一依赖包版本
2020-09-29 17:32:32 +08:00
zengqiao
1a4ef3d9c1 统一pom包版本 2020-09-29 17:25:04 +08:00
ZQKC
788468054a Merge pull request #61 from didi/master
合并master分支
2020-09-29 17:16:52 +08:00
ZQKC
bdb44c6dce Merge pull request #59 from didi/dev_2.x
Dev 2.x
2020-09-29 10:47:16 +08:00
zengqiao
251086f9e9 问题修复: 修复账号搜索仅返回一个的问题 2020-09-28 21:52:50 +08:00
zengqiao
b22aa62046 修改console包版本 2020-09-28 17:31:08 +08:00
zengqiao
c6e4b60424 kafka-manager 2.0 2020-09-28 15:46:34 +08:00
ZQKC
28d985aaf1 Merge pull request #58 from didi/dev
版本调整为1.1.0
2020-09-25 12:00:38 +08:00
zengqiao
2397cbf80b 调整版本为1.1 2020-09-25 11:56:31 +08:00
zengqiao
a13d9daae3 Merge branch 'master' into dev 2020-09-25 11:44:23 +08:00
zengqiao
c23870e020 bugfix, fix topic overview page 2020-09-17 19:04:33 +08:00
zengqiao
dd2e29dd40 bugfix, fix collect consumer metrics task 2020-09-16 21:04:47 +08:00
ZQKC
74b5700573 Merge pull request #48 from ZQKC/master
扩分区工单详情优化
2020-07-30 21:00:37 +08:00
zengqiao
ba6abea6d8 扩分区工单优化 2020-07-30 20:56:45 +08:00
ZQKC
33b231d512 Merge pull request #3 from didi/master
MR
2020-07-30 20:45:58 +08:00
ZQKC
61f0b67a92 Merge pull request #47 from ZQKC/master
修复扩分区工单
2020-07-30 19:34:56 +08:00
zengqiao
4b679be310 fix execute order partition 2020-07-30 19:30:31 +08:00
ZQKC
a969795677 Merge pull request #40 from hyper-xx/master
增加docker及docker-compose部署方式
2020-07-06 14:00:24 +08:00
xuzhengxi
4f4e7e80fc 增加docker及docker-compose部署方式 2020-07-06 13:19:58 +08:00
ZQKC
2f72cbb627 Merge pull request #38 from yangbajing/feature/postgresql
管理端存储添加 PostgreSQL 数据库支持。
2020-07-05 16:10:16 +08:00
Yang Jing
a460e169ab 修改 Spring 默认配置为使用 MySQL 数据库。 2020-07-05 16:07:33 +08:00
Yang Jing
27ce4d6a0d 为 AccountDao.insert 也提供 PostgreSQL 的 AccountDao.insertOnPG 版。 2020-07-05 00:55:55 +08:00
Yang Jing
ac86f8aded 当使用 PostgreSQL 数据库时,使用 insert on conflict 替代 MySQL 的 replace SQL语句。 2020-07-05 00:42:55 +08:00
Yang Jing
93eca239cb 通过 spring.profiles.active 指定配置来选择后端不同数据库的配置。 2020-07-02 21:39:02 +08:00
Yang Jing
dc5949d497 管理端存储添加 PostgreSQL 数据库支持。 2020-07-02 16:21:34 +08:00
ZQKC
5e24f6b044 Merge pull request #37 from ZQKC/master
fix retention.ms when execute topic order
2020-07-01 11:01:38 +08:00
zengqiao
0cd31e0545 fix retentionTime when create topic 2020-07-01 10:56:55 +08:00
ZQKC
d4dc4b9d0a Merge pull request #1 from didi/dependabot/maven/org.apache.zookeeper-zookeeper-3.4.14
Bump zookeeper from 3.4.6 to 3.4.14
2020-06-08 16:46:33 +08:00
ZQKC
8c6fe40de1 Merge pull request #17 from pierre94/pierre94-add-assembly
Add assembly and operation scripts to simplify deployment
2020-06-05 18:45:41 +08:00
ZQKC
e4dc4bae30 Merge pull request #2 from didi/master
pull
2020-06-05 18:28:28 +08:00
potaaato
d99c21f4d7 Merge pull request #31 from Candieslove/bugfix
fix  重置用户密码&&broker状态修改&&leader rebalance弹框修复
2020-06-05 18:10:31 +08:00
eilenexuzhe
8ef549de80 feat bugfix 2020-06-05 18:03:16 +08:00
ZQKC
1b57758102 Merge pull request #30 from didi/dev
fix underReplicatedPartition and add cluster
2020-06-05 17:08:49 +08:00
ZQKC
553fe30662 Merge pull request #29 from ZQKC/master
fix underReplicatedPartitionCount and add cluster
2020-06-05 17:01:06 +08:00
zengqiao
b6138afe8b fix add cluster 2020-06-05 16:56:00 +08:00
zengqiao
64d64fe6fe bugfix, fix underReplicatedPartitionCount 2020-06-05 15:23:56 +08:00
ZQKC
f29b356b74 Merge pull request #1 from didi/master
pull code
2020-06-05 14:04:11 +08:00
pierrexiong
b5621d1ffd add assembly and operation scripts to simplify deployment
xiongyongxin@hotmail
2020-05-22 20:17:38 +08:00
pierre xiong
66f0da934d Merge pull request #1 from didi/master
pull from didi/kafka-manager
2020-05-22 19:48:35 +08:00
zengqiao
13a90fdd57 add dingding notes 2020-05-19 21:37:15 +08:00
dependabot[bot]
47265bb8d3 Bump zookeeper from 3.4.6 to 3.4.14
Bumps zookeeper from 3.4.6 to 3.4.14.

Signed-off-by: dependabot[bot] <support@github.com>
2020-03-24 06:19:53 +00:00
1530 changed files with 136239 additions and 45242 deletions

223
.gitignore vendored
View File

@@ -1,111 +1,112 @@
### Intellij ### ### Intellij ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
*.iml *.iml
## Directory-based project format: ## Directory-based project format:
.idea/ .idea/
# if you remove the above rule, at least ignore the following: # if you remove the above rule, at least ignore the following:
# User-specific stuff: # User-specific stuff:
# .idea/workspace.xml # .idea/workspace.xml
# .idea/tasks.xml # .idea/tasks.xml
# .idea/dictionaries # .idea/dictionaries
# .idea/shelf # .idea/shelf
# Sensitive or high-churn files: # Sensitive or high-churn files:
.idea/dataSources.ids .idea/dataSources.ids
.idea/dataSources.xml .idea/dataSources.xml
.idea/sqlDataSources.xml .idea/sqlDataSources.xml
.idea/dynamic.xml .idea/dynamic.xml
.idea/uiDesigner.xml .idea/uiDesigner.xml
# Mongo Explorer plugin: # Mongo Explorer plugin:
.idea/mongoSettings.xml .idea/mongoSettings.xml
## File-based project format: ## File-based project format:
*.ipr *.ipr
*.iws *.iws
## Plugin-specific files: ## Plugin-specific files:
# IntelliJ # IntelliJ
/out/ /out/
# mpeltonen/sbt-idea plugin # mpeltonen/sbt-idea plugin
.idea_modules/ .idea_modules/
# JIRA plugin # JIRA plugin
atlassian-ide-plugin.xml atlassian-ide-plugin.xml
# Crashlytics plugin (for Android Studio and IntelliJ) # Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml com_crashlytics_export_strings.xml
crashlytics.properties crashlytics.properties
crashlytics-build.properties crashlytics-build.properties
fabric.properties fabric.properties
### Java ### ### Java ###
*.class *.class
# Mobile Tools for Java (J2ME) # Mobile Tools for Java (J2ME)
.mtj.tmp/ .mtj.tmp/
# Package Files # # Package Files #
*.jar *.jar
*.war *.war
*.ear *.ear
*.tar.gz
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid* # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
### OSX ###
.DS_Store ### OSX ###
.AppleDouble .DS_Store
.LSOverride .AppleDouble
.LSOverride
# Icon must end with two \r
Icon # Icon must end with two \r
Icon
# Thumbnails
._* # Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100 # Files that might appear in the root of a volume
.fseventsd .DocumentRevisions-V100
.Spotlight-V100 .fseventsd
.TemporaryItems .Spotlight-V100
.Trashes .TemporaryItems
.VolumeIcon.icns .Trashes
.VolumeIcon.icns
# Directories potentially created on remote AFP share
.AppleDB # Directories potentially created on remote AFP share
.AppleDesktop .AppleDB
Network Trash Folder .AppleDesktop
Temporary Items Network Trash Folder
.apdisk Temporary Items
.apdisk
/target
target/ /target
*.log target/
*.log.* *.log
*.bak *.log.*
*.vscode *.bak
*/.vscode/* *.vscode
*/.vscode */.vscode/*
*/velocity.log* */.vscode
*/*.log */velocity.log*
*/*.log.* */*.log
web/node_modules/ */*.log.*
web/node_modules/* node_modules/
workspace.xml node_modules/*
/output/* workspace.xml
.gitversion /output/*
*/node_modules/* .gitversion
web/src/main/resources/templates/* out/*
*/out/* dist/
*/dist/* dist/*
.DS_Store km-rest/src/main/resources/templates/
*dependency-reduced-pom*

View File

@@ -7,13 +7,13 @@ Thanks for considering to contribute this project. All issues and pull requests
Before sending pull request to this project, please read and follow guidelines below. Before sending pull request to this project, please read and follow guidelines below.
1. Branch: We only accept pull request on `dev` branch. 1. Branch: We only accept pull request on `dev` branch.
2. Coding style: Follow the coding style used in kafka-manager. 2. Coding style: Follow the coding style used in LogiKM.
3. Commit message: Use English and be aware of your spell. 3. Commit message: Use English and be aware of your spell.
4. Test: Make sure to test your code. 4. Test: Make sure to test your code.
Add device mode, API version, related log, screenshots and other related information in your pull request if possible. Add device mode, API version, related log, screenshots and other related information in your pull request if possible.
NOTE: We assume all your contribution can be licensed under the [Apache License 2.0](LICENSE). NOTE: We assume all your contribution can be licensed under the [AGPL-3.0](LICENSE).
## Issues ## Issues

1094
LICENSE

File diff suppressed because it is too large Load Diff

240
README.md
View File

@@ -1,115 +1,141 @@
---
![kafka-manager-logo](doc/assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
## 主要功能特性
### 集群监控维度
- 多版本集群管控,支持从`0.10.2`到`2.4`版本;
- 集群Topic、Broker等多维度历史与实时关键指标查看
### 集群管控维度
- 集群运维包括逻辑Region方式管理集群
- Broker运维包括优先副本选举
- Topic运维包括创建、查询、扩容、修改属性、数据采样及迁移等
- 消费组运维,包括指定时间或指定偏移两种方式进行重置消费偏移;
### 用户使用维度
- 管理员用户与普通用户视角区分;
- 管理员用户与普通用户权限区分;
--- <p align="center">
<img src="https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png" width = "256" div align=center />
## kafka-manager架构图
</p>
![kafka-manager-arch](doc/assets/images/common/arch.png)
<p align="center">
<a href="https://knowstreaming.com">产品官网</a> |
<a href="https://github.com/didi/KnowStreaming/releases">下载地址</a> |
<a href="https://doc.knowstreaming.com/product">文档资源</a> |
<a href="https://demo.knowstreaming.com">体验环境</a>
</p>
<p align="center">
<!--最近一次提交时间-->
<a href="https://img.shields.io/github/last-commit/didi/KnowStreaming">
<img src="https://img.shields.io/github/last-commit/didi/KnowStreaming" alt="LastCommit">
</a>
<!--最新版本-->
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
<img src="https://img.shields.io/github/v/release/didi/KnowStreaming" alt="License">
</a>
<!--License信息-->
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
<img src="https://img.shields.io/github/license/didi/KnowStreaming" alt="License">
</a>
<!--Open-Issue-->
<a href="https://github.com/didi/KnowStreaming/issues">
<img src="https://img.shields.io/github/issues-raw/didi/KnowStreaming" alt="Issues">
</a>
<!--知识星球-->
<a href="https://z.didi.cn/5gSF9">
<img src="https://img.shields.io/badge/join-%E7%9F%A5%E8%AF%86%E6%98%9F%E7%90%83-red" alt="Slack">
</a>
</p>
--- ---
## 安装手册
### 环境依赖
- `Maven 3.5.0+`(后端打包依赖)
- `node v8.12.0+`(前端打包依赖)
- `Java 8+`(运行环境需要)
- `MySQL`(数据存储)
---
### 环境初始化
执行[create_mysql_table.sql](doc/create_mysql_table.sql)中的SQL命令从而创建所需的MySQL库及表默认创建的库名是`kafka_manager`
```
############# 示例:
mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
```
---
### 打包
执行`mvn install`命令即可。
备注:每一次执行`mvn install`命令,都将在`web/src/main/resources/templates`下面生成最新的前端资源文件,如果`console`模块下的代码没有变更,可以修改`./pom.xml`文件,忽略对`console`模块的打包。 ## `Know Streaming` 简介
---
### 启动
```
############# application.yml 是配置文件
cp web/src/main/resources/application.yml web/target/
cd web/target/
nohup java -jar kafka-manager-web-1.0.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
```
### 使用
本地启动的话,访问`http://localhost:8080`,输入帐号及密码进行登录。更多参考:[kafka-manager使用手册](doc/user_cn_guide.md)
--- `Know Streaming`是一套云原生的Kafka管控平台脱胎于众多互联网内部多年的Kafka运营实践经验专注于Kafka运维管控、监控告警、资源治理、多活容灾等核心场景。在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设提供一系列特色的功能极大地方便了用户和运维人员的日常使用让普通运维人员都能成为Kafka专家。整体具有以下特点
## 相关文档
- [kafka-manager使用手册](doc/user_cn_guide.md)
## 钉钉交流群
搜索群号:`32821440` 或者扫码可入群交流 - 👀 &nbsp;**零侵入、全覆盖**
- 无需侵入改造 `Apache Kafka` ,一键便能纳管 `0.10.x` ~ `3.x.x` 众多版本的Kafka包括 `ZK``Raft` 运行模式的版本,同时在兼容架构上具备良好的扩展性,帮助您提升集群管理水平;
- 🌪️ &nbsp;**零成本、界面化**
![dingding_group](doc/assets/images/common/dingding_group.jpg) - 提炼高频 CLI 能力,设计合理的产品路径,提供清新美观的 GUI 界面,支持 Cluster、Broker、Zookeeper、Topic、ConsumerGroup、Message、ACL、Connect 等组件 GUI 管理普通用户5分钟即可上手
- 👏 &nbsp;**云原生、插件化**
## 项目成员 - 基于云原生构建,具备水平扩展能力,只需要增加节点即可获取更强的采集及对外服务能力,提供众多可热插拔的企业级特性,覆盖可观测性生态整合、资源治理、多活容灾等核心场景;
### 内部核心人员 - 🚀 &nbsp;**专业能力**
- 集群管理:支持一键纳管,健康分析、核心组件观测 等功能;
`iceyuhui``liuyaguang``limengmonty``zhangliangmike``nullhuangyiming``zengqiao``eilenexuzhe``huangjiaweihjw` - 观测提升:多维度指标观测大盘、观测指标最佳实践 等功能;
- 异常巡检:集群多维度健康巡检、集群多维度健康分 等功能;
- 能力增强集群负载均衡、Topic扩缩副本、Topic副本迁移 等功能;
### 外部贡献者
`fangjunyu``zhoutaiyang` &nbsp;
**产品图**
## 协议 <p align="center">
`kafka-manager`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE) <img src="http://img-ys011.didistatic.com/static/dc2img/do1_sPmS4SNLX9m1zlpmHaLJ" width = "768" height = "473" div align=center />
</p>
## 文档资源
**`开发相关手册`**
- [打包编译手册](docs/install_guide/源码编译打包手册.md)
- [单机部署手册](docs/install_guide/单机部署手册.md)
- [版本升级手册](docs/install_guide/版本升级手册.md)
- [本地源码启动手册](docs/dev_guide/本地源码启动手册.md)
**`产品相关手册`**
- [产品使用指南](docs/user_guide/用户使用手册.md)
- [2.x与3.x新旧对比手册](docs/user_guide/新旧对比手册.md)
- [FAQ](docs/user_guide/faq.md)
**点击 [这里](https://doc.knowstreaming.com/product),也可以从官网获取到更多文档**
## 成为社区贡献者
点击 [这里](CONTRIBUTING.md),了解如何成为 Know Streaming 的贡献者
## 加入技术交流群
**`1、知识星球`**
<p align="left">
<img src="https://user-images.githubusercontent.com/71620349/185357284-fdff1dad-c5e9-4ddf-9a82-0be1c970980d.JPG" height = "180" div align=left />
</p>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
👍 我们正在组建国内最大,最权威的 **[Kafka中文社区](https://z.didi.cn/5gSF9)**
在这里你可以结交各大互联网的 Kafka大佬 以及 4000+ Kafka爱好者一起实现知识共享实时掌控最新行业资讯期待 👏 &nbsp; 您的加入中~ https://z.didi.cn/5gSF9
有问必答~ 互动有礼~
PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况~!如使用版本、操作步骤、报错/警告信息等方便大V们快速解答
&nbsp;
**`2、微信群`**
微信加群:添加`mike_zhangliang`或`PenceXie`的微信号,备注 KnowStreaming 加群。
<br/>
<img width="116" alt="wx" src="https://user-images.githubusercontent.com/71620349/192257217-c4ebc16c-3ad9-485d-a914-5911d3a4f46b.png">
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=didi/KnowStreaming&type=Date)](https://star-history.com/#didi/KnowStreaming&Date)

396
Releases_Notes.md Normal file
View File

@@ -0,0 +1,396 @@
## v3.0.0
**Bug修复**
- 修复 Group 指标防重复采集不生效问题
- 修复自动创建 ES 索引模版失败问题
- 修复 Group+Topic 列表中存在已删除Topic的问题
- 修复使用 MySQL-8 ,因兼容问题, start_time 信息为 NULL 时,会导致创建任务失败的问题
- 修复 Group 信息表更新时,出现死锁的问题
- 修复图表补点逻辑与图表时间范围不适配的问题
**体验优化**
- 按照资源类别,拆分健康巡检任务
- 优化 Group 详情页的指标为实时获取
- 图表拖拽排序支持用户级存储
- 多集群列表 ZK 信息展示兼容无 ZK 情况
- Topic 详情消息预览支持复制功能
- 部分内容大数字支持千位分割符展示
**新增**
- 集群信息中,新增 Zookeeper 客户端配置字段
- 集群信息中,新增 Kafka 集群运行模式字段
- 新增 docker-compose 的部署方式
## v3.0.0-beta.3
**文档**
- FAQ 补充权限识别失败问题的说明
- 同步更新文档,保持与官网一致
**Bug修复**
- Offset 信息获取时,过滤掉无 Leader 的分区
- 升级 oshi-core 版本至 5.6.1 版本,修复 Windows 系统获取系统指标失败问题
- 修复 JMX 连接被关闭后,未进行重建的问题
- 修复因 DB 中 Broker 信息不存在导致 TotalLogSize 指标获取时抛空指针问题
- 修复 dml-logi.sql 中SQL 注释错误的问题
- 修复 startup.sh 中,识别操作系统类型错误的问题
- 修复配置管理页面删除配置失败的问题
- 修复系统管理应用文件引用路径
- 修复 Topic Messages 详情提示信息点击跳转 404 的问题
- 修复扩副本时,当前副本数不显示问题
**体验优化**
- Topic-Messages 页面增加返回数据的排序以及按照Earliest/Latest的获取方式
- 优化 GroupOffsetResetEnum 类名为 OffsetTypeEnum,使得类名含义更准确
- 移动 KafkaZKDAO 类,及 Kafka Znode 实体类的位置,使得 Kafka Zookeeper DAO 更加内聚及便于识别
- 后端补充 Overview 页面指标排序的功能
- 前端 Webpack 配置优化
- Cluster Overview 图表取消放大展示功能
- 列表页增加手动刷新功能
- 接入/编辑集群,优化 JMX-PORTVersion 信息的回显优化JMX信息的展示
- 提高登录页面图片展示清晰度
- 部分样式和文案优化
---
## v3.0.0-beta.2
**文档**
- 新增登录系统对接文档
- 优化前端工程打包构建部分文档说明
- FAQ补充KnowStreaming连接特定JMX IP的说明
**Bug修复**
- 修复logi_security_oplog表字段过短导致删除Topic等操作无法记录的问题
- 修复ES查询时抛java.lang.NumberFormatException: For input string: "{"value":0,"relation":"eq"}" 问题
- 修复LogStartOffset和LogEndOffset指标单位错误问题
- 修复进行副本变更时旧副本数为NULL的问题
- 修复集群Group列表在第二页搜索时搜索时返回的分页信息错误问题
- 修复重置Offset时返回的错误信息提示不一致的问题
- 修复集群查看系统查看LoadRebalance等页面权限点缺失问题
- 修复查询不存在的Topic时错误信息提示不明显的问题
- 修复Windows用户打包前端工程报错的问题
- package-lock.json锁定前端依赖版本号修复因依赖自动升级导致打包失败等问题
- 系统管理子应用补充后端返回的Code码拦截解决后端接口返回报错不展示的问题
- 修复用户登出后,依旧可以访问系统的问题
- 修复巡检任务配置时,数值显示错误的问题
- 修复Broker/Topic Overview 图表和图表详情问题
- 修复Job扩缩副本任务明细数据错误的问题
- 修复重置Offset时分区IDOffset数值无限制问题
- 修复扩缩/迁移副本时无法选中Kafka系统Topic的问题
- 修复Topic的Config页面编辑表单时不能正确回显当前值的问题
- 修复Broker Card返回数据后依旧展示加载态的问题
**体验优化**
- 优化默认用户密码为 admin/admin
- 缩短新增集群后,集群信息加载的耗时
- 集群Broker列表增加Controller角色信息
- 副本变更任务结束后,增加进行优先副本选举的操作
- Task模块任务分为Metrics、Common、Metadata三类任务每类任务配备独立线程池减少对Job模块的线程池以及不同类任务之间的相互影响
- 删除代码中存在的多余无用文件
- 自动新增ES索引模版及近7天索引减少用户搭建时需要做的事项
- 优化前端工程打包流程
- 优化登录页文案页面左侧栏内容单集群详情样式Topic列表趋势图等
- 首次进入Broker/Topic图表详情时进行预缓存数据从而优化体验
- 优化Topic详情Partition Tab的展示
- 多集群列表页增加编辑功能
- 优化副本变更时,迁移时间支持分钟级别粒度
- logi-security版本升级至2.10.13
- logi-elasticsearch-client版本升级至1.0.24
**能力提升**
- 支持Ldap登录认证
---
## v3.0.0-beta.1
**文档**
- 新增Task模块说明文档
- FAQ补充 `Specified key was too long; max key length is 767 bytes ` 错误说明
- FAQ补充 `出现ESIndexNotFoundException报错` 错误说明
**Bug修复**
- 修复 Consumer 点击 Stop 未停止检索的问题
- 修复创建/编辑角色权限报错问题
- 修复多集群管理/单集群详情均衡卡片状态错误问题
- 修复版本列表未排序问题
- 修复Raft集群Controller信息不断记录问题
- 修复部分版本消费组描述信息获取失败问题
- 修复分区Offset获取失败的日志中缺少Topic名称信息问题
- 修复GitHub图地址错误及图裂问题
- 修复Broker默认使用的地址和注释不一致问题
- 修复 Consumer 列表分页不生效问题
- 修复操作记录表operation_methods字段缺少默认值问题
- 修复集群均衡表中move_broker_list字段无效的问题
- 修复KafkaUser、KafkaACL信息获取时日志一直重复提示不支持问题
- 修复指标缺失时,曲线出现掉底的问题
**体验优化**
- 优化前端构建时间和打包体积,增加依赖打包的分包策略
- 优化产品样式和文案展示
- 优化ES客户端数为可配置
- 优化日志中大量出现的MySQL Key冲突日志
**能力提升**
- 增加周期任务用于主动创建缺少的ES模版及索引的能力减少额外的脚本操作
- 增加JMX连接的Broker地址可选择的能力
---
## v3.0.0-beta.0
**1、多集群管理**
- 增加健康监测体系、关键组件&指标 GUI 展示
- 增加 2.8.x 以上 Kafka 集群接入,覆盖 0.10.x-3.x
- 删除逻辑集群、共享集群、Region 概念
**2、Cluster 管理**
- 增加集群概览信息、集群配置变更记录
- 增加 Cluster 健康分,健康检查规则支持自定义配置
- 增加 Cluster 关键指标统计和 GUI 展示,支持自定义配置
- 增加 Cluster 层 I/O、Disk 的 Load Reblance 功能,支持定时均衡任务(企业版)
- 删除限流、鉴权功能
- 删除 APPID 概念
**3、Broker 管理**
- 增加 Broker 健康分
- 增加 Broker 关键指标统计和 GUI 展示,支持自定义配置
- 增加 Broker 参数配置功能,需重启生效
- 增加 Controller 变更记录
- 增加 Broker Datalogs 记录
- 删除 Leader Rebalance 功能
- 删除 Broker 优先副本选举
**4、Topic 管理**
- 增加 Topic 健康分
- 增加 Topic 关键指标统计和 GUI 展示,支持自定义配置
- 增加 Topic 参数配置功能,可实时生效
- 增加 Topic 批量迁移、Topic 批量扩缩副本功能
- 增加查看系统 Topic 功能
- 优化 Partition 分布的 GUI 展示
- 优化 Topic Message 数据采样
- 删除 Topic 过期概念
- 删除 Topic 申请配额功能
**5、Consumer 管理**
- 优化了 ConsumerGroup 展示形式,增加 Consumer Lag 的 GUI 展示
**6、ACL 管理**
- 增加原生 ACL GUI 配置功能,可配置生产、消费、自定义多种组合权限
- 增加 KafkaUser 功能,可自定义新增 KafkaUser
**7、消息测试企业版**
- 增加生产者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
- 增加消费者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版)
**8、Job**
- 优化 Job 模块,支持任务进度管理
**9、系统管理**
- 优化用户、角色管理体系,支持自定义角色配置页面及操作权限
- 优化审计日志信息
- 删除多租户体系
- 删除工单流程
---
## v2.6.0
版本上线时间2022-01-24
### 能力提升
- 增加简单回退工具类
### 体验优化
- 补充周期任务说明文档
- 补充集群安装部署使用说明文档
- 升级Swagger、SpringFramework、SpringBoot、ECharts版本
- 优化Task模块的日志输出
- 优化cron表达式解析失败后退出无任何日志提示问题
- Ldap用户接入时增加部门及邮箱信息等
- 对Jmx模块增加连接失败后的回退机制及错误日志优化
- 增加线程池、客户端池可配置
- 删除无用的jmx_prometheus_javaagent-0.14.0.jar
- 优化迁移任务名称
- 优化创建Region时Region容量信息不能立即被更新问题
- 引入lombok
- 更新视频教程
- 优化kcm_script.sh脚本中的LogiKM地址为可通过程序传入
- 第三方接口及网关接口,增加是否跳过登录的开关
- extends模块相关配置调整为非必须在application.yml中配置
### bug修复
- 修复批量往DB写入空指标数组时报SQL语法异常的问题
- 修复网关增加配置及修改配置时version不变化问题
- 修复集群列表页,提示框遮挡问题
- 修复对高版本Broker元信息协议解析失败的问题
- 修复Dockerfile执行时提示缺少application.yml文件的问题
- 修复逻辑集群更新时,会报空指针的问题
## v2.5.0
版本上线时间2021-07-10
### 体验优化
- 更改产品名为LogiKM
- 更新产品图标
## v2.4.1+
版本上线时间2021-05-21
### 能力提升
- 增加直接增加权限和配额的接口(v2.4.1)
- 增加接口调用可绕过登录的功能(v2.4.1)
### 体验优化
- Tomcat 版本提升至8.5.66(v2.4.2)
- op接口优化拆分util接口为topic、leader两类接口(v2.4.1)
- 简化Gateway配置的Key长度(v2.4.1)
### bug修复
- 修复页面展示版本错误问题(v2.4.2)
## v2.4.0
版本上线时间2021-05-18
### 能力提升
- 增加App与Topic自动化审批开关
- Broker元信息中增加Rack信息
- 升级MySQL 驱动支持MySQL 8+
- 增加操作记录查询界面
### 体验优化
- FAQ告警组说明优化
- 用户手册共享及 独享集群概念优化
- 用户管理界面,前端限制用户删除自己
### bug修复
- 修复op-util类中创建Topic失败的接口
- 周期同步Topic到DB的任务修复将Topic列表查询从缓存调整为直接查DB
- 应用下线审批失败的功能修复将权限为0(无权限)的数据进行过滤
- 修复登录及权限绕过的漏洞
- 修复研发角色展示接入集群、暂停监控等按钮的问题
## v2.3.0
版本上线时间2021-02-08
### 能力提升
- 新增支持docker化部署
- 可指定Broker作为候选controller
- 可新增并管理网关配置
- 可获取消费组状态
- 增加集群的JMX认证
### 体验优化
- 优化编辑用户角色、修改密码的流程
- 新增consumerID的搜索功能
- 优化“Topic连接信息”、“消费组重置消费偏移”、“修改Topic保存时间”的文案提示
- 在相应位置增加《资源申请文档》链接
### bug修复
- 修复Broker监控图表时间轴展示错误的问题
- 修复创建夜莺监控告警规则时,使用的告警周期的单位不正确的问题
## v2.2.0
版本上线时间2021-01-25
### 能力提升
- 优化工单批量操作流程
- 增加获取Topic75分位/99分位的实时耗时数据
- 增加定时任务可将无主未落DB的Topic定期写入DB
### 体验优化
- 在相应位置增加《集群接入文档》链接
- 优化物理集群、逻辑集群含义
- 在Topic详情页、Topic扩分区操作弹窗增加展示Topic所属Region的信息
- 优化Topic审批时Topic数据保存时间的配置流程
- 优化Topic/应用申请、审批时的错误提示文案
- 优化Topic数据采样的操作项文案
- 优化运维人员删除Topic时的提示文案
- 优化运维人员删除Region的删除逻辑与提示文案
- 优化运维人员删除逻辑集群的提示文案
- 优化上传集群配置文件时的文件类型限制条件
### bug修复
- 修复填写应用名称时校验特殊字符出错的问题
- 修复普通用户越权访问应用详情的问题
- 修复由于Kafka版本升级导致的数据压缩格式无法获取的问题
- 修复删除逻辑集群或Topic之后界面依旧展示的问题
- 修复进行Leader rebalance操作时执行结果重复提示的问题
## v2.1.0
版本上线时间2020-12-19
### 体验优化
- 优化页面加载时的背景样式
- 优化普通用户申请Topic权限的流程
- 优化Topic申请配额、申请分区的权限限制
- 优化取消Topic权限的文案提示
- 优化申请配额表单的表单项名称
- 优化重置消费偏移的操作流程
- 优化创建Topic迁移任务的表单内容
- 优化Topic扩分区操作的弹窗界面样式
- 优化集群Broker监控可视化图表样式
- 优化创建逻辑集群的表单内容
- 优化集群安全协议的提示文案
### bug修复
- 修复偶发性重置消费偏移失败的问题

655
bin/init_es_template.sh Normal file
View File

@@ -0,0 +1,655 @@
# Elasticsearch connection settings; adjust if ES runs elsewhere.
esaddr=127.0.0.1
port=8060

# Probe the cluster before creating templates; bail out early when unreachable.
# Fixed: "Elasticserach" typo and the bare `exit` (now an explicit non-zero status).
if ! curl -s --connect-timeout 10 -o /dev/null "http://${esaddr}:${port}/_cat/nodes" >/dev/null 2>&1; then
    echo "Elasticsearch 访问失败, 请安装完后检查并重新执行该脚本 "
    exit 1
fi
curl -s --connect-timeout 10 -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_broker_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_broker_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"metrics" : {
"properties" : {
"NetworkProcessorAvgIdle" : {
"type" : "float"
},
"UnderReplicatedPartitions" : {
"type" : "float"
},
"BytesIn_min_15" : {
"type" : "float"
},
"HealthCheckTotal" : {
"type" : "float"
},
"RequestHandlerAvgIdle" : {
"type" : "float"
},
"connectionsCount" : {
"type" : "float"
},
"BytesIn_min_5" : {
"type" : "float"
},
"HealthScore" : {
"type" : "float"
},
"BytesOut" : {
"type" : "float"
},
"BytesOut_min_15" : {
"type" : "float"
},
"BytesIn" : {
"type" : "float"
},
"BytesOut_min_5" : {
"type" : "float"
},
"TotalRequestQueueSize" : {
"type" : "float"
},
"MessagesIn" : {
"type" : "float"
},
"TotalProduceRequests" : {
"type" : "float"
},
"HealthCheckPassed" : {
"type" : "float"
},
"TotalResponseQueueSize" : {
"type" : "float"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_cluster_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_cluster_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"metrics" : {
"properties" : {
"Connections" : {
"type" : "double"
},
"BytesIn_min_15" : {
"type" : "double"
},
"PartitionURP" : {
"type" : "double"
},
"HealthScore_Topics" : {
"type" : "double"
},
"EventQueueSize" : {
"type" : "double"
},
"ActiveControllerCount" : {
"type" : "double"
},
"GroupDeads" : {
"type" : "double"
},
"BytesIn_min_5" : {
"type" : "double"
},
"HealthCheckTotal_Topics" : {
"type" : "double"
},
"Partitions" : {
"type" : "double"
},
"BytesOut" : {
"type" : "double"
},
"Groups" : {
"type" : "double"
},
"BytesOut_min_15" : {
"type" : "double"
},
"TotalRequestQueueSize" : {
"type" : "double"
},
"HealthCheckPassed_Groups" : {
"type" : "double"
},
"TotalProduceRequests" : {
"type" : "double"
},
"HealthCheckPassed" : {
"type" : "double"
},
"TotalLogSize" : {
"type" : "double"
},
"GroupEmptys" : {
"type" : "double"
},
"PartitionNoLeader" : {
"type" : "double"
},
"HealthScore_Brokers" : {
"type" : "double"
},
"Messages" : {
"type" : "double"
},
"Topics" : {
"type" : "double"
},
"PartitionMinISR_E" : {
"type" : "double"
},
"HealthCheckTotal" : {
"type" : "double"
},
"Brokers" : {
"type" : "double"
},
"Replicas" : {
"type" : "double"
},
"HealthCheckTotal_Groups" : {
"type" : "double"
},
"GroupRebalances" : {
"type" : "double"
},
"MessageIn" : {
"type" : "double"
},
"HealthScore" : {
"type" : "double"
},
"HealthCheckPassed_Topics" : {
"type" : "double"
},
"HealthCheckTotal_Brokers" : {
"type" : "double"
},
"PartitionMinISR_S" : {
"type" : "double"
},
"BytesIn" : {
"type" : "double"
},
"BytesOut_min_5" : {
"type" : "double"
},
"GroupActives" : {
"type" : "double"
},
"MessagesIn" : {
"type" : "double"
},
"GroupReBalances" : {
"type" : "double"
},
"HealthCheckPassed_Brokers" : {
"type" : "double"
},
"HealthScore_Groups" : {
"type" : "double"
},
"TotalResponseQueueSize" : {
"type" : "double"
},
"Zookeepers" : {
"type" : "double"
},
"LeaderMessages" : {
"type" : "double"
},
"HealthScore_Cluster" : {
"type" : "double"
},
"HealthCheckPassed_Cluster" : {
"type" : "double"
},
"HealthCheckTotal_Cluster" : {
"type" : "double"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"type" : "date"
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_group_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_group_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"group" : {
"type" : "keyword"
},
"partitionId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"topic" : {
"type" : "keyword"
},
"metrics" : {
"properties" : {
"HealthScore" : {
"type" : "float"
},
"Lag" : {
"type" : "float"
},
"OffsetConsumed" : {
"type" : "float"
},
"HealthCheckTotal" : {
"type" : "float"
},
"HealthCheckPassed" : {
"type" : "float"
}
}
},
"groupMetric" : {
"type" : "keyword"
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_partition_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_partition_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"partitionId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"topic" : {
"type" : "keyword"
},
"metrics" : {
"properties" : {
"LogStartOffset" : {
"type" : "float"
},
"Messages" : {
"type" : "float"
},
"LogEndOffset" : {
"type" : "float"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
# Create the index template for per-replica metrics.
# Fixed two defects in the original:
#   1. index_patterns pointed at "ks_kafka_partition_metric*" (copy-paste from the
#      partition template), so replication indices never matched this template.
#   2. The payload was corrupted by pasted terminal output ("[root@... ]# cat ...")
#      followed by a stray PUT request body; the JSON is rebuilt cleanly below.
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{
  "order" : 10,
  "index_patterns" : [
    "ks_kafka_replication_metric*"
  ],
  "settings" : {
    "index" : {
      "number_of_shards" : "10"
    }
  },
  "mappings" : {
    "properties" : {
      "brokerId" : {
        "type" : "long"
      },
      "partitionId" : {
        "type" : "long"
      },
      "routingValue" : {
        "type" : "text",
        "fields" : {
          "keyword" : {
            "ignore_above" : 256,
            "type" : "keyword"
          }
        }
      },
      "clusterPhyId" : {
        "type" : "long"
      },
      "topic" : {
        "type" : "keyword"
      },
      "metrics" : {
        "properties" : {
          "LogStartOffset" : {
            "type" : "float"
          },
          "Messages" : {
            "type" : "float"
          },
          "LogEndOffset" : {
            "type" : "float"
          }
        }
      },
      "key" : {
        "type" : "text",
        "fields" : {
          "keyword" : {
            "ignore_above" : 256,
            "type" : "keyword"
          }
        }
      },
      "timestamp" : {
        "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
        "index" : true,
        "type" : "date",
        "doc_values" : true
      }
    }
  },
  "aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_topic_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"topic" : {
"type" : "keyword"
},
"clusterPhyId" : {
"type" : "long"
},
"metrics" : {
"properties" : {
"BytesIn_min_15" : {
"type" : "float"
},
"Messages" : {
"type" : "float"
},
"BytesRejected" : {
"type" : "float"
},
"PartitionURP" : {
"type" : "float"
},
"HealthCheckTotal" : {
"type" : "float"
},
"ReplicationCount" : {
"type" : "float"
},
"ReplicationBytesOut" : {
"type" : "float"
},
"ReplicationBytesIn" : {
"type" : "float"
},
"FailedFetchRequests" : {
"type" : "float"
},
"BytesIn_min_5" : {
"type" : "float"
},
"HealthScore" : {
"type" : "float"
},
"LogSize" : {
"type" : "float"
},
"BytesOut" : {
"type" : "float"
},
"BytesOut_min_15" : {
"type" : "float"
},
"FailedProduceRequests" : {
"type" : "float"
},
"BytesIn" : {
"type" : "float"
},
"BytesOut_min_5" : {
"type" : "float"
},
"MessagesIn" : {
"type" : "float"
},
"TotalProduceRequests" : {
"type" : "float"
},
"HealthCheckPassed" : {
"type" : "float"
}
}
},
"brokerAgg" : {
"type" : "keyword"
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
# Pre-create today's and the previous six days' daily indices for every metric
# family, so collectors can write immediately without relying on auto-creation.
# NOTE(review): `date -d "<n> day ago"` is GNU date syntax — not portable to
# BSD/macOS date; confirm target platforms.
for i in {0..6};
do
# Suffix of the daily index, e.g. "_2022-09-29".
logdate=_$(date -d "${i} day ago" +%Y-%m-%d)
# Chain with && so a failure of any PUT falls through to `exit 2`.
curl -s --connect-timeout 10 -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_broker_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_cluster_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \
exit 2
done

16
bin/shutdown.sh Normal file
View File

@@ -0,0 +1,16 @@
#!/bin/bash

# Stops the running ks-km server by locating its JVM process and sending SIGTERM.

cd `dirname $0`/../libs
target_dir=`pwd`

# Find the java process that was launched from this installation's libs dir
# (the ${target_dir} grep scopes the match to this install, not any ks-km).
pid=`ps ax | grep -i 'ks-km' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'`
if [ -z "$pid" ] ; then
    echo "No ks-km running."
    # Fixed: `exit -1` is outside the valid 0-255 status range; use 1.
    exit 1
fi

echo "The ks-km (${pid}) is running..."

# Plain kill sends SIGTERM, letting the JVM run its shutdown hooks.
# ${pid} is intentionally unquoted: it may contain several PIDs.
kill ${pid}

echo "Send shutdown request to ks-km (${pid}) OK"

82
bin/startup.sh Normal file
View File

@@ -0,0 +1,82 @@
# Launches the ks-km (KnowStreaming) Spring Boot server with version-appropriate JVM flags.

error_exit ()
{
    echo "ERROR: $1 !!"
    exit 1
}

# Locate a JDK: try JAVA_HOME, then common install dirs, then platform-specific discovery.
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME

if [ -z "$JAVA_HOME" ]; then
    if [ "Darwin" = "$(uname -s)" ]; then
        if [ -x '/usr/libexec/java_home' ] ; then
            export JAVA_HOME=`/usr/libexec/java_home`
        elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
            export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
        fi
    else
        # Resolve javac through symlinks (e.g. /etc/alternatives) to find the JDK root.
        JAVA_PATH=`dirname $(readlink -f $(which javac))`
        if [ "x$JAVA_PATH" != "x" ]; then
            export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
        fi
    fi
    if [ -z "$JAVA_HOME" ]; then
        error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
    fi
fi

export WEB_SERVER="ks-km"
export JAVA_HOME
export JAVA="$JAVA_HOME/bin/java"
export BASE_DIR=`cd $(dirname $0)/..; pwd`
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/

#===========================================================================================
# JVM Configuration
#===========================================================================================
JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"

# Several GC-logging flags were removed in JDK 9+; pick flags by major version.
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
    JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400"
else
    JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
    JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi

JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/libs/${WEB_SERVER}.jar"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"

if [ ! -d "${BASE_DIR}/logs" ]; then
    mkdir ${BASE_DIR}/logs
fi

echo "$JAVA ${JAVA_OPT}"

# check the start.out log output file
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
    touch "${BASE_DIR}/logs/start.out"
fi

# start
# Fixed: the banner echo was backgrounded with '&', racing with the nohup append
# below (both writing start.out concurrently); write it synchronously instead.
echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1
nohup $JAVA ${JAVA_OPT} >> ${BASE_DIR}/logs/start.out 2>&1 &
echo "${WEB_SERVER} is starting, you can check the ${BASE_DIR}/logs/start.out"

View File

@@ -1,50 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven module: common entities/utilities shared by the kafka-manager modules. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-common</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>1.0.0-SNAPSHOT</version>
</parent>
<properties>
<kafka-manager.revision>1.0.0-SNAPSHOT</kafka-manager.revision>
<!-- Unit tests are skipped for this module's build. -->
<maven.test.skip>true</maven.test.skip>
<downloadSources>true</downloadSources>
<java_source_version>1.8</java_source_version>
<java_target_version>1.8</java_target_version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
</properties>
<dependencies>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>1.9.3</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>2.10.0</version>
</dependency>
<!-- Versions for the dependencies below are presumably managed by the parent POM — TODO confirm. -->
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -1,21 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Shared global constants and runtime caches for kafka-manager.
 *
 * @author zengqiao
 * @date 20/2/28
 */
public class Constant {
    /** Message returned when kafka-manager hits an unexpected internal error. */
    public static final String KAFKA_MANAGER_INNER_ERROR = "kafka-manager inner error";

    // metrics-type id -> JMX MBean-name lists; presumably populated elsewhere at
    // startup (empty here) — TODO confirm against the metrics collectors.
    public final static Map<Integer, List<String>> BROKER_METRICS_TYPE_MBEAN_NAME_MAP = new ConcurrentHashMap<>();
    public final static Map<Integer, List<String>> TOPIC_METRICS_TYPE_MBEAN_NAME_MAP = new ConcurrentHashMap<>();

    // Named loggers used to route collector/API metrics to dedicated log outputs.
    public static final String COLLECTOR_METRICS_LOGGER = "COLLECTOR_METRICS_LOGGER";
    public static final String API_METRICS_LOGGER = "API_METRICS_LOGGER";
}

View File

@@ -1,23 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
 * Integer identifiers for the metric categories collected for brokers and topics.
 */
public class MetricsType {
    /**
     * Broker traffic detail
     */
    public static final int BROKER_FLOW_DETAIL = 0;
    public static final int BROKER_TO_DB_METRICS = 1; // broker metrics persisted to the DB
    public static final int BROKER_REAL_TIME_METRICS = 2; // NOTE(review): original comment duplicated the DB one; presumably real-time broker metrics — TODO confirm
    public static final int BROKER_OVER_VIEW_METRICS = 3; // broker status overview metrics
    public static final int BROKER_OVER_ALL_METRICS = 4; // broker overall status metrics
    public static final int BROKER_ANALYSIS_METRICS = 5; // broker analysis metrics
    public static final int BROKER_TOPIC_ANALYSIS_METRICS = 6; // NOTE(review): original comment duplicated the analysis one; presumably per-topic broker analysis — TODO confirm
    /**
     * Topic traffic detail
     */
    public static final int TOPIC_FLOW_DETAIL = 100;
    public static final int TOPIC_FLOW_OVERVIEW = 101;
    public static final int TOPIC_METRICS_TO_DB = 102;
}

View File

@@ -1,35 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
 * Where a consumer group's offsets are stored.
 *
 * @author limeng
 * @date 2017/11/21
 */
public enum OffsetStoreLocation {
    ZOOKEEPER("zookeeper"),
    BROKER("broker");

    /** Lower-case identifier used in requests/configuration. */
    private final String location;

    OffsetStoreLocation(String location) {
        this.location = location;
    }

    public String getLocation() {
        return location;
    }

    /**
     * Resolves the enum constant for the given identifier.
     *
     * @param location identifier to look up; may be null
     * @return the matching constant, or null when unknown or null input
     */
    public static OffsetStoreLocation getOffsetStoreLocation(String location) {
        if (location != null) {
            for (OffsetStoreLocation candidate : values()) {
                if (candidate.location.equals(location)) {
                    return candidate;
                }
            }
        }
        return null;
    }
}

View File

@@ -1,35 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
 * Numeric status codes returned by kafka-manager APIs.
 */
public class StatusCode {
    /*
     * kafka-manager status code: 17000 ~ 17999
     *
     * OK                 - 0
     * parameter error    - 10000
     * resource not ready - 10001
     */
    /*
     * Codes agreed with API consumers
     */
    public static final Integer SUCCESS = 0;
    public static final Integer PARAM_ERROR = 10000; // parameter error
    public static final Integer RES_UNREADY = 10001; // resource not ready
    public static final Integer MY_SQL_SELECT_ERROR = 17210; // MySQL select failed
    public static final Integer MY_SQL_INSERT_ERROR = 17211; // MySQL insert failed
    public static final Integer MY_SQL_DELETE_ERROR = 17212; // MySQL delete failed
    public static final Integer MY_SQL_UPDATE_ERROR = 17213; // MySQL update failed
    public static final Integer MY_SQL_REPLACE_ERROR = 17214; // MySQL replace failed
    public static final Integer OPERATION_ERROR = 17300; // requested operation failed
    /**
     * Topic-related errors
     */
    public static final Integer TOPIC_EXISTED = 17400; // topic already exists
    public static final Integer PARTIAL_SUCESS = 17700; // operation partially succeeded (name typo "SUCESS" kept for caller compatibility)
}

View File

@@ -1,71 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
/**
 * Comparison operators usable in monitor alarm conditions.
 *
 * @author zengqiao
 * @date 19/5/12
 */
public enum MonitorConditionType {
    BIGGER(">", "大于"),
    EQUAL("=", "等于"),
    LESS("<", "小于"),
    NOT_EQUAL("!=", "不等于");

    /** Operator symbol carried in monitor rules. */
    private final String name;
    /** Human-readable description of the operator. */
    private final String message;

    MonitorConditionType(String name, String message) {
        this.name = name;
        this.message = message;
    }

    /** True when the supplied symbol is one of the supported operators. */
    public static boolean legal(String name) {
        for (MonitorConditionType candidate : values()) {
            if (candidate.name.equals(name)) {
                return true;
            }
        }
        return false;
    }

    /** Exposes every (symbol, description) pair, e.g. for listing in a UI. */
    public static List<AbstractMap.SimpleEntry<String, String>> toList() {
        List<AbstractMap.SimpleEntry<String, String>> conditionTypeList = new ArrayList<>(values().length);
        for (MonitorConditionType candidate : values()) {
            conditionTypeList.add(new AbstractMap.SimpleEntry<>(candidate.name, candidate.message));
        }
        return conditionTypeList;
    }

    /**
     * Evaluates {@code data1 <operation> data2}.
     * Unknown operators evaluate to false; equality uses Double.equals.
     */
    public static boolean matchCondition(Double data1, Double data2, String operation) {
        switch (operation) {
            case ">":
                return data1 > data2;
            case "<":
                return data1 < data2;
            case "=":
                return data1.equals(data2);
            case "!=":
                return !data1.equals(data2);
            default:
                return false;
        }
    }

    @Override
    public String toString() {
        return "ConditionType{" +
                "name='" + name + '\'' +
                ", message='" + message + '\'' +
                '}';
    }
}

View File

@@ -1,19 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
/**
 * Tri-state outcome of a monitor rule match: unknown / matched / not matched.
 *
 * @author zengqiao
 * @date 20/3/18
 */
public enum MonitorMatchStatus {
    UNKNOWN(0),
    YES(1),
    NO(2);

    // NOTE(review): public mutable field kept for backward compatibility with
    // existing callers; enum state would normally be private final with a getter.
    public Integer status;

    MonitorMatchStatus(Integer status) {
        this.status = status;
    }
}

View File

@@ -1,59 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
/**
 * Metric categories that monitor alarms can be defined on.
 *
 * @author zengqiao
 * @date 19/5/12
 */
public enum MonitorMetricsType {
    BYTES_IN("BytesIn", "流入流量"),
    BYTES_OUT("BytesOut", "流出流量"),
    LAG("Lag", "消费组Lag");

    /** Metric identifier used for matching. */
    private final String name;
    /** Human-readable description of the metric. */
    private final String message;

    MonitorMetricsType(String name, String message) {
        this.name = name;
        this.message = message;
    }

    public String getName() {
        return name;
    }

    public String getMessage() {
        return message;
    }

    /** True when the supplied identifier names one of the defined metric types. */
    public static boolean legal(String name) {
        for (MonitorMetricsType candidate : values()) {
            if (candidate.name.equals(name)) {
                return true;
            }
        }
        return false;
    }

    /** Exposes every (identifier, description) pair, e.g. for listing in a UI. */
    public static List<AbstractMap.SimpleEntry<String, String>> toList() {
        List<AbstractMap.SimpleEntry<String, String>> metricTypeList = new ArrayList<>(values().length);
        for (MonitorMetricsType candidate : values()) {
            metricTypeList.add(new AbstractMap.SimpleEntry<>(candidate.name, candidate.message));
        }
        return metricTypeList;
    }

    @Override
    public String toString() {
        return "MetricType{" +
                "name='" + name + '\'' +
                ", message='" + message + '\'' +
                '}';
    }
}

View File

@@ -1,56 +0,0 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
/**
 * Notification channels that monitor alarms can be delivered through.
 *
 * @author huangyiminghappy@163.com
 * @date 2019-05-06
 */
public enum MonitorNotifyType {
    KAFKA_MESSAGE("KAFKA", "告警发送到KAFKA");

    // Channel identifier used for matching.
    String name;
    // Human-readable description (runtime data shown to users; kept verbatim).
    String message;

    MonitorNotifyType(String name, String message){
        this.name = name;
        this.message = message;
    }

    public String getName() {
        return name;
    }

    public String getMessage() {
        return message;
    }

    /** True when the supplied identifier names a supported channel. */
    public static boolean legal(String name) {
        for (MonitorNotifyType elem: MonitorNotifyType.values()) {
            if (elem.name.equals(name)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public String toString() {
        return "NotifyType{" +
                "name='" + name + '\'' +
                ", message='" + message + '\'' +
                '}';
    }

    /** Exposes every (identifier, description) pair, e.g. for listing in a UI. */
    public static List<AbstractMap.SimpleEntry<String, String>> toList() {
        List<AbstractMap.SimpleEntry<String, String>> notifyTypeList = new ArrayList<>();
        for (MonitorNotifyType elem: MonitorNotifyType.values()) {
            notifyTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
        }
        return notifyTypeList;
    }
}

View File

@@ -1,37 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.*;
/**
 * Immutable snapshot of consumer-group metadata for one cluster: the known group
 * names, a topic-to-groups index, and per-group summaries from the AdminClient.
 *
 * @author zengqiao
 * @date 19/5/14
 */
public class ConsumerMetadata {
    /** All consumer-group names discovered. */
    private final Set<String> consumerGroupSet;

    /** topicName -> consumer groups subscribed to it. */
    private final Map<String, Set<String>> topicNameConsumerGroupMap;

    /** groupName -> summary as reported by the Kafka AdminClient. */
    private final Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap;

    // Fix: fields were initialized to fresh empty collections and then immediately
    // overwritten by the constructor; the dead initializers are removed and the
    // fields made final so the snapshot cannot be reassigned after construction.
    public ConsumerMetadata(Set<String> consumerGroupSet,
                            Map<String, Set<String>> topicNameConsumerGroupMap,
                            Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
        this.consumerGroupSet = consumerGroupSet;
        this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
        this.consumerGroupSummaryMap = consumerGroupSummaryMap;
    }

    public Set<String> getConsumerGroupSet() {
        return consumerGroupSet;
    }

    public Map<String, Set<String>> getTopicNameConsumerGroupMap() {
        return topicNameConsumerGroupMap;
    }

    public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
        return consumerGroupSummaryMap;
    }
}

View File

@@ -1,69 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity;
/**
 * Lag metrics for one (cluster, topic, consumerGroup) combination.
 *
 * @author tukun
 * @date 2015/11/12
 */
public class ConsumerMetrics {
    // Physical cluster the metrics were collected from.
    private Long clusterId;
    private String topicName;
    private String consumerGroup;
    // Offset storage location; presumably "zookeeper" or "broker" to match
    // OffsetStoreLocation — TODO confirm against callers.
    private String location;
    // Lag total; presumably summed across all partitions of the topic — TODO confirm.
    private Long sumLag;

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public String getTopicName() {
        return topicName;
    }

    public void setTopicName(String topicName) {
        this.topicName = topicName;
    }

    public String getConsumerGroup() {
        return consumerGroup;
    }

    public void setConsumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
    }

    public String getLocation() {
        return location;
    }

    public void setLocation(String location) {
        this.location = location;
    }

    public Long getSumLag() {
        return sumLag;
    }

    public void setSumLag(Long sumLag) {
        this.sumLag = sumLag;
    }

    @Override
    public String toString() {
        return "ConsumerMetrics{" +
                "clusterId=" + clusterId +
                ", topicName='" + topicName + '\'' +
                ", consumerGroup='" + consumerGroup + '\'' +
                ", location='" + location + '\'' +
                ", sumLag=" + sumLag +
                '}';
    }
}

View File

@@ -1,76 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
import java.io.Serializable;
/**
 * Generic API response wrapper: status code, human-readable message and an
 * optional payload. The no-arg and single-arg constructors produce a success
 * response (StatusCode.SUCCESS / "成功").
 *
 * @author huangyiminghappy@163.com
 * @date 2019-07-08
 */
public class Result<T> implements Serializable {
    private static final long serialVersionUID = -2772975319944108658L;

    /** response payload, may be null */
    private T data;
    /** human-readable description */
    private String message;
    /** status code; StatusCode.SUCCESS on success */
    private Integer code;

    public Result(T data) {
        this.data = data;
        this.code = StatusCode.SUCCESS;
        this.message = "成功";
    }

    public Result() {
        this(null);
    }

    public Result(Integer code, String message) {
        this.message = message;
        this.code = code;
    }

    public Result(Integer code, T data, String message) {
        this.data = data;
        this.message = message;
        this.code = code;
    }

    public T getData() {
        // the old (T) cast was redundant: the field is already declared as T
        return this.data;
    }

    public void setData(T data) {
        this.data = data;
    }

    public String getMessage() {
        return this.message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public Integer getCode() {
        return this.code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    @Override
    public String toString() {
        return JSON.toJSONString(this);
    }
}

View File

@@ -1,24 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
 * FieldSelector
 *
 * Field-level marker annotation, retained at runtime (so it is readable via
 * reflection) and documented in generated Javadoc.
 *
 * @author huangyiminghappy@163.com
 * @date 2019-06-19
 */
@Target(ElementType.FIELD)
@Retention(RUNTIME)
@Documented
public @interface FieldSelector {
    // annotation attributes
    // logical name for the annotated field; empty by default
    // NOTE(review): exact selection semantics depend on the reader of this
    // annotation elsewhere in the project — confirm against callers
    String name() default "";
    // integer type tags the field applies to; empty by default
    int[] types() default {};
}

View File

@@ -1,35 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * User roles.
 *
 * @author zengqiao_cn@163.com
 * @date 19/4/15
 */
public enum AccountRoleEnum {
    UNKNOWN(-1),
    NORMAL(0),
    SRE(1),
    ADMIN(2);

    private final Integer role;

    AccountRoleEnum(Integer role) {
        this.role = role;
    }

    public Integer getRole() {
        return this.role;
    }

    /**
     * Resolve a raw role value to its enum constant.
     *
     * @return the matching constant, or null when nothing matches
     */
    public static AccountRoleEnum getUserRoleEnum(Integer role) {
        for (AccountRoleEnum candidate : values()) {
            if (candidate.role.equals(role)) {
                return candidate;
            }
        }
        return null;
    }
}

View File

@@ -1,38 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Status of an administrative topic operation.
 *
 * @author zengqiao
 * @date 19/11/26
 */
public enum AdminTopicStatusEnum {
    SUCCESS(0, "成功"),
    REPLACE_DB_FAILED(1, "更新DB失败"),
    PARAM_NULL_POINTER(2, "参数错误"),
    PARTITION_NUM_ILLEGAL(3, "分区数错误"),
    BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
    TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
    TOPIC_EXISTED(6, "Topic已存在"),
    UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
    TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
    TOPIC_IN_DELETING(9, "Topic正在删除"),
    UNKNOWN_ERROR(10, "未知错误");

    /** numeric status code */
    private final Integer code;
    /** human-readable description */
    private final String message;

    AdminTopicStatusEnum(Integer statusCode, String description) {
        this.code = statusCode;
        this.message = description;
    }

    public Integer getCode() {
        return this.code;
    }

    public String getMessage() {
        return this.message;
    }
}

View File

@@ -1,42 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Meaning of the DB status column.
 *
 * @author zengqiao_cn@163.com
 * @date 19/4/15
 */
public enum DBStatusEnum {
    /** logically deleted */
    DELETED(-1),
    /** normal */
    NORMAL(0),
    /** finished and approved */
    PASSED(1);

    private final Integer status;

    DBStatusEnum(Integer status) {
        this.status = status;
    }

    public Integer getStatus() {
        return this.status;
    }

    /**
     * Resolve a raw status value to its enum constant.
     *
     * @return the matching constant, or null when nothing matches
     */
    public static DBStatusEnum getDBStatusEnum(Integer status) {
        for (DBStatusEnum candidate : values()) {
            if (candidate.status.equals(status)) {
                return candidate;
            }
        }
        return null;
    }
}

View File

@@ -1,19 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Operation type (topic administration actions).
 *
 * @author zengqiao
 * @date 19/11/21
 */
public enum OperationEnum {
    CREATE_TOPIC("create_topic"),
    DELETE_TOPIC("delete_topic"),
    MODIFY_TOPIC_CONFIG("modify_topic_config"),
    EXPAND_TOPIC_PARTITION("expand_topic_partition");

    // NOTE(review): public mutable field — callers appear to read
    // OperationEnum.X.message directly, so making it private/final would be
    // an interface change; left as-is.
    public String message;

    OperationEnum(String message) {
        this.message = message;
    }
}

View File

@@ -1,28 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Work-order status.
 */
public enum OrderStatusEnum {
    WAIT_DEAL(0, "待处理"),
    PASSED(1, "通过"),
    REFUSED(2, "拒绝"),
    CANCELLED(3, "取消");

    private final Integer code;
    private final String message;

    OrderStatusEnum(Integer statusCode, String description) {
        this.code = statusCode;
        this.message = description;
    }

    public Integer getCode() {
        return this.code;
    }

    public String getMessage() {
        return this.message;
    }
}

View File

@@ -1,33 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Work-order type.
 *
 * @author zengqiao
 * @date 19/6/23
 */
public enum OrderTypeEnum {
    UNKNOWN(-1),
    APPLY_TOPIC(0),
    APPLY_PARTITION(1);

    private final Integer code;

    OrderTypeEnum(Integer code) {
        this.code = code;
    }

    public Integer getCode() {
        return this.code;
    }

    /**
     * Resolve a raw code to its enum constant; unmatched values map to
     * UNKNOWN (never null).
     */
    public static OrderTypeEnum getOrderTypeEnum(Integer code) {
        for (OrderTypeEnum candidate : values()) {
            if (candidate.code.equals(code)) {
                return candidate;
            }
        }
        return UNKNOWN;
    }
}

View File

@@ -1,31 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Preferred replica election task status.
 *
 * @author zengqiao
 * @date 2017/6/29.
 */
public enum PreferredReplicaElectEnum {
    SUCCESS(0, "成功[创建成功|执行成功]"),
    RUNNING(1, "正在执行"),
    ALREADY_EXIST(2, "任务已存在"),
    PARAM_ILLEGAL(3, "参数错误"),
    UNKNOWN(4, "进度未知");

    private final Integer code;
    private final String message;

    PreferredReplicaElectEnum(Integer statusCode, String description) {
        this.code = statusCode;
        this.message = description;
    }

    public Integer getCode() {
        return this.code;
    }

    public String getMessage() {
        return this.message;
    }
}

View File

@@ -1,45 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
 * Partition reassignment task status.
 *
 * @author zengqiao
 * @date 19/12/29
 */
public enum ReassignmentStatusEnum {
    WAITING(0, "等待执行"),
    RUNNING(1, "正在执行"),
    SUCCESS(2, "迁移成功"),
    FAILED(3, "迁移失败"),
    CANCELED(4, "取消任务");

    private final Integer code;
    private final String message;

    ReassignmentStatusEnum(Integer code, String message) {
        this.code = code;
        this.message = message;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    }

    /**
     * A task may (re)trigger while still pending or in flight.
     * Null-safe: Integer.equals(null) is false, so a null status yields false
     * (same behavior as the original if-chain, just written as an expression).
     */
    public static boolean triggerTask(Integer status) {
        return WAITING.code.equals(status) || RUNNING.code.equals(status);
    }

    /** A task can only be cancelled before it starts running. */
    public static boolean cancelTask(Integer status) {
        return WAITING.code.equals(status);
    }
}

View File

@@ -1,91 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
 * Basic information of one broker (endpoint plus a few aggregate counts).
 *
 * @author zengqiao_cn@163.com
 * @date 19/4/8
 */
public class BrokerBasicDTO {
    private String host;
    private Integer port;
    private Integer jmxPort;
    private Integer topicNum;
    private Integer partitionCount;
    private Long startTime;
    private Integer leaderCount;

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Integer getPort() {
        return port;
    }

    public void setPort(Integer port) {
        this.port = port;
    }

    public Integer getJmxPort() {
        return jmxPort;
    }

    public void setJmxPort(Integer jmxPort) {
        this.jmxPort = jmxPort;
    }

    public Integer getTopicNum() {
        return topicNum;
    }

    public void setTopicNum(Integer topicNum) {
        this.topicNum = topicNum;
    }

    public Integer getPartitionCount() {
        return partitionCount;
    }

    public void setPartitionCount(Integer partitionCount) {
        this.partitionCount = partitionCount;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Integer getLeaderCount() {
        return leaderCount;
    }

    public void setLeaderCount(Integer leaderCount) {
        this.leaderCount = leaderCount;
    }

    @Override
    public String toString() {
        // fixed: label previously said "BrokerBasicInfoDTO", which does not
        // match this class's name and was misleading in logs
        return "BrokerBasicDTO{" +
                "host='" + host + '\'' +
                ", port=" + port +
                ", jmxPort=" + jmxPort +
                ", topicNum=" + topicNum +
                ", partitionCount=" + partitionCount +
                ", startTime=" + startTime +
                ", leaderCount=" + leaderCount +
                '}';
    }
}

View File

@@ -1,132 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
/**
 * Broker overview row: identity/endpoint data from ZK broker metadata plus
 * JMX-derived metric fields (left null when metrics were not collected).
 *
 * @author zengqiao
 * @date 19/4/21
 */
public class BrokerOverallDTO {
    private Integer brokerId;
    private String host;
    private Integer port;
    private Integer jmxPort;
    private Long startTime;
    private Integer partitionCount;
    private Integer underReplicatedPartitions;
    private Integer leaderCount;
    private Double bytesInPerSec;

    public Integer getBrokerId() {
        return this.brokerId;
    }

    public void setBrokerId(Integer value) {
        this.brokerId = value;
    }

    public String getHost() {
        return this.host;
    }

    public void setHost(String value) {
        this.host = value;
    }

    public Integer getPort() {
        return this.port;
    }

    public void setPort(Integer value) {
        this.port = value;
    }

    public Integer getJmxPort() {
        return this.jmxPort;
    }

    public void setJmxPort(Integer value) {
        this.jmxPort = value;
    }

    public Long getStartTime() {
        return this.startTime;
    }

    public void setStartTime(Long value) {
        this.startTime = value;
    }

    public Integer getPartitionCount() {
        return this.partitionCount;
    }

    public void setPartitionCount(Integer value) {
        this.partitionCount = value;
    }

    public Integer getUnderReplicatedPartitions() {
        return this.underReplicatedPartitions;
    }

    public void setUnderReplicatedPartitions(Integer value) {
        this.underReplicatedPartitions = value;
    }

    public Integer getLeaderCount() {
        return this.leaderCount;
    }

    public void setLeaderCount(Integer value) {
        this.leaderCount = value;
    }

    public Double getBytesInPerSec() {
        return this.bytesInPerSec;
    }

    public void setBytesInPerSec(Double value) {
        this.bytesInPerSec = value;
    }

    @Override
    public String toString() {
        return "BrokerOverallDTO{" +
                "brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", port=" + port +
                ", jmxPort=" + jmxPort +
                ", startTime=" + startTime +
                ", partitionCount=" + partitionCount +
                ", underReplicatedPartitions=" + underReplicatedPartitions +
                ", leaderCount=" + leaderCount +
                ", bytesInPerSec=" + bytesInPerSec +
                '}';
    }

    /**
     * Build a DTO from ZK broker metadata; metric fields are copied only when
     * a metrics object is available, otherwise they stay null.
     */
    public static BrokerOverallDTO newInstance(BrokerMetadata brokerMetadata, BrokerMetrics brokerMetrics) {
        BrokerOverallDTO dto = new BrokerOverallDTO();
        dto.setBrokerId(brokerMetadata.getBrokerId());
        dto.setHost(brokerMetadata.getHost());
        dto.setPort(brokerMetadata.getPort());
        dto.setJmxPort(brokerMetadata.getJmxPort());
        dto.setStartTime(brokerMetadata.getTimestamp());
        if (brokerMetrics == null) {
            // no metrics collected for this broker yet
            return dto;
        }
        dto.setPartitionCount(brokerMetrics.getPartitionCount());
        dto.setLeaderCount(brokerMetrics.getLeaderCount());
        dto.setBytesInPerSec(brokerMetrics.getBytesInPerSec());
        dto.setUnderReplicatedPartitions(brokerMetrics.getUnderReplicatedPartitions());
        return dto;
    }
}

View File

@@ -1,121 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
/**
 * Broker overview row for list pages: endpoint data plus byte-in/byte-out
 * rates and a DB status flag.
 *
 * @author zengqiao_cn@163.com
 * @date 19/4/21
 */
public class BrokerOverviewDTO {
    private Integer brokerId;
    private String host;
    private Integer port;
    private Integer jmxPort;
    private Long startTime;
    private Double byteIn;
    private Double byteOut;
    private Integer status;

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Integer getPort() {
        return port;
    }

    public void setPort(Integer port) {
        this.port = port;
    }

    public Integer getJmxPort() {
        return jmxPort;
    }

    public void setJmxPort(Integer jmxPort) {
        this.jmxPort = jmxPort;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Double getByteIn() {
        return byteIn;
    }

    public void setByteIn(Double byteIn) {
        this.byteIn = byteIn;
    }

    public Double getByteOut() {
        return byteOut;
    }

    public void setByteOut(Double byteOut) {
        this.byteOut = byteOut;
    }

    public Integer getStatus() {
        return status;
    }

    public void setStatus(Integer status) {
        this.status = status;
    }

    @Override
    public String toString() {
        // fixed: label previously said "BrokerInfoDTO", which does not match
        // this class's name and was misleading in logs
        return "BrokerOverviewDTO{" +
                "brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", port=" + port +
                ", jmxPort=" + jmxPort +
                ", startTime=" + startTime +
                ", byteIn=" + byteIn +
                ", byteOut=" + byteOut +
                ", status=" + status +
                '}';
    }

    /**
     * Build a DTO from ZK broker metadata; status defaults to NORMAL, and the
     * byte rates are copied only when a metrics object is available.
     */
    public static BrokerOverviewDTO newInstance(BrokerMetadata brokerMetadata, BrokerMetrics brokerMetrics) {
        BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO();
        brokerOverviewDTO.setBrokerId(brokerMetadata.getBrokerId());
        brokerOverviewDTO.setHost(brokerMetadata.getHost());
        brokerOverviewDTO.setPort(brokerMetadata.getPort());
        brokerOverviewDTO.setJmxPort(brokerMetadata.getJmxPort());
        brokerOverviewDTO.setStartTime(brokerMetadata.getTimestamp());
        brokerOverviewDTO.setStatus(DBStatusEnum.NORMAL.getStatus());
        if (brokerMetrics == null) {
            return brokerOverviewDTO;
        }
        brokerOverviewDTO.setByteIn(brokerMetrics.getBytesInPerSec());
        brokerOverviewDTO.setByteOut(brokerMetrics.getBytesOutPerSec());
        return brokerOverviewDTO;
    }
}

View File

@@ -1,70 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import java.util.Date;
/**
 * Cluster controller information: which broker currently holds the
 * controller role, plus the controller epoch/version and timestamp.
 *
 * @author zengqiao
 * @date 19/4/22
 */
public class ControllerDTO {
    private String clusterName;
    private Integer brokerId;
    private String host;
    private Integer controllerVersion;
    private Date controllerTimestamp;

    public String getClusterName() {
        return clusterName;
    }

    public void setClusterName(String clusterName) {
        this.clusterName = clusterName;
    }

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Integer getControllerVersion() {
        return controllerVersion;
    }

    public void setControllerVersion(Integer controllerVersion) {
        this.controllerVersion = controllerVersion;
    }

    public Date getControllerTimestamp() {
        return controllerTimestamp;
    }

    public void setControllerTimestamp(Date controllerTimestamp) {
        this.controllerTimestamp = controllerTimestamp;
    }

    @Override
    public String toString() {
        // fixed: label previously said "ControllerInfoDTO", which does not
        // match this class's name and was misleading in logs
        return "ControllerDTO{" +
                "clusterName='" + clusterName + '\'' +
                ", brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", controllerVersion=" + controllerVersion +
                ", controllerTimestamp=" + controllerTimestamp +
                '}';
    }
}

View File

@@ -1,62 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
 * Offset of one topic partition, optionally with the timestamp the offset
 * was resolved at.
 *
 * @author zengqiao
 * @date 19/6/2
 */
public class PartitionOffsetDTO {
    private Integer partitionId;
    private Long offset;
    private Long timestamp;

    public PartitionOffsetDTO() {
    }

    public PartitionOffsetDTO(Integer partitionId, Long offset) {
        this.partitionId = partitionId;
        this.offset = offset;
    }

    public PartitionOffsetDTO(Integer partitionId, Long offset, Long timestamp) {
        this.partitionId = partitionId;
        this.offset = offset;
        this.timestamp = timestamp;
    }

    public Integer getPartitionId() {
        return partitionId;
    }

    public void setPartitionId(Integer partitionId) {
        this.partitionId = partitionId;
    }

    public Long getOffset() {
        return offset;
    }

    public void setOffset(Long offset) {
        this.offset = offset;
    }

    public Long getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(Long timestamp) {
        this.timestamp = timestamp;
    }

    @Override
    public String toString() {
        // fixed: label previously said "TopicOffsetDTO" (wrong class name) and
        // the output started with a stray ", " before the first field
        return "PartitionOffsetDTO{" +
                "partitionId=" + partitionId +
                ", offset=" + offset +
                ", timestamp=" + timestamp +
                '}';
    }
}

View File

@@ -1,123 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
 * Basic information of one topic: layout counts, ownership and retention.
 *
 * @author arthur
 * @date 2018/09/03
 */
public class TopicBasicDTO {
    private String topicName;
    private Integer partitionNum;
    private Integer replicaNum;
    private Integer brokerNum;
    private String remark;
    private Long modifyTime;
    private Long createTime;
    private String region;
    private Long retentionTime;
    private String principal;

    public String getTopicName() {
        return topicName;
    }

    public void setTopicName(String topicName) {
        this.topicName = topicName;
    }

    public Integer getPartitionNum() {
        return partitionNum;
    }

    public void setPartitionNum(Integer partitionNum) {
        this.partitionNum = partitionNum;
    }

    public Integer getReplicaNum() {
        return replicaNum;
    }

    public void setReplicaNum(Integer replicaNum) {
        this.replicaNum = replicaNum;
    }

    public Integer getBrokerNum() {
        return brokerNum;
    }

    public void setBrokerNum(Integer brokerNum) {
        this.brokerNum = brokerNum;
    }

    public String getRemark() {
        return remark;
    }

    public void setRemark(String remark) {
        this.remark = remark;
    }

    public String getRegion() {
        return region;
    }

    public void setRegion(String region) {
        this.region = region;
    }

    public Long getRetentionTime() {
        return retentionTime;
    }

    public void setRetentionTime(Long retentionTime) {
        this.retentionTime = retentionTime;
    }

    public Long getModifyTime() {
        return modifyTime;
    }

    public void setModifyTime(Long modifyTime) {
        this.modifyTime = modifyTime;
    }

    public Long getCreateTime() {
        return createTime;
    }

    public void setCreateTime(Long createTime) {
        this.createTime = createTime;
    }

    public String getPrincipal() {
        return principal;
    }

    public void setPrincipal(String principal) {
        this.principal = principal;
    }

    @Override
    public String toString() {
        // fixed: label previously said "TopicBasicInfoDTO", which does not
        // match this class's name and was misleading in logs
        return "TopicBasicDTO{" +
                "topicName='" + topicName + '\'' +
                ", partitionNum=" + partitionNum +
                ", replicaNum=" + replicaNum +
                ", brokerNum=" + brokerNum +
                ", remark='" + remark + '\'' +
                ", modifyTime=" + modifyTime +
                ", createTime=" + createTime +
                ", region='" + region + '\'' +
                ", retentionTime=" + retentionTime +
                ", principal='" + principal + '\'' +
                '}';
    }
}

View File

@@ -1,86 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
 * Topic overview row for list pages: layout counts plus produce-rate metrics.
 */
public class TopicOverviewDTO {
    private Long clusterId;
    private String topicName;
    private Integer replicaNum;
    private Integer partitionNum;
    private Double bytesInPerSec;
    private Double produceRequestPerSec;
    private Long updateTime;

    public Long getClusterId() {
        return this.clusterId;
    }

    public void setClusterId(Long value) {
        this.clusterId = value;
    }

    public String getTopicName() {
        return this.topicName;
    }

    public void setTopicName(String value) {
        this.topicName = value;
    }

    public Integer getReplicaNum() {
        return this.replicaNum;
    }

    public void setReplicaNum(Integer value) {
        this.replicaNum = value;
    }

    public Integer getPartitionNum() {
        return this.partitionNum;
    }

    public void setPartitionNum(Integer value) {
        this.partitionNum = value;
    }

    public Double getBytesInPerSec() {
        return this.bytesInPerSec;
    }

    public void setBytesInPerSec(Double value) {
        this.bytesInPerSec = value;
    }

    public Double getProduceRequestPerSec() {
        return this.produceRequestPerSec;
    }

    public void setProduceRequestPerSec(Double value) {
        this.produceRequestPerSec = value;
    }

    public Long getUpdateTime() {
        return this.updateTime;
    }

    public void setUpdateTime(Long value) {
        this.updateTime = value;
    }

    @Override
    public String toString() {
        return "TopicOverviewDTO{" +
                "clusterId=" + clusterId +
                ", topicName='" + topicName + '\'' +
                ", replicaNum=" + replicaNum +
                ", partitionNum=" + partitionNum +
                ", bytesInPerSec=" + bytesInPerSec +
                ", produceRequestPerSec=" + produceRequestPerSec +
                ", updateTime=" + updateTime +
                '}';
    }
}

View File

@@ -1,105 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import java.io.Serializable;
import java.util.List;
/**
 * State of one topic partition: leader/replica layout, ISR and offset.
 *
 * @author arthur
 * @date 2017/6/6.
 */
public class TopicPartitionDTO implements Serializable {
    private Integer partitionId;
    private Long offset;
    private Integer leaderBrokerId;
    private Integer preferredBrokerId;
    private Integer leaderEpoch;
    private List<Integer> replicasBroker;
    private List<Integer> isr;
    private Boolean underReplicated;

    public Integer getPartitionId() {
        return partitionId;
    }

    public void setPartitionId(Integer partitionId) {
        this.partitionId = partitionId;
    }

    public Long getOffset() {
        return offset;
    }

    public void setOffset(Long offset) {
        this.offset = offset;
    }

    public Integer getLeaderBrokerId() {
        return leaderBrokerId;
    }

    public void setLeaderBrokerId(Integer leaderBrokerId) {
        this.leaderBrokerId = leaderBrokerId;
    }

    public Integer getPreferredBrokerId() {
        return preferredBrokerId;
    }

    public void setPreferredBrokerId(Integer preferredBrokerId) {
        this.preferredBrokerId = preferredBrokerId;
    }

    public Integer getLeaderEpoch() {
        return leaderEpoch;
    }

    public void setLeaderEpoch(Integer leaderEpoch) {
        this.leaderEpoch = leaderEpoch;
    }

    public List<Integer> getReplicasBroker() {
        return replicasBroker;
    }

    public void setReplicasBroker(List<Integer> replicasBroker) {
        this.replicasBroker = replicasBroker;
    }

    public List<Integer> getIsr() {
        return isr;
    }

    public void setIsr(List<Integer> isr) {
        this.isr = isr;
    }

    public boolean isUnderReplicated() {
        // fixed: the field is a Boolean and may be null on a freshly built
        // DTO; the old "return underReplicated;" unboxed it and threw NPE.
        // Null is treated as "not under-replicated".
        return Boolean.TRUE.equals(underReplicated);
    }

    public void setUnderReplicated(boolean underReplicated) {
        this.underReplicated = underReplicated;
    }

    @Override
    public String toString() {
        return "TopicPartitionDTO{" +
                "partitionId=" + partitionId +
                ", offset=" + offset +
                ", leaderBrokerId=" + leaderBrokerId +
                ", preferredBrokerId=" + preferredBrokerId +
                ", leaderEpoch=" + leaderEpoch +
                ", replicasBroker=" + replicasBroker +
                ", isr=" + isr +
                ", underReplicated=" + underReplicated +
                '}';
    }
}

View File

@@ -1,47 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
 * Alarm notification payload: the rule that fired, the action tag to route
 * by, and the rendered message.
 *
 * @author zengqiao
 * @date 2020-02-14
 */
public class AlarmNotifyDTO {
    private Long alarmRuleId;
    private String actionTag;
    private String message;

    public Long getAlarmRuleId() {
        return this.alarmRuleId;
    }

    public void setAlarmRuleId(Long value) {
        this.alarmRuleId = value;
    }

    public String getActionTag() {
        return this.actionTag;
    }

    public void setActionTag(String value) {
        this.actionTag = value;
    }

    public String getMessage() {
        return this.message;
    }

    public void setMessage(String value) {
        this.message = value;
    }

    @Override
    public String toString() {
        return "AlarmNotifyDTO{" +
                "alarmRuleId=" + alarmRuleId +
                ", actionTag='" + actionTag + '\'' +
                ", message='" + message + '\'' +
                '}';
    }
}

View File

@@ -1,127 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
import java.util.Map;
/**
 * One alarm rule: identity, trigger expression, filters, actions and the
 * cluster it applies to.
 *
 * @author zengqiao
 * @date 19/12/16
 */
public class AlarmRuleDTO {
    /** alarm id */
    private Long id;

    /** alarm name */
    private String name;

    /** how many consecutive times the condition has already held */
    private Integer duration;

    /** cluster id; always present in the filters, so kept as its own field */
    private Long clusterId;

    /** trigger expression of the alarm strategy */
    private AlarmStrategyExpressionDTO strategyExpression;

    /** filter conditions of the alarm strategy */
    private Map<String, String> strategyFilterMap;

    /** action (notification) channels of the alarm strategy */
    private Map<String, AlarmStrategyActionDTO> strategyActionMap;

    /** last modification time */
    private Long gmtModify;

    public Long getId() {
        return this.id;
    }

    public void setId(Long value) {
        this.id = value;
    }

    public String getName() {
        return this.name;
    }

    public void setName(String value) {
        this.name = value;
    }

    public Integer getDuration() {
        return this.duration;
    }

    public void setDuration(Integer value) {
        this.duration = value;
    }

    public Long getClusterId() {
        return this.clusterId;
    }

    public void setClusterId(Long value) {
        this.clusterId = value;
    }

    public AlarmStrategyExpressionDTO getStrategyExpression() {
        return this.strategyExpression;
    }

    public void setStrategyExpression(AlarmStrategyExpressionDTO value) {
        this.strategyExpression = value;
    }

    public Map<String, String> getStrategyFilterMap() {
        return this.strategyFilterMap;
    }

    public void setStrategyFilterMap(Map<String, String> value) {
        this.strategyFilterMap = value;
    }

    public Map<String, AlarmStrategyActionDTO> getStrategyActionMap() {
        return this.strategyActionMap;
    }

    public void setStrategyActionMap(Map<String, AlarmStrategyActionDTO> value) {
        this.strategyActionMap = value;
    }

    public Long getGmtModify() {
        return this.gmtModify;
    }

    public void setGmtModify(Long value) {
        this.gmtModify = value;
    }

    @Override
    public String toString() {
        return "AlarmRuleDTO{" +
                "id=" + id +
                ", name='" + name + '\'' +
                ", duration=" + duration +
                ", clusterId=" + clusterId +
                ", strategyExpression=" + strategyExpression +
                ", strategyFilterMap=" + strategyFilterMap +
                ", strategyActionMap=" + strategyActionMap +
                ", gmtModify=" + gmtModify +
                '}';
    }
}

View File

@@ -1,43 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
* @author zengqiao
* @date 19/12/16
*/
public class AlarmStrategyActionDTO {
private String actionWay; // 告知方式: kafka
private String actionTag;
public String getActionWay() {
return actionWay;
}
public void setActionWay(String actionWay) {
this.actionWay = actionWay;
}
public String getActionTag() {
return actionTag;
}
public void setActionTag(String actionTag) {
this.actionTag = actionTag;
}
@Override
public String toString() {
return "AlarmStrategyActionDTO{" +
"actionWay='" + actionWay + '\'' +
", actionTag='" + actionTag + '\'' +
'}';
}
public boolean legal() {
if (actionWay == null
|| actionTag == null) {
return false;
}
return true;
}
}

View File

@@ -1,68 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
 * Trigger expression of an alarm strategy: metric OP threshold, sustained
 * for a number of periods.
 *
 * @author zengqiao
 * @date 19/12/16
 */
public class AlarmStrategyExpressionDTO {
    private String metric;
    private String opt;
    private Long threshold;
    private Integer duration;

    public String getMetric() {
        return metric;
    }

    public void setMetric(String metric) {
        this.metric = metric;
    }

    public String getOpt() {
        return opt;
    }

    public void setOpt(String opt) {
        this.opt = opt;
    }

    public Long getThreshold() {
        return threshold;
    }

    public void setThreshold(Long threshold) {
        this.threshold = threshold;
    }

    public Integer getDuration() {
        return duration;
    }

    public void setDuration(Integer duration) {
        this.duration = duration;
    }

    @Override
    public String toString() {
        // fixed: label previously said "AlarmStrategyExpressionModel", which
        // does not match this class's name and was misleading in logs
        return "AlarmStrategyExpressionDTO{" +
                "metric='" + metric + '\'' +
                ", opt='" + opt + '\'' +
                ", threshold=" + threshold +
                ", duration=" + duration +
                '}';
    }

    /**
     * An expression is usable only when every part is set and the duration
     * is positive. (Simplified from the old if/return-true/return-false chain.)
     */
    public boolean legal() {
        return metric != null
                && opt != null
                && threshold != null
                && duration != null && duration > 0;
    }
}

View File

@@ -1,44 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
 * One filter condition (key/value pair) of an alarm strategy.
 *
 * @author zengqiao
 * @date 19/12/16
 */
public class AlarmStrategyFilterDTO {
    private String key;
    private String value;

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        // fixed: label previously said "AlarmStrategyFilterModel", which does
        // not match this class's name and was misleading in logs
        return "AlarmStrategyFilterDTO{" +
                "key='" + key + '\'' +
                ", value='" + value + '\'' +
                '}';
    }

    /**
     * A filter is usable only when both key and value are set.
     * (Simplified from the old if/return-true/return-false chain.)
     */
    public boolean legal() {
        return key != null && value != null;
    }
}

View File

@@ -1,114 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.analysis;
import java.util.List;
/**
 * Per-broker traffic analysis at one sample time, with the per-topic
 * breakdown attached.
 *
 * @author zengqiao
 * @date 19/12/29
 */
public class AnalysisBrokerDTO {
    private Long clusterId;
    private Integer brokerId;
    private Long baseTime;
    private Double bytesIn;
    private Double bytesOut;
    private Double messagesIn;
    private Double totalFetchRequests;
    private Double totalProduceRequests;
    List<AnalysisTopicDTO> topicAnalysisVOList;

    public Long getClusterId() {
        return this.clusterId;
    }

    public void setClusterId(Long value) {
        this.clusterId = value;
    }

    public Integer getBrokerId() {
        return this.brokerId;
    }

    public void setBrokerId(Integer value) {
        this.brokerId = value;
    }

    public Long getBaseTime() {
        return this.baseTime;
    }

    public void setBaseTime(Long value) {
        this.baseTime = value;
    }

    public Double getBytesIn() {
        return this.bytesIn;
    }

    public void setBytesIn(Double value) {
        this.bytesIn = value;
    }

    public Double getBytesOut() {
        return this.bytesOut;
    }

    public void setBytesOut(Double value) {
        this.bytesOut = value;
    }

    public Double getMessagesIn() {
        return this.messagesIn;
    }

    public void setMessagesIn(Double value) {
        this.messagesIn = value;
    }

    public Double getTotalFetchRequests() {
        return this.totalFetchRequests;
    }

    public void setTotalFetchRequests(Double value) {
        this.totalFetchRequests = value;
    }

    public Double getTotalProduceRequests() {
        return this.totalProduceRequests;
    }

    public void setTotalProduceRequests(Double value) {
        this.totalProduceRequests = value;
    }

    public List<AnalysisTopicDTO> getTopicAnalysisVOList() {
        return this.topicAnalysisVOList;
    }

    public void setTopicAnalysisVOList(List<AnalysisTopicDTO> value) {
        this.topicAnalysisVOList = value;
    }

    @Override
    public String toString() {
        return "AnalysisBrokerDTO{" +
                "clusterId=" + clusterId +
                ", brokerId=" + brokerId +
                ", baseTime=" + baseTime +
                ", bytesIn=" + bytesIn +
                ", bytesOut=" + bytesOut +
                ", messagesIn=" + messagesIn +
                ", totalFetchRequests=" + totalFetchRequests +
                ", totalProduceRequests=" + totalProduceRequests +
                ", topicAnalysisVOList=" + topicAnalysisVOList +
                '}';
    }
}

View File

@@ -1,134 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.analysis;
/**
* @author zengqiao
* @date 19/12/29
*/
public class AnalysisTopicDTO {
private String topicName;
private Double bytesIn;
private Double bytesInRate;
private Double bytesOut;
private Double bytesOutRate;
private Double messagesIn;
private Double messagesInRate;
private Double totalFetchRequests;
private Double totalFetchRequestsRate;
private Double totalProduceRequests;
private Double totalProduceRequestsRate;
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Double getBytesIn() {
return bytesIn;
}
public void setBytesIn(Double bytesIn) {
this.bytesIn = bytesIn;
}
public Double getBytesInRate() {
return bytesInRate;
}
public void setBytesInRate(Double bytesInRate) {
this.bytesInRate = bytesInRate;
}
public Double getBytesOut() {
return bytesOut;
}
public void setBytesOut(Double bytesOut) {
this.bytesOut = bytesOut;
}
public Double getBytesOutRate() {
return bytesOutRate;
}
public void setBytesOutRate(Double bytesOutRate) {
this.bytesOutRate = bytesOutRate;
}
public Double getMessagesIn() {
return messagesIn;
}
public void setMessagesIn(Double messagesIn) {
this.messagesIn = messagesIn;
}
public Double getMessagesInRate() {
return messagesInRate;
}
public void setMessagesInRate(Double messagesInRate) {
this.messagesInRate = messagesInRate;
}
public Double getTotalFetchRequests() {
return totalFetchRequests;
}
public void setTotalFetchRequests(Double totalFetchRequests) {
this.totalFetchRequests = totalFetchRequests;
}
public Double getTotalFetchRequestsRate() {
return totalFetchRequestsRate;
}
public void setTotalFetchRequestsRate(Double totalFetchRequestsRate) {
this.totalFetchRequestsRate = totalFetchRequestsRate;
}
public Double getTotalProduceRequests() {
return totalProduceRequests;
}
public void setTotalProduceRequests(Double totalProduceRequests) {
this.totalProduceRequests = totalProduceRequests;
}
public Double getTotalProduceRequestsRate() {
return totalProduceRequestsRate;
}
public void setTotalProduceRequestsRate(Double totalProduceRequestsRate) {
this.totalProduceRequestsRate = totalProduceRequestsRate;
}
@Override
public String toString() {
return "AnalysisTopicDTO{" +
"topicName='" + topicName + '\'' +
", bytesIn=" + bytesIn +
", bytesInRate=" + bytesInRate +
", bytesOut=" + bytesOut +
", bytesOutRate=" + bytesOutRate +
", messagesIn=" + messagesIn +
", messagesInRate=" + messagesInRate +
", totalFetchRequests=" + totalFetchRequests +
", totalFetchRequestsRate=" + totalFetchRequestsRate +
", totalProduceRequests=" + totalProduceRequests +
", totalProduceRequestsRate=" + totalProduceRequestsRate +
'}';
}
}

View File

@@ -1,57 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
/**
 * Consumption detail of one partition: log-end offset vs. committed consumer
 * offset, and the consumer instance holding the partition.
 *
 * @author zengqiao
 * @date 20/1/9
 */
public class ConsumeDetailDTO {
    private Integer partitionId;
    private Long offset;
    private Long consumeOffset;
    private String consumerId;

    public Integer getPartitionId() {
        return this.partitionId;
    }

    public void setPartitionId(Integer value) {
        this.partitionId = value;
    }

    public Long getOffset() {
        return this.offset;
    }

    public void setOffset(Long value) {
        this.offset = value;
    }

    public Long getConsumeOffset() {
        return this.consumeOffset;
    }

    public void setConsumeOffset(Long value) {
        this.consumeOffset = value;
    }

    public String getConsumerId() {
        return this.consumerId;
    }

    public void setConsumerId(String value) {
        this.consumerId = value;
    }

    @Override
    public String toString() {
        return "ConsumeDetailDTO{" +
                "partitionId=" + partitionId +
                ", offset=" + offset +
                ", consumeOffset=" + consumeOffset +
                ", consumerId='" + consumerId + '\'' +
                '}';
    }
}

View File

@@ -1,61 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
import java.util.List;
import java.util.Map;
/**
 * Snapshot of a consumer group: its name, where it runs, and the
 * partition states of every topic it subscribes to.
 *
 * @author tukun
 * @date 2015/11/12
 */
public class ConsumerDTO {
    /** Consumer group name. */
    private String consumerGroup;
    /** Consumer location/type; normally "static". */
    private String location;
    /** Partition-state list per subscribed topic, keyed by topic name. */
    private Map<String, List<PartitionState>> topicPartitionMap;

    public String getConsumerGroup() {
        return consumerGroup;
    }

    public void setConsumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
    }

    public String getLocation() {
        return location;
    }

    public void setLocation(String location) {
        this.location = location;
    }

    public Map<String, List<PartitionState>> getTopicPartitionMap() {
        return topicPartitionMap;
    }

    public void setTopicPartitionMap(Map<String, List<PartitionState>> topicPartitionMap) {
        this.topicPartitionMap = topicPartitionMap;
    }

    /** Debug representation; prefix "Consumer{" kept identical to the original. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Consumer{");
        sb.append("consumerGroup='").append(consumerGroup).append('\'');
        sb.append(", location='").append(location).append('\'');
        sb.append(", topicPartitionMap=").append(topicPartitionMap);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -1,76 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
import com.xiaojukeji.kafka.manager.common.constant.OffsetStoreLocation;
import java.util.Objects;
/**
 * Identity of a consumer group: owning cluster, group name, and the
 * location where its offsets are stored. Two instances are equal when
 * all three fields match.
 *
 * @author zengqiao
 * @date 19/4/18
 */
public class ConsumerGroupDTO {
    private Long clusterId;
    private String consumerGroup;
    private OffsetStoreLocation offsetStoreLocation;

    public ConsumerGroupDTO(Long clusterId, String consumerGroup, OffsetStoreLocation offsetStoreLocation) {
        this.clusterId = clusterId;
        this.consumerGroup = consumerGroup;
        this.offsetStoreLocation = offsetStoreLocation;
    }

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public String getConsumerGroup() {
        return consumerGroup;
    }

    public void setConsumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
    }

    public OffsetStoreLocation getOffsetStoreLocation() {
        return offsetStoreLocation;
    }

    public void setOffsetStoreLocation(OffsetStoreLocation offsetStoreLocation) {
        this.offsetStoreLocation = offsetStoreLocation;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        ConsumerGroupDTO that = (ConsumerGroupDTO) o;
        // Objects.equals is null-safe. The previous clusterId.equals(...) /
        // consumerGroup.equals(...) threw NullPointerException when a field
        // was null, even though hashCode() (Objects.hash) tolerated nulls —
        // an inconsistent equals/hashCode pair.
        return Objects.equals(clusterId, that.clusterId)
                && Objects.equals(consumerGroup, that.consumerGroup)
                && offsetStoreLocation == that.offsetStoreLocation;
    }

    @Override
    public int hashCode() {
        return Objects.hash(clusterId, consumerGroup, offsetStoreLocation);
    }

    @Override
    public String toString() {
        return "ConsumerGroupDTO{" +
                "clusterId=" + clusterId +
                ", consumerGroup='" + consumerGroup + '\'' +
                ", offsetStoreLocation=" + offsetStoreLocation +
                '}';
    }
}

View File

@@ -1,394 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.metrics;
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
import com.xiaojukeji.kafka.manager.common.entity.po.BaseEntryDO;
/**
 * Common flow/request metrics shared by broker- and topic-level metric
 * holders. Each @FieldSelector annotation lists the metric collections
 * (MetricsType values) the annotated field participates in; the lists are
 * consumed reflectively by BrokerMetrics/TopicMetrics.getFieldNameList.
 *
 * @author zengqiao
 * @date 19/11/25
 */
public class BaseMetrics extends BaseEntryDO {
/**
 * Bytes in per second: one-minute rate plus mean, five-minute and
 * fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.BROKER_OVER_VIEW_METRICS,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_FLOW_OVERVIEW,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double bytesInPerSec = 0.0;
protected Double bytesInPerSecMeanRate = 0.0;
protected Double bytesInPerSecFiveMinuteRate = 0.0;
protected Double bytesInPerSecFifteenMinuteRate = 0.0;
/**
 * Bytes out per second: one-minute rate plus mean, five-minute and
 * fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.BROKER_OVER_VIEW_METRICS,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double bytesOutPerSec = 0.0;
protected Double bytesOutPerSecMeanRate = 0.0;
protected Double bytesOutPerSecFiveMinuteRate = 0.0;
protected Double bytesOutPerSecFifteenMinuteRate = 0.0;
/**
 * Messages in per second: one-minute rate plus mean, five-minute and
 * fifteen-minute rates. (The original comment here was a copy of the
 * bytes-in one.)
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double messagesInPerSec = 0.0;
protected Double messagesInPerSecMeanRate = 0.0;
protected Double messagesInPerSecFiveMinuteRate = 0.0;
protected Double messagesInPerSecFifteenMinuteRate = 0.0;
/**
 * Bytes rejected per second: one-minute rate plus mean, five-minute and
 * fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double bytesRejectedPerSec = 0.0;
protected Double bytesRejectedPerSecMeanRate = 0.0;
protected Double bytesRejectedPerSecFiveMinuteRate = 0.0;
protected Double bytesRejectedPerSecFifteenMinuteRate = 0.0;
/**
 * Failed produce requests per second: one-minute rate plus mean,
 * five-minute and fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.TOPIC_FLOW_DETAIL
})
protected Double failProduceRequestPerSec = 0.0;
protected Double failProduceRequestPerSecMeanRate = 0.0;
protected Double failProduceRequestPerSecFiveMinuteRate = 0.0;
protected Double failProduceRequestPerSecFifteenMinuteRate = 0.0;
/**
 * Failed fetch requests per second: one-minute rate plus mean,
 * five-minute and fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.TOPIC_FLOW_DETAIL
})
protected Double failFetchRequestPerSec = 0.0;
protected Double failFetchRequestPerSecMeanRate = 0.0;
protected Double failFetchRequestPerSecFiveMinuteRate = 0.0;
protected Double failFetchRequestPerSecFifteenMinuteRate = 0.0;
/**
 * Total produce requests per second: one-minute rate plus mean,
 * five-minute and fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB,
MetricsType.TOPIC_FLOW_OVERVIEW
})
protected Double totalProduceRequestsPerSec = 0.0;
protected Double totalProduceRequestsPerSecMeanRate = 0.0;
protected Double totalProduceRequestsPerSecFiveMinuteRate = 0.0;
protected Double totalProduceRequestsPerSecFifteenMinuteRate = 0.0;
/**
 * Total fetch requests per second: one-minute rate plus mean,
 * five-minute and fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL
})
protected Double totalFetchRequestsPerSec = 0.0;
protected Double totalFetchRequestsPerSecMeanRate = 0.0;
protected Double totalFetchRequestsPerSecFiveMinuteRate = 0.0;
protected Double totalFetchRequestsPerSecFifteenMinuteRate = 0.0;
// Plain accessors below; no validation or side effects.
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Double getBytesInPerSecMeanRate() {
return bytesInPerSecMeanRate;
}
public void setBytesInPerSecMeanRate(Double bytesInPerSecMeanRate) {
this.bytesInPerSecMeanRate = bytesInPerSecMeanRate;
}
public Double getBytesInPerSecFiveMinuteRate() {
return bytesInPerSecFiveMinuteRate;
}
public void setBytesInPerSecFiveMinuteRate(Double bytesInPerSecFiveMinuteRate) {
this.bytesInPerSecFiveMinuteRate = bytesInPerSecFiveMinuteRate;
}
public Double getBytesInPerSecFifteenMinuteRate() {
return bytesInPerSecFifteenMinuteRate;
}
public void setBytesInPerSecFifteenMinuteRate(Double bytesInPerSecFifteenMinuteRate) {
this.bytesInPerSecFifteenMinuteRate = bytesInPerSecFifteenMinuteRate;
}
public Double getBytesOutPerSec() {
return bytesOutPerSec;
}
public void setBytesOutPerSec(Double bytesOutPerSec) {
this.bytesOutPerSec = bytesOutPerSec;
}
public Double getBytesOutPerSecMeanRate() {
return bytesOutPerSecMeanRate;
}
public void setBytesOutPerSecMeanRate(Double bytesOutPerSecMeanRate) {
this.bytesOutPerSecMeanRate = bytesOutPerSecMeanRate;
}
public Double getBytesOutPerSecFiveMinuteRate() {
return bytesOutPerSecFiveMinuteRate;
}
public void setBytesOutPerSecFiveMinuteRate(Double bytesOutPerSecFiveMinuteRate) {
this.bytesOutPerSecFiveMinuteRate = bytesOutPerSecFiveMinuteRate;
}
public Double getBytesOutPerSecFifteenMinuteRate() {
return bytesOutPerSecFifteenMinuteRate;
}
public void setBytesOutPerSecFifteenMinuteRate(Double bytesOutPerSecFifteenMinuteRate) {
this.bytesOutPerSecFifteenMinuteRate = bytesOutPerSecFifteenMinuteRate;
}
public Double getMessagesInPerSec() {
return messagesInPerSec;
}
public void setMessagesInPerSec(Double messagesInPerSec) {
this.messagesInPerSec = messagesInPerSec;
}
public Double getMessagesInPerSecMeanRate() {
return messagesInPerSecMeanRate;
}
public void setMessagesInPerSecMeanRate(Double messagesInPerSecMeanRate) {
this.messagesInPerSecMeanRate = messagesInPerSecMeanRate;
}
public Double getMessagesInPerSecFiveMinuteRate() {
return messagesInPerSecFiveMinuteRate;
}
public void setMessagesInPerSecFiveMinuteRate(Double messagesInPerSecFiveMinuteRate) {
this.messagesInPerSecFiveMinuteRate = messagesInPerSecFiveMinuteRate;
}
public Double getMessagesInPerSecFifteenMinuteRate() {
return messagesInPerSecFifteenMinuteRate;
}
public void setMessagesInPerSecFifteenMinuteRate(Double messagesInPerSecFifteenMinuteRate) {
this.messagesInPerSecFifteenMinuteRate = messagesInPerSecFifteenMinuteRate;
}
public Double getBytesRejectedPerSec() {
return bytesRejectedPerSec;
}
public void setBytesRejectedPerSec(Double bytesRejectedPerSec) {
this.bytesRejectedPerSec = bytesRejectedPerSec;
}
public Double getBytesRejectedPerSecMeanRate() {
return bytesRejectedPerSecMeanRate;
}
public void setBytesRejectedPerSecMeanRate(Double bytesRejectedPerSecMeanRate) {
this.bytesRejectedPerSecMeanRate = bytesRejectedPerSecMeanRate;
}
public Double getBytesRejectedPerSecFiveMinuteRate() {
return bytesRejectedPerSecFiveMinuteRate;
}
public void setBytesRejectedPerSecFiveMinuteRate(Double bytesRejectedPerSecFiveMinuteRate) {
this.bytesRejectedPerSecFiveMinuteRate = bytesRejectedPerSecFiveMinuteRate;
}
public Double getBytesRejectedPerSecFifteenMinuteRate() {
return bytesRejectedPerSecFifteenMinuteRate;
}
public void setBytesRejectedPerSecFifteenMinuteRate(Double bytesRejectedPerSecFifteenMinuteRate) {
this.bytesRejectedPerSecFifteenMinuteRate = bytesRejectedPerSecFifteenMinuteRate;
}
public Double getFailProduceRequestPerSec() {
return failProduceRequestPerSec;
}
public void setFailProduceRequestPerSec(Double failProduceRequestPerSec) {
this.failProduceRequestPerSec = failProduceRequestPerSec;
}
public Double getFailProduceRequestPerSecMeanRate() {
return failProduceRequestPerSecMeanRate;
}
public void setFailProduceRequestPerSecMeanRate(Double failProduceRequestPerSecMeanRate) {
this.failProduceRequestPerSecMeanRate = failProduceRequestPerSecMeanRate;
}
public Double getFailProduceRequestPerSecFiveMinuteRate() {
return failProduceRequestPerSecFiveMinuteRate;
}
public void setFailProduceRequestPerSecFiveMinuteRate(Double failProduceRequestPerSecFiveMinuteRate) {
this.failProduceRequestPerSecFiveMinuteRate = failProduceRequestPerSecFiveMinuteRate;
}
public Double getFailProduceRequestPerSecFifteenMinuteRate() {
return failProduceRequestPerSecFifteenMinuteRate;
}
public void setFailProduceRequestPerSecFifteenMinuteRate(Double failProduceRequestPerSecFifteenMinuteRate) {
this.failProduceRequestPerSecFifteenMinuteRate = failProduceRequestPerSecFifteenMinuteRate;
}
public Double getFailFetchRequestPerSec() {
return failFetchRequestPerSec;
}
public void setFailFetchRequestPerSec(Double failFetchRequestPerSec) {
this.failFetchRequestPerSec = failFetchRequestPerSec;
}
public Double getFailFetchRequestPerSecMeanRate() {
return failFetchRequestPerSecMeanRate;
}
public void setFailFetchRequestPerSecMeanRate(Double failFetchRequestPerSecMeanRate) {
this.failFetchRequestPerSecMeanRate = failFetchRequestPerSecMeanRate;
}
public Double getFailFetchRequestPerSecFiveMinuteRate() {
return failFetchRequestPerSecFiveMinuteRate;
}
public void setFailFetchRequestPerSecFiveMinuteRate(Double failFetchRequestPerSecFiveMinuteRate) {
this.failFetchRequestPerSecFiveMinuteRate = failFetchRequestPerSecFiveMinuteRate;
}
public Double getFailFetchRequestPerSecFifteenMinuteRate() {
return failFetchRequestPerSecFifteenMinuteRate;
}
public void setFailFetchRequestPerSecFifteenMinuteRate(Double failFetchRequestPerSecFifteenMinuteRate) {
this.failFetchRequestPerSecFifteenMinuteRate = failFetchRequestPerSecFifteenMinuteRate;
}
public Double getTotalProduceRequestsPerSec() {
return totalProduceRequestsPerSec;
}
public void setTotalProduceRequestsPerSec(Double totalProduceRequestsPerSec) {
this.totalProduceRequestsPerSec = totalProduceRequestsPerSec;
}
public Double getTotalProduceRequestsPerSecMeanRate() {
return totalProduceRequestsPerSecMeanRate;
}
public void setTotalProduceRequestsPerSecMeanRate(Double totalProduceRequestsPerSecMeanRate) {
this.totalProduceRequestsPerSecMeanRate = totalProduceRequestsPerSecMeanRate;
}
public Double getTotalProduceRequestsPerSecFiveMinuteRate() {
return totalProduceRequestsPerSecFiveMinuteRate;
}
public void setTotalProduceRequestsPerSecFiveMinuteRate(Double totalProduceRequestsPerSecFiveMinuteRate) {
this.totalProduceRequestsPerSecFiveMinuteRate = totalProduceRequestsPerSecFiveMinuteRate;
}
public Double getTotalProduceRequestsPerSecFifteenMinuteRate() {
return totalProduceRequestsPerSecFifteenMinuteRate;
}
public void setTotalProduceRequestsPerSecFifteenMinuteRate(Double totalProduceRequestsPerSecFifteenMinuteRate) {
this.totalProduceRequestsPerSecFifteenMinuteRate = totalProduceRequestsPerSecFifteenMinuteRate;
}
public Double getTotalFetchRequestsPerSec() {
return totalFetchRequestsPerSec;
}
public void setTotalFetchRequestsPerSec(Double totalFetchRequestsPerSec) {
this.totalFetchRequestsPerSec = totalFetchRequestsPerSec;
}
public Double getTotalFetchRequestsPerSecMeanRate() {
return totalFetchRequestsPerSecMeanRate;
}
public void setTotalFetchRequestsPerSecMeanRate(Double totalFetchRequestsPerSecMeanRate) {
this.totalFetchRequestsPerSecMeanRate = totalFetchRequestsPerSecMeanRate;
}
public Double getTotalFetchRequestsPerSecFiveMinuteRate() {
return totalFetchRequestsPerSecFiveMinuteRate;
}
public void setTotalFetchRequestsPerSecFiveMinuteRate(Double totalFetchRequestsPerSecFiveMinuteRate) {
this.totalFetchRequestsPerSecFiveMinuteRate = totalFetchRequestsPerSecFiveMinuteRate;
}
public Double getTotalFetchRequestsPerSecFifteenMinuteRate() {
return totalFetchRequestsPerSecFifteenMinuteRate;
}
public void setTotalFetchRequestsPerSecFifteenMinuteRate(Double totalFetchRequestsPerSecFifteenMinuteRate) {
this.totalFetchRequestsPerSecFifteenMinuteRate = totalFetchRequestsPerSecFifteenMinuteRate;
}
}

View File

@@ -1,331 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.metrics;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
/**
 * Broker-level metrics pulled on a schedule, extending the shared flow
 * metrics in BaseMetrics with broker-only gauges (partition/leader counts,
 * queue sizes, handler idle percentages, request latencies).
 *
 * @author tukun
 * @date 2015/11/6.
 */
public class BrokerMetrics extends BaseMetrics {
/**
 * Cluster id.
 */
private Long clusterId;
/**
 * Broker id. (The original comment here said "Topic名称"/"topic name" —
 * a copy-paste slip; the field is clearly the broker id.)
 */
private Integer brokerId;
/**
 * Produce requests per second: one-minute rate plus mean, five-minute
 * and fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS
})
private Double produceRequestPerSec = 0.0;
private Double produceRequestPerSecMeanRate = 0.0;
private Double produceRequestPerSecFiveMinuteRate = 0.0;
private Double produceRequestPerSecFifteenMinuteRate = 0.0;
/**
 * Consumer fetch requests per second: one-minute rate plus mean,
 * five-minute and fifteen-minute rates.
 */
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS
})
private Double fetchConsumerRequestPerSec = 0.0;
private Double fetchConsumerRequestPerSecMeanRate = 0.0;
private Double fetchConsumerRequestPerSecFiveMinuteRate = 0.0;
private Double fetchConsumerRequestPerSecFifteenMinuteRate = 0.0;
/**
 * Number of partitions hosted on the broker.
 * NOTE(review): the bare "5" mixed into the MetricsType constants below
 * looks like a magic number — confirm which MetricsType value it denotes.
 */
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS, 5})
private int partitionCount;
/**
 * Number of under-replicated partitions on the broker. (The original
 * comment said "已同步"/synced partitions; the field name says otherwise.)
 */
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS})
private int underReplicatedPartitions;
/**
 * Number of partitions this broker leads.
 * NOTE(review): same unexplained "5" as on partitionCount — confirm.
 */
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS, 5})
private int leaderCount;
/**
 * Request-handler average idle percentage.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double requestHandlerAvgIdlePercent = 0.0;
/**
 * Network-processor average idle percentage.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double networkProcessorAvgIdlePercent = 0.0;
/**
 * Request queue size.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Integer requestQueueSize = 0;
/**
 * Response queue size.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Integer responseQueueSize = 0;
/**
 * Log-flush rate/time in milliseconds.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double logFlushRateAndTimeMs = 0.0;
/**
 * Total produce-request time — mean.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeProduceMean = 0.0;
/**
 * Total produce-request time — 99th percentile.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeProduce99Th = 0.0;
/**
 * Total consumer-fetch-request time — mean.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeFetchConsumerMean = 0.0;
/**
 * Total consumer-fetch-request time — 99th percentile.
 */
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeFetchConsumer99Th = 0.0;
// Plain accessors below; no validation or side effects.
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public Double getProduceRequestPerSec() {
return produceRequestPerSec;
}
public void setProduceRequestPerSec(Double produceRequestPerSec) {
this.produceRequestPerSec = produceRequestPerSec;
}
public Double getProduceRequestPerSecMeanRate() {
return produceRequestPerSecMeanRate;
}
public void setProduceRequestPerSecMeanRate(Double produceRequestPerSecMeanRate) {
this.produceRequestPerSecMeanRate = produceRequestPerSecMeanRate;
}
public Double getProduceRequestPerSecFiveMinuteRate() {
return produceRequestPerSecFiveMinuteRate;
}
public void setProduceRequestPerSecFiveMinuteRate(Double produceRequestPerSecFiveMinuteRate) {
this.produceRequestPerSecFiveMinuteRate = produceRequestPerSecFiveMinuteRate;
}
public Double getProduceRequestPerSecFifteenMinuteRate() {
return produceRequestPerSecFifteenMinuteRate;
}
public void setProduceRequestPerSecFifteenMinuteRate(Double produceRequestPerSecFifteenMinuteRate) {
this.produceRequestPerSecFifteenMinuteRate = produceRequestPerSecFifteenMinuteRate;
}
public Double getFetchConsumerRequestPerSec() {
return fetchConsumerRequestPerSec;
}
public void setFetchConsumerRequestPerSec(Double fetchConsumerRequestPerSec) {
this.fetchConsumerRequestPerSec = fetchConsumerRequestPerSec;
}
public Double getFetchConsumerRequestPerSecMeanRate() {
return fetchConsumerRequestPerSecMeanRate;
}
public void setFetchConsumerRequestPerSecMeanRate(Double fetchConsumerRequestPerSecMeanRate) {
this.fetchConsumerRequestPerSecMeanRate = fetchConsumerRequestPerSecMeanRate;
}
public Double getFetchConsumerRequestPerSecFiveMinuteRate() {
return fetchConsumerRequestPerSecFiveMinuteRate;
}
public void setFetchConsumerRequestPerSecFiveMinuteRate(Double fetchConsumerRequestPerSecFiveMinuteRate) {
this.fetchConsumerRequestPerSecFiveMinuteRate = fetchConsumerRequestPerSecFiveMinuteRate;
}
public Double getFetchConsumerRequestPerSecFifteenMinuteRate() {
return fetchConsumerRequestPerSecFifteenMinuteRate;
}
public void setFetchConsumerRequestPerSecFifteenMinuteRate(Double fetchConsumerRequestPerSecFifteenMinuteRate) {
this.fetchConsumerRequestPerSecFifteenMinuteRate = fetchConsumerRequestPerSecFifteenMinuteRate;
}
public int getPartitionCount() {
return partitionCount;
}
public void setPartitionCount(int partitionCount) {
this.partitionCount = partitionCount;
}
public int getUnderReplicatedPartitions() {
return underReplicatedPartitions;
}
public void setUnderReplicatedPartitions(int underReplicatedPartitions) {
this.underReplicatedPartitions = underReplicatedPartitions;
}
public int getLeaderCount() {
return leaderCount;
}
public void setLeaderCount(int leaderCount) {
this.leaderCount = leaderCount;
}
public Double getRequestHandlerAvgIdlePercent() {
return requestHandlerAvgIdlePercent;
}
public void setRequestHandlerAvgIdlePercent(Double requestHandlerAvgIdlePercent) {
this.requestHandlerAvgIdlePercent = requestHandlerAvgIdlePercent;
}
public Double getNetworkProcessorAvgIdlePercent() {
return networkProcessorAvgIdlePercent;
}
public void setNetworkProcessorAvgIdlePercent(Double networkProcessorAvgIdlePercent) {
this.networkProcessorAvgIdlePercent = networkProcessorAvgIdlePercent;
}
public Integer getRequestQueueSize() {
return requestQueueSize;
}
public void setRequestQueueSize(Integer requestQueueSize) {
this.requestQueueSize = requestQueueSize;
}
public Integer getResponseQueueSize() {
return responseQueueSize;
}
public void setResponseQueueSize(Integer responseQueueSize) {
this.responseQueueSize = responseQueueSize;
}
public Double getLogFlushRateAndTimeMs() {
return logFlushRateAndTimeMs;
}
public void setLogFlushRateAndTimeMs(Double logFlushRateAndTimeMs) {
this.logFlushRateAndTimeMs = logFlushRateAndTimeMs;
}
public Double getTotalTimeProduceMean() {
return totalTimeProduceMean;
}
public void setTotalTimeProduceMean(Double totalTimeProduceMean) {
this.totalTimeProduceMean = totalTimeProduceMean;
}
public Double getTotalTimeProduce99Th() {
return totalTimeProduce99Th;
}
public void setTotalTimeProduce99Th(Double totalTimeProduce99Th) {
this.totalTimeProduce99Th = totalTimeProduce99Th;
}
public Double getTotalTimeFetchConsumerMean() {
return totalTimeFetchConsumerMean;
}
public void setTotalTimeFetchConsumerMean(Double totalTimeFetchConsumerMean) {
this.totalTimeFetchConsumerMean = totalTimeFetchConsumerMean;
}
public Double getTotalTimeFetchConsumer99Th() {
return totalTimeFetchConsumer99Th;
}
public void setTotalTimeFetchConsumer99Th(Double totalTimeFetchConsumer99Th) {
this.totalTimeFetchConsumer99Th = totalTimeFetchConsumer99Th;
}
// Registers every @FieldSelector-annotated field in
// Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP, keyed by metrics type.
// The registered name is the capitalized field name unless the annotation
// supplies an explicit name.
private static void initialization(Field[] fields){
for(Field field : fields){
FieldSelector annotation = field.getAnnotation(FieldSelector.class);
if(annotation ==null){
continue;
}
String fieldName;
if("".equals(annotation.name())) {
fieldName = field.getName().substring(0,1).toUpperCase() + field.getName().substring(1);
} else{
fieldName = annotation.name();
}
for(int type: annotation.types()){
List<String> list = Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
list.add(fieldName);
Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.put(type, list);
}
}
}
// Returns the registered field names for the given metrics type, lazily
// building the registry (from BrokerMetrics' and BaseMetrics' declared
// fields) under the class lock on first use. Unknown types yield an
// empty list, never null.
public static List<String> getFieldNameList(int metricsType){
synchronized (BrokerMetrics.class) {
if (Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.isEmpty()) {
initialization(BrokerMetrics.class.getDeclaredFields());
initialization(BaseMetrics.class.getDeclaredFields());
}
}
return Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(metricsType, new ArrayList<>());
}
}

View File

@@ -1,68 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.metrics;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
/**
 * Per-topic metrics: inherits the flow/request fields from BaseMetrics and
 * adds the identity (cluster id + topic name) they belong to.
 */
public class TopicMetrics extends BaseMetrics {
    /**
     * Cluster id.
     */
    private Long clusterId;
    /**
     * Topic name.
     */
    private String topicName;

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public String getTopicName() {
        return topicName;
    }

    public void setTopicName(String topicName) {
        this.topicName = topicName;
    }

    // Registers every @FieldSelector-annotated field in
    // Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP, keyed by metrics type.
    // The registered name is the capitalized field name unless the
    // annotation supplies an explicit name.
    private static void initialization(Field[] fields){
        for(Field field : fields){
            FieldSelector annotation = field.getAnnotation(FieldSelector.class);
            if(annotation ==null){
                continue;
            }
            String fieldName;
            if("".equals(annotation.name())){
                String name = field.getName();
                fieldName = name.substring(0,1).toUpperCase()+name.substring(1);
            }else{
                fieldName = annotation.name();
            }
            for(int type: annotation.types()){
                List<String> list = Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
                list.add(fieldName);
                Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.put(type, list);
            }
        }
    }

    // Returns the registered field names for the given metrics type, lazily
    // building the registry under the class lock on first use.
    public static List<String> getFieldNameList(int type){
        synchronized (TopicMetrics.class) {
            if (Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.isEmpty()) {
                initialization(TopicMetrics.class.getDeclaredFields());
                initialization(BaseMetrics.class.getDeclaredFields());
            }
        }
        // getOrDefault: the previous map.get(type) returned null for
        // unregistered types, NPE-trapping callers that iterate the result;
        // BrokerMetrics.getFieldNameList already returns an empty list.
        return Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
    }
}

View File

@@ -1,50 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Console account record: username, password and role.
 *
 * @author zengqiao
 * @date 19/5/3
 */
public class AccountDO extends BaseDO {
    private String username;
    private String password;
    private Integer role;

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public Integer getRole() {
        return role;
    }

    public void setRole(Integer role) {
        this.role = role;
    }

    /**
     * Debug representation; format kept identical to the original.
     * NOTE(review): this includes the raw password, so logging the object
     * leaks the credential — confirm whether it should be masked.
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("AccountDO{");
        sb.append("username='").append(username).append('\'');
        sb.append(", password='").append(password).append('\'');
        sb.append(", role=").append(role);
        sb.append(", id=").append(id);
        sb.append(", status=").append(status);
        sb.append(", gmtCreate=").append(gmtCreate);
        sb.append(", gmtModify=").append(gmtModify);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -1,68 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Alarm rule record: rule name plus the serialized strategy expressions,
 * filters, actions and the principals to notify.
 */
public class AlarmRuleDO extends BaseDO {
    private String alarmName;
    private String strategyExpressions;
    private String strategyFilters;
    private String strategyActions;
    private String principals;

    public String getAlarmName() {
        return alarmName;
    }

    public void setAlarmName(String alarmName) {
        this.alarmName = alarmName;
    }

    public String getStrategyExpressions() {
        return strategyExpressions;
    }

    public void setStrategyExpressions(String strategyExpressions) {
        this.strategyExpressions = strategyExpressions;
    }

    public String getStrategyFilters() {
        return strategyFilters;
    }

    public void setStrategyFilters(String strategyFilters) {
        this.strategyFilters = strategyFilters;
    }

    public String getStrategyActions() {
        return strategyActions;
    }

    public void setStrategyActions(String strategyActions) {
        this.strategyActions = strategyActions;
    }

    public String getPrincipals() {
        return principals;
    }

    public void setPrincipals(String principals) {
        this.principals = principals;
    }

    /** Debug representation; format kept identical to the original. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("AlarmRuleDO{");
        sb.append("alarmName='").append(alarmName).append('\'');
        sb.append(", strategyExpressions='").append(strategyExpressions).append('\'');
        sb.append(", strategyFilters='").append(strategyFilters).append('\'');
        sb.append(", strategyActions='").append(strategyActions).append('\'');
        sb.append(", principals='").append(principals).append('\'');
        sb.append(", id=").append(id);
        sb.append(", status=").append(status);
        sb.append(", gmtCreate=").append(gmtCreate);
        sb.append(", gmtModify=").append(gmtModify);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -1,59 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import java.util.Date;
/**
* @author arthur
* @date 2017/7/25.
*/
public class BaseDO {
protected Long id;
protected Integer status;
protected Date gmtCreate;
protected Date gmtModify;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "BaseDO{" +
"id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -1,37 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import java.util.Date;
/**
* @author zengqiao
* @date 19/11/25
*/
public abstract class BaseEntryDO {
protected Long id;
protected Date gmtCreate;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
@Override
public String toString() {
return "BaseEntryDO{" +
"id=" + id +
", gmtCreate=" + gmtCreate +
'}';
}
}

View File

@@ -1,72 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Broker registration record: owning cluster, broker id, network endpoint
 * and the timestamp it was observed.
 *
 * @author zengqiao
 * @date 19/4/3
 */
public class BrokerDO extends BaseDO {
    private Long clusterId;
    private Integer brokerId;
    private String host;
    private Integer port;
    private Long timestamp;

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Integer getPort() {
        return port;
    }

    public void setPort(Integer port) {
        this.port = port;
    }

    public Long getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(Long timestamp) {
        this.timestamp = timestamp;
    }

    /** Debug representation; format kept identical to the original. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("BrokerDO{");
        sb.append("clusterId=").append(clusterId);
        sb.append(", brokerId=").append(brokerId);
        sb.append(", host='").append(host).append('\'');
        sb.append(", port=").append(port);
        sb.append(", timestamp=").append(timestamp);
        sb.append(", id=").append(id);
        sb.append(", status=").append(status);
        sb.append(", gmtCreate=").append(gmtCreate);
        sb.append(", gmtModify=").append(gmtModify);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -1,127 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import java.util.Date;
/**
 * Kafka cluster registration: connection endpoints (ZooKeeper and bootstrap
 * servers), Kafka version, alarm switch, and SASL/security settings.
 */
public class ClusterDO extends BaseDO {
    private String clusterName;
    private String zookeeper;
    private String bootstrapServers;
    private String kafkaVersion;
    private Integer alarmFlag;
    private String securityProtocol;
    private String saslMechanism;
    private String saslJaasConfig;

    public String getClusterName() {
        return clusterName;
    }

    public void setClusterName(String clusterName) {
        this.clusterName = clusterName;
    }

    public String getZookeeper() {
        return zookeeper;
    }

    public void setZookeeper(String zookeeper) {
        this.zookeeper = zookeeper;
    }

    public String getBootstrapServers() {
        return bootstrapServers;
    }

    public void setBootstrapServers(String bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
    }

    public String getKafkaVersion() {
        return kafkaVersion;
    }

    public void setKafkaVersion(String kafkaVersion) {
        this.kafkaVersion = kafkaVersion;
    }

    public Integer getAlarmFlag() {
        return alarmFlag;
    }

    public void setAlarmFlag(Integer alarmFlag) {
        this.alarmFlag = alarmFlag;
    }

    public String getSecurityProtocol() {
        return securityProtocol;
    }

    public void setSecurityProtocol(String securityProtocol) {
        this.securityProtocol = securityProtocol;
    }

    public String getSaslMechanism() {
        return saslMechanism;
    }

    public void setSaslMechanism(String saslMechanism) {
        this.saslMechanism = saslMechanism;
    }

    public String getSaslJaasConfig() {
        return saslJaasConfig;
    }

    public void setSaslJaasConfig(String saslJaasConfig) {
        this.saslJaasConfig = saslJaasConfig;
    }

    // NOTE: the previous version re-declared getStatus/setStatus,
    // getGmtCreate/setGmtCreate and getGmtModify/setGmtModify verbatim;
    // they are inherited unchanged from BaseDO, so the duplicates were
    // removed. Callers (and reflective getter lookups) still resolve them
    // through the superclass.

    @Override
    public String toString() {
        return "ClusterDO{" +
                "clusterName='" + clusterName + '\'' +
                ", zookeeper='" + zookeeper + '\'' +
                ", bootstrapServers='" + bootstrapServers + '\'' +
                ", kafkaVersion='" + kafkaVersion + '\'' +
                ", alarmFlag=" + alarmFlag +
                ", securityProtocol='" + securityProtocol + '\'' +
                ", saslMechanism='" + saslMechanism + '\'' +
                ", saslJaasConfig='" + saslJaasConfig + '\'' +
                ", id=" + id +
                ", status=" + status +
                ", gmtCreate=" + gmtCreate +
                ", gmtModify=" + gmtModify +
                '}';
    }
}

View File

@@ -1,110 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
/**
 * Aggregated cluster-level metrics, accumulated broker by broker via
 * {@link #addBrokerMetrics}.
 */
public class ClusterMetricsDO extends BaseEntryDO {
    private Long clusterId;
    private Integer topicNum = 0;
    private Integer partitionNum = 0;
    private Integer brokerNum = 0;
    private Double bytesInPerSec = 0.0;
    private Double bytesOutPerSec = 0.0;
    private Double bytesRejectedPerSec = 0.0;
    private Double messagesInPerSec = 0.0;

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public Integer getTopicNum() { return this.topicNum; }

    public void setTopicNum(Integer topicNum) { this.topicNum = topicNum; }

    public Integer getPartitionNum() { return this.partitionNum; }

    public void setPartitionNum(Integer partitionNum) { this.partitionNum = partitionNum; }

    public Integer getBrokerNum() { return this.brokerNum; }

    public void setBrokerNum(Integer brokerNum) { this.brokerNum = brokerNum; }

    public Double getBytesInPerSec() { return this.bytesInPerSec; }

    public void setBytesInPerSec(Double bytesInPerSec) { this.bytesInPerSec = bytesInPerSec; }

    public Double getBytesOutPerSec() { return this.bytesOutPerSec; }

    public void setBytesOutPerSec(Double bytesOutPerSec) { this.bytesOutPerSec = bytesOutPerSec; }

    public Double getBytesRejectedPerSec() { return this.bytesRejectedPerSec; }

    public void setBytesRejectedPerSec(Double bytesRejectedPerSec) { this.bytesRejectedPerSec = bytesRejectedPerSec; }

    public Double getMessagesInPerSec() { return this.messagesInPerSec; }

    public void setMessagesInPerSec(Double messagesInPerSec) { this.messagesInPerSec = messagesInPerSec; }

    /**
     * Folds one broker's metrics into this cluster aggregate: increments the
     * broker count and sums the throughput rates. Overwrites clusterId with
     * the broker's cluster id each call.
     *
     * NOTE(review): if the BrokerMetrics getters return boxed values, a null
     * metric would NPE here via unboxing — confirm against BrokerMetrics.
     */
    public void addBrokerMetrics(BrokerMetrics brokerMetrics) {
        this.clusterId = brokerMetrics.getClusterId();
        this.brokerNum = this.brokerNum + 1;
        this.bytesInPerSec = this.bytesInPerSec + brokerMetrics.getBytesInPerSec();
        this.bytesOutPerSec = this.bytesOutPerSec + brokerMetrics.getBytesOutPerSec();
        this.bytesRejectedPerSec = this.bytesRejectedPerSec + brokerMetrics.getBytesRejectedPerSec();
        this.messagesInPerSec = this.messagesInPerSec + brokerMetrics.getMessagesInPerSec();
    }

    @Override
    public String toString() {
        return new StringBuilder("ClusterMetricsDO{")
                .append("clusterId=").append(clusterId)
                .append(", topicNum=").append(topicNum)
                .append(", partitionNum=").append(partitionNum)
                .append(", brokerNum=").append(brokerNum)
                .append(", bytesInPerSec=").append(bytesInPerSec)
                .append(", bytesOutPerSec=").append(bytesOutPerSec)
                .append(", bytesRejectedPerSec=").append(bytesRejectedPerSec)
                .append(", messagesInPerSec=").append(messagesInPerSec)
                .append(", id=").append(id)
                .append(", gmtCreate=").append(gmtCreate)
                .append('}')
                .toString();
    }
}

View File

@@ -1,84 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Record of a cluster's controller broker at a point in time.
 *
 * @author zengqiao
 * @date 20/2/28
 */
public class ControllerDO extends BaseEntryDO {
    private Long clusterId;
    private Integer brokerId; // broker currently holding the controller role
    private String host;
    private Long timestamp;   // controller election timestamp
    private Integer version;

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public Integer getBrokerId() { return this.brokerId; }

    public void setBrokerId(Integer brokerId) { this.brokerId = brokerId; }

    public String getHost() { return this.host; }

    public void setHost(String host) { this.host = host; }

    public Long getTimestamp() { return this.timestamp; }

    public void setTimestamp(Long timestamp) { this.timestamp = timestamp; }

    public Integer getVersion() { return this.version; }

    public void setVersion(Integer version) { this.version = version; }

    @Override
    public String toString() {
        return new StringBuilder("ControllerDO{")
                .append("id=").append(id)
                .append(", clusterId=").append(clusterId)
                .append(", brokerId=").append(brokerId)
                .append(", host='").append(host).append('\'')
                .append(", timestamp=").append(timestamp)
                .append(", version=").append(version)
                .append(", gmtCreate=").append(gmtCreate)
                .append('}')
                .toString();
    }

    /** Factory; a null host is normalized to the empty string. */
    public static ControllerDO newInstance(Long clusterId,
                                           Integer brokerId,
                                           String host,
                                           Long timestamp,
                                           Integer version) {
        ControllerDO controllerDO = new ControllerDO();
        controllerDO.setClusterId(clusterId);
        controllerDO.setBrokerId(brokerId);
        if (host == null) {
            controllerDO.setHost("");
        } else {
            controllerDO.setHost(host);
        }
        controllerDO.setTimestamp(timestamp);
        controllerDO.setVersion(version);
        return controllerDO;
    }
}

View File

@@ -1,96 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Topic partition-migration (reassignment) task record.
 *
 * @author zengqiao
 * @date 19/4/16
 */
public class MigrationTaskDO extends BaseDO {
    private Long clusterId;
    private String topicName;
    private String reassignmentJson; // reassignment plan as JSON text
    private Long throttle;           // replication throttle for the move
    private String operator;
    private String description;

    public String getDescription() { return this.description; }

    public void setDescription(String description) { this.description = description; }

    public String getOperator() { return this.operator; }

    public void setOperator(String operator) { this.operator = operator; }

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getTopicName() { return this.topicName; }

    public void setTopicName(String topicName) { this.topicName = topicName; }

    public String getReassignmentJson() { return this.reassignmentJson; }

    public void setReassignmentJson(String reassignmentJson) { this.reassignmentJson = reassignmentJson; }

    public Long getThrottle() { return this.throttle; }

    public void setThrottle(Long throttle) { this.throttle = throttle; }

    // Deliberately omits operator/description, matching the original output.
    @Override
    public String toString() {
        return new StringBuilder("MigrationTaskDO{")
                .append("clusterId=").append(clusterId)
                .append(", topicName='").append(topicName).append('\'')
                .append(", reassignmentJson='").append(reassignmentJson).append('\'')
                .append(", throttle=").append(throttle)
                .append(", id=").append(id)
                .append(", status=").append(status)
                .append(", gmtCreate=").append(gmtCreate)
                .append(", gmtModify=").append(gmtModify)
                .append('}')
                .toString();
    }

    /** Factory for a new task; the operator field is left unset here. */
    public static MigrationTaskDO createInstance(Long clusterId,
                                                 String topicName,
                                                 String reassignmentJson,
                                                 Long throttle,
                                                 String description) {
        MigrationTaskDO migrationTaskDO = new MigrationTaskDO();
        migrationTaskDO.setClusterId(clusterId);
        migrationTaskDO.setTopicName(topicName);
        migrationTaskDO.setReassignmentJson(reassignmentJson);
        migrationTaskDO.setThrottle(throttle);
        migrationTaskDO.setDescription(description);
        return migrationTaskDO;
    }
}

View File

@@ -1,64 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Audit record: who performed which operation on which topic.
 */
public class OperationHistoryDO extends BaseEntryDO {
    private Long clusterId;
    private String topicName;
    private String operator;  // user who performed the operation
    private String operation; // description of what was done

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getTopicName() { return this.topicName; }

    public void setTopicName(String topicName) { this.topicName = topicName; }

    public String getOperator() { return this.operator; }

    public void setOperator(String operator) { this.operator = operator; }

    public String getOperation() { return this.operation; }

    public void setOperation(String operation) { this.operation = operation; }

    @Override
    public String toString() {
        return new StringBuilder("OperationHistoryDO{")
                .append("clusterId=").append(clusterId)
                .append(", topicName='").append(topicName).append('\'')
                .append(", operator='").append(operator).append('\'')
                .append(", operation='").append(operation).append('\'')
                .append(", id=").append(id)
                .append(", gmtCreate=").append(gmtCreate)
                .append('}')
                .toString();
    }

    /** Factory mirroring the four business fields. */
    public static OperationHistoryDO newInstance(Long clusterId, String topicName, String operator, String operation) {
        OperationHistoryDO operationHistoryDO = new OperationHistoryDO();
        operationHistoryDO.setClusterId(clusterId);
        operationHistoryDO.setTopicName(topicName);
        operationHistoryDO.setOperator(operator);
        operationHistoryDO.setOperation(operation);
        return operationHistoryDO;
    }
}

View File

@@ -1,112 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Partition-expansion application (order) record, including approval state.
 */
public class OrderPartitionDO extends BaseDO {
    private Long clusterId;
    private String clusterName;
    private String topicName;
    private String applicant;    // requesting user
    private Long peakBytesIn;    // requested peak inbound throughput
    private String description;
    private Integer orderStatus; // approval workflow status code
    private String approver;
    private String opinion;      // approver's comment

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getClusterName() { return this.clusterName; }

    public void setClusterName(String clusterName) { this.clusterName = clusterName; }

    public String getTopicName() { return this.topicName; }

    public void setTopicName(String topicName) { this.topicName = topicName; }

    public String getApplicant() { return this.applicant; }

    public void setApplicant(String applicant) { this.applicant = applicant; }

    public Long getPeakBytesIn() { return this.peakBytesIn; }

    public void setPeakBytesIn(Long peakBytesIn) { this.peakBytesIn = peakBytesIn; }

    public String getDescription() { return this.description; }

    public void setDescription(String description) { this.description = description; }

    public Integer getOrderStatus() { return this.orderStatus; }

    public void setOrderStatus(Integer orderStatus) { this.orderStatus = orderStatus; }

    public String getApprover() { return this.approver; }

    public void setApprover(String approver) { this.approver = approver; }

    public String getOpinion() { return this.opinion; }

    public void setOpinion(String opinion) { this.opinion = opinion; }

    @Override
    public String toString() {
        return new StringBuilder("OrderPartitionDO{")
                .append("clusterId=").append(clusterId)
                .append(", clusterName='").append(clusterName).append('\'')
                .append(", topicName='").append(topicName).append('\'')
                .append(", applicant='").append(applicant).append('\'')
                .append(", peakBytesIn=").append(peakBytesIn)
                .append(", description='").append(description).append('\'')
                .append(", orderStatus=").append(orderStatus)
                .append(", approver='").append(approver).append('\'')
                .append(", opinion='").append(opinion).append('\'')
                .append(", id=").append(id)
                .append(", status=").append(status)
                .append(", gmtCreate=").append(gmtCreate)
                .append(", gmtModify=").append(gmtModify)
                .append('}')
                .toString();
    }
}

View File

@@ -1,178 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Topic-creation application (order) record: requested topic settings plus
 * approval workflow state.
 */
public class OrderTopicDO extends BaseDO {
    private Long clusterId;
    private String clusterName;
    private String topicName;
    private Long retentionTime;   // requested retention (unit defined by caller — TODO confirm ms)
    private Integer partitionNum;
    private Integer replicaNum;
    private String regions;       // candidate region list (serialized)
    private String brokers;       // candidate broker list (serialized)
    private Long peakBytesIn;
    private String applicant;
    private String principals;    // topic owners
    private String description;
    private Integer orderStatus;  // approval workflow status code
    private String approver;
    private String opinion;

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getClusterName() { return this.clusterName; }

    public void setClusterName(String clusterName) { this.clusterName = clusterName; }

    public String getTopicName() { return this.topicName; }

    public void setTopicName(String topicName) { this.topicName = topicName; }

    public Long getRetentionTime() { return this.retentionTime; }

    public void setRetentionTime(Long retentionTime) { this.retentionTime = retentionTime; }

    public Integer getPartitionNum() { return this.partitionNum; }

    public void setPartitionNum(Integer partitionNum) { this.partitionNum = partitionNum; }

    public Integer getReplicaNum() { return this.replicaNum; }

    public void setReplicaNum(Integer replicaNum) { this.replicaNum = replicaNum; }

    public String getRegions() { return this.regions; }

    public void setRegions(String regions) { this.regions = regions; }

    public String getBrokers() { return this.brokers; }

    public void setBrokers(String brokers) { this.brokers = brokers; }

    public Long getPeakBytesIn() { return this.peakBytesIn; }

    public void setPeakBytesIn(Long peakBytesIn) { this.peakBytesIn = peakBytesIn; }

    public String getApplicant() { return this.applicant; }

    public void setApplicant(String applicant) { this.applicant = applicant; }

    public String getPrincipals() { return this.principals; }

    public void setPrincipals(String principals) { this.principals = principals; }

    public String getDescription() { return this.description; }

    public void setDescription(String description) { this.description = description; }

    public Integer getOrderStatus() { return this.orderStatus; }

    public void setOrderStatus(Integer orderStatus) { this.orderStatus = orderStatus; }

    public String getApprover() { return this.approver; }

    public void setApprover(String approver) { this.approver = approver; }

    public String getOpinion() { return this.opinion; }

    public void setOpinion(String opinion) { this.opinion = opinion; }

    @Override
    public String toString() {
        return new StringBuilder("OrderTopicDO{")
                .append("clusterId=").append(clusterId)
                .append(", clusterName='").append(clusterName).append('\'')
                .append(", topicName='").append(topicName).append('\'')
                .append(", retentionTime=").append(retentionTime)
                .append(", partitionNum=").append(partitionNum)
                .append(", replicaNum=").append(replicaNum)
                .append(", regions='").append(regions).append('\'')
                .append(", brokers='").append(brokers).append('\'')
                .append(", peakBytesIn=").append(peakBytesIn)
                .append(", applicant='").append(applicant).append('\'')
                .append(", principals='").append(principals).append('\'')
                .append(", description='").append(description).append('\'')
                .append(", orderStatus=").append(orderStatus)
                .append(", approver='").append(approver).append('\'')
                .append(", opinion='").append(opinion).append('\'')
                .append(", id=").append(id)
                .append(", status=").append(status)
                .append(", gmtCreate=").append(gmtCreate)
                .append(", gmtModify=").append(gmtModify)
                .append('}')
                .toString();
    }
}

View File

@@ -1,63 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class RegionDO extends BaseDO{
private String regionName;
private Long clusterId;
private String brokerList;
private Integer level;
private String description;
private String operator;
public String getRegionName() {
return regionName;
}
public void setRegionName(String regionName) {
this.regionName = regionName;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getBrokerList() {
return brokerList;
}
public void setBrokerList(String brokerList) {
this.brokerList = brokerList;
}
public Integer getLevel() {
return level;
}
public void setLevel(Integer level) {
this.level = level;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
}

View File

@@ -1,68 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Topic ownership record: applicant, principals and description per topic.
 */
public class TopicDO extends BaseDO {
    private Long clusterId;
    private String topicName;
    private String applicant;
    private String principals; // topic owners
    private String description;

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getTopicName() { return this.topicName; }

    public void setTopicName(String topicName) { this.topicName = topicName; }

    public String getApplicant() { return this.applicant; }

    public void setApplicant(String applicant) { this.applicant = applicant; }

    public String getPrincipals() { return this.principals; }

    public void setPrincipals(String principals) { this.principals = principals; }

    public String getDescription() { return this.description; }

    public void setDescription(String description) { this.description = description; }

    @Override
    public String toString() {
        return new StringBuilder("TopicDO{")
                .append("clusterId=").append(clusterId)
                .append(", topicName='").append(topicName).append('\'')
                .append(", applicant='").append(applicant).append('\'')
                .append(", principals='").append(principals).append('\'')
                .append(", description='").append(description).append('\'')
                .append(", id=").append(id)
                .append(", status=").append(status)
                .append(", gmtCreate=").append(gmtCreate)
                .append(", gmtModify=").append(gmtModify)
                .append('}')
                .toString();
    }
}

View File

@@ -1,46 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
 * Per-user topic favorite (bookmark) record.
 */
public class TopicFavoriteDO extends BaseDO {
    private String username;
    private Long clusterId;
    private String topicName;

    public String getUsername() { return this.username; }

    public void setUsername(String username) { this.username = username; }

    public Long getClusterId() { return this.clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getTopicName() { return this.topicName; }

    public void setTopicName(String topicName) { this.topicName = topicName; }

    @Override
    public String toString() {
        return new StringBuilder("TopicFavoriteDO{")
                .append("username='").append(username).append('\'')
                .append(", clusterId=").append(clusterId)
                .append(", topicName='").append(topicName).append('\'')
                .append(", id=").append(id)
                .append(", status=").append(status)
                .append(", gmtCreate=").append(gmtCreate)
                .append(", gmtModify=").append(gmtModify)
                .append('}')
                .toString();
    }
}

View File

@@ -1,17 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po.query;
/**
 * Query option for alarm rules: filter by alarm name (plus inherited id).
 *
 * @author zengqiao
 * @date 19/12/2
 */
public class AlarmRuleQueryOption extends BaseQueryOption {
    private String alarmName;

    public String getAlarmName() { return this.alarmName; }

    public void setAlarmName(String alarmName) { this.alarmName = alarmName; }
}

View File

@@ -1,24 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po.query;
/**
 * Base class for query options; carries the record id filter shared by all
 * concrete query option types.
 *
 * @author zengqiao
 * @date 19/12/2
 */
public class BaseQueryOption {
    protected Long id;

    public Long getId() { return this.id; }

    public void setId(Long id) { this.id = id; }

    @Override
    public String toString() {
        return new StringBuilder("BaseQueryOption{id=").append(id).append('}').toString();
    }
}

View File

@@ -1,17 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.po.query;
/**
 * Query option for clusters: filter by cluster name (plus inherited id).
 *
 * @author zengqiao
 * @date 19/12/4
 */
public class ClusterQueryOption extends BaseQueryOption {
    private String clusterName;

    public String getClusterName() { return this.clusterName; }

    public void setClusterName(String clusterName) { this.clusterName = clusterName; }
}

View File

@@ -1,132 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
 * Broker metadata read from the ZK node /brokers/ids/{brokerId}.
 * Node structure:
 * {
 *     "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"},
 *     "endpoints":["SASL_PLAINTEXT://10.179.162.202:9093"],
 *     "jmx_port":9999,
 *     "host":null,
 *     "timestamp":"1546632983233",
 *     "port":-1,
 *     "version":4
 * }
 *
 * @author zengqiao
 * @date 19/4/3
 */
public class BrokerMetadata implements Cloneable {
    // BUGFIX: logger was created with TopicMetadata.class, which mislabelled
    // every log line from this class.
    private final static Logger LOGGER = LoggerFactory.getLogger(BrokerMetadata.class);

    private long clusterId;
    private int brokerId;
    private List<String> endpoints;
    private String host;
    private int port;
    // Field name kept as jmx_port: it matches the ZK JSON field above and is
    // presumably mapped by name during deserialization — do not rename.
    private int jmx_port;
    private String version;
    private long timestamp;

    public long getClusterId() { return this.clusterId; }

    public void setClusterId(long clusterId) { this.clusterId = clusterId; }

    public int getBrokerId() { return this.brokerId; }

    public void setBrokerId(int brokerId) { this.brokerId = brokerId; }

    public List<String> getEndpoints() { return this.endpoints; }

    public void setEndpoints(List<String> endpoints) { this.endpoints = endpoints; }

    public String getHost() { return this.host; }

    public void setHost(String host) { this.host = host; }

    public int getPort() { return this.port; }

    public void setPort(int port) { this.port = port; }

    public int getJmxPort() { return this.jmx_port; }

    public void setJmxPort(int jmxPort) { this.jmx_port = jmxPort; }

    public String getVersion() { return this.version; }

    public void setVersion(String version) { this.version = version; }

    public long getTimestamp() { return this.timestamp; }

    public void setTimestamp(long timestamp) { this.timestamp = timestamp; }

    /**
     * Shallow clone (the endpoints list is shared with the original);
     * returns null if cloning fails, which cannot happen for a Cloneable.
     */
    @Override
    public Object clone() {
        try {
            return super.clone();
        } catch (CloneNotSupportedException var3) {
            LOGGER.error("clone BrokerMetadata failed.", var3);
        }
        return null;
    }

    @Override
    public String toString() {
        return "BrokerMetadata{" +
                "clusterId=" + clusterId +
                ", brokerId=" + brokerId +
                ", endpoints=" + endpoints +
                ", host='" + host + '\'' +
                ", port=" + port +
                ", jmxPort=" + jmx_port +
                ", version='" + version + '\'' +
                ", timestamp=" + timestamp +
                '}';
    }
}

View File

@@ -1,46 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
/**
 * Content of the ZK /controller node: which broker is controller, the node
 * version, and the election timestamp. Field name "brokerid" matches the ZK
 * JSON key — do not rename.
 *
 * @author zengqiao
 * @date 19/4/22
 */
public class ControllerData {
    private Integer brokerid;
    private Integer version;
    private Long timestamp;

    public Integer getBrokerid() { return this.brokerid; }

    public void setBrokerid(Integer brokerid) { this.brokerid = brokerid; }

    public Integer getVersion() { return this.version; }

    public void setVersion(Integer version) { this.version = version; }

    public Long getTimestamp() { return this.timestamp; }

    public void setTimestamp(Long timestamp) { this.timestamp = timestamp; }

    @Override
    public String toString() {
        return new StringBuilder("ControllerData{")
                .append("brokerid=").append(brokerid)
                .append(", version=").append(version)
                .append(", timestamp=").append(timestamp)
                .append('}')
                .toString();
    }
}

View File

@@ -1,44 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
/**
* 根据/brokers/topics/topic的节点内容定义
* @author tukun
* @date 2015/11/10.
*/
public class PartitionMap implements Serializable {
/**
* 版本号
*/
private int version;
/**
* Map<PartitionId副本所在的brokerId列表>
*/
private Map<Integer, List<Integer>> partitions;
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public Map<Integer, List<Integer>> getPartitions() {
return partitions;
}
public void setPartitions(Map<Integer, List<Integer>> partitions) {
this.partitions = partitions;
}
@Override
public String toString() {
return "PartitionMap{" + "version=" + version + ", partitions=" + partitions + '}';
}
}

View File

@@ -1,177 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.ArrayList;
import java.util.List;
/**
* PartitionState实例
* 对应zookeeper下的state节点信息以及partition的其它信息
* @author tukun
* @date 2015/11/10.
*/
public class PartitionState implements Cloneable {
/**
* partition id
*/
private int partitionId;
/**
* kafka集群中的中央控制器选举次数
*/
private int controller_epoch;
/**
* Partition所属的leader broker编号
*/
private int leader;
/**
* partition的版本号
*/
private int version;
/**
* 该partition leader选举次数
*/
private int leader_epoch;
/**
* 同步副本组brokerId列表
*/
private List<Integer> isr;
/**
* 是否处于复制同步状态
*/
private boolean isUnderReplicated;
/**
* Partition的offset
*/
private long offset;
/**
* 被消费的offset
*/
private long consumeOffset;
/**
* 消费者对应的消费group
*/
private String consumerGroup;
public int getPartitionId() {
return partitionId;
}
public void setPartitionId(int partitionId) {
this.partitionId = partitionId;
}
public int getControllerEpoch() {
return controller_epoch;
}
public void setControllerEpoch(int controllerEpoch) {
this.controller_epoch = controllerEpoch;
}
public int getLeader() {
return leader;
}
public void setLeader(int leader) {
this.leader = leader;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public int getLeaderEpoch() {
return leader_epoch;
}
public void setLeaderEpoch(int leaderEpoch) {
this.leader_epoch = leaderEpoch;
}
public List<Integer> getIsr() {
return isr;
}
public void setIsr(List<Integer> isr) {
this.isr = isr;
}
public boolean isUnderReplicated() {
return isUnderReplicated;
}
public void setUnderReplicated(boolean underReplicated) {
isUnderReplicated = underReplicated;
}
public long getOffset() {
return offset;
}
public void setOffset(long offset) {
this.offset = offset;
}
public long getConsumeOffset() {
return consumeOffset;
}
public void setConsumeOffset(long consumeOffset) {
this.consumeOffset = consumeOffset;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
@Override
public String toString() {
return "PartitionState{" +
"partitionId=" + partitionId +
", controller_epoch=" + controller_epoch +
", leader=" + leader +
", version=" + version +
", leader_epoch=" + leader_epoch +
", isr=" + isr +
", isUnderReplicated=" + isUnderReplicated +
", offset=" + offset +
", consumeOffset=" + consumeOffset +
", consumerGroup='" + consumerGroup + '\'' +
'}';
}
@Override
public PartitionState clone() {
try {
PartitionState partitionState = (PartitionState) super.clone();
partitionState.setPartitionId(this.partitionId);
partitionState.setControllerEpoch(this.controller_epoch);
partitionState.setLeader(this.leader);
partitionState.setVersion(this.version);
partitionState.setLeaderEpoch(this.leader_epoch);
partitionState.setIsr(new ArrayList<>(this.isr));
partitionState.setOffset(this.offset);
partitionState.setConsumeOffset(this.consumeOffset);
partitionState.setConsumerGroup(this.consumerGroup);
return partitionState;
} catch (CloneNotSupportedException e) {
}
return null;
}
}

View File

@@ -1,48 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * "Topics to move" payload for a partition reassignment:
 * {"version": n, "topics": [{"topic": name}]}.
 *
 * @author zengqiao
 * @date 20/1/15
 */
public class ReassignmentDTO {
    private Integer version;
    private List<Map<String, String>> topics;

    /** Builds a payload containing exactly one topic entry. */
    public ReassignmentDTO(Integer version, String topicName) {
        this.version = version;
        this.topics = new ArrayList<>();
        Map<String, String> entry = new HashMap<>();
        entry.put("topic", topicName);
        this.topics.add(entry);
    }

    public Integer getVersion() { return this.version; }

    public void setVersion(Integer version) { this.version = version; }

    public List<Map<String, String>> getTopics() { return this.topics; }

    public void setTopics(List<Map<String, String>> topics) { this.topics = topics; }

    @Override
    public String toString() {
        return new StringBuilder("ReassignmentDTO{")
                .append("version=").append(version)
                .append(", topics=").append(topics)
                .append('}')
                .toString();
    }
}

View File

@@ -1,48 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.List;
/**
* @author zengqiao
* @date 20/1/15
*/
public class ReassignmentElemDTO {
private String topic;
private Integer partition;
private List<Integer> replicas;
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public Integer getPartition() {
return partition;
}
public void setPartition(Integer partition) {
this.partition = partition;
}
public List<Integer> getReplicas() {
return replicas;
}
public void setReplicas(List<Integer> replicas) {
this.replicas = replicas;
}
@Override
public String toString() {
return "ReassignmentElemDTO{" +
"topic='" + topic + '\'' +
", partition=" + partition +
", replicas=" + replicas +
'}';
}
}

View File

@@ -1,37 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.List;
/**
 * Full reassignment plan JSON: a version plus the per-partition entries.
 *
 * @author zengqiao
 * @date 20/1/15
 */
public class ReassignmentJsonDTO {
    private Integer version;
    private List<ReassignmentElemDTO> partitions;

    public Integer getVersion() { return this.version; }

    public void setVersion(Integer version) { this.version = version; }

    public List<ReassignmentElemDTO> getPartitions() { return this.partitions; }

    public void setPartitions(List<ReassignmentElemDTO> partitions) { this.partitions = partitions; }

    @Override
    public String toString() {
        return new StringBuilder("ReassignmentJsonDTO{")
                .append("version=").append(version)
                .append(", partitions=").append(partitions)
                .append('}')
                .toString();
    }
}

View File

@@ -1,93 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.Set;
/**
 * Topic metadata from the ZK node /brokers/topics/${topicName}.
 *
 * @author zengqiao
 * @date 19/4/3
 */
public class TopicMetadata implements Cloneable {
    private String topic;              // topic name
    private PartitionMap partitionMap; // partition -> replica brokers
    private Set<Integer> brokerIdSet;  // brokers hosting the topic, derived from partitionMap
    private int replicaNum;            // replica count
    private int partitionNum;          // partition count
    private long modifyTime;           // ZK node modify time
    private long createTime;           // ZK node create time

    public String getTopic() { return this.topic; }

    public void setTopic(String topic) { this.topic = topic; }

    public int getReplicaNum() { return this.replicaNum; }

    public void setReplicaNum(int replicaNum) { this.replicaNum = replicaNum; }

    public PartitionMap getPartitionMap() { return this.partitionMap; }

    public void setPartitionMap(PartitionMap partitionMap) { this.partitionMap = partitionMap; }

    public Set<Integer> getBrokerIdSet() { return this.brokerIdSet; }

    public void setBrokerIdSet(Set<Integer> brokerIdSet) { this.brokerIdSet = brokerIdSet; }

    public int getPartitionNum() { return this.partitionNum; }

    public void setPartitionNum(int partitionNum) { this.partitionNum = partitionNum; }

    public long getModifyTime() { return this.modifyTime; }

    public void setModifyTime(long modifyTime) { this.modifyTime = modifyTime; }

    public long getCreateTime() { return this.createTime; }

    public void setCreateTime(long createTime) { this.createTime = createTime; }

    /**
     * Added: the class declared Cloneable but never exposed clone(), making
     * the interface inert for callers. Shallow copy, mirroring
     * BrokerMetadata#clone (partitionMap and brokerIdSet are shared).
     */
    @Override
    public Object clone() {
        try {
            return super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable: TopicMetadata implements Cloneable.
            return null;
        }
    }

    @Override
    public String toString() {
        return "TopicMetadata{" +
                "topic='" + topic + '\'' +
                ", partitionMap=" + partitionMap +
                ", brokerIdSet=" + brokerIdSet +
                ", replicaNum=" + replicaNum +
                ", partitionNum=" + partitionNum +
                ", modifyTime=" + modifyTime +
                ", createTime=" + createTime +
                '}';
    }
}

View File

@@ -1,25 +0,0 @@
package com.xiaojukeji.kafka.manager.common.exception;
/**
 * Unchecked exception thrown when deep-copying an object fails.
 *
 * @author huangyiminghappy@163.com
 * @date 2019/3/15
 */
public class CopyException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    public CopyException() {
        super();
    }

    public CopyException(String message) {
        super(message);
    }

    public CopyException(Throwable cause) {
        super(cause);
    }

    public CopyException(String message, Throwable cause) {
        super(message, cause);
    }
}

View File

@@ -1,482 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils;
import com.xiaojukeji.kafka.manager.common.exception.CopyException;
import org.apache.commons.beanutils.PropertyUtils;
import java.beans.PropertyDescriptor;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* 对象复制新类型和同类型深度克隆工具类
* @author huangyiminghappy@163.com
* @date 2019/3/15
*/
public class CopyUtils {
@SuppressWarnings({"unchecked", "rawtypes"})
// Deep-copies an arbitrary object graph. Immutable wrappers (String, boxed
// primitives) are returned as-is; known collection types are copied
// element-by-element by the dedicated handlers; anything else is copied
// reflectively via an all-args constructor plus field copy.
public static <T> T deepCopy(T obj) {
if (obj == null) {
return null;
// Immutable types: safe to return the same reference.
} else if (obj instanceof String) {
return (T)(String) obj;
} else if (obj instanceof Integer) {
return (T)(Integer) obj;
} else if (obj instanceof Double) {
return (T)(Double) obj;
} else if (obj instanceof Byte) {
return (T)(Byte) obj;
} else if (obj instanceof Short) {
return (T)(Short) obj;
} else if (obj instanceof Long) {
return (T)(Long) obj;
} else if (obj instanceof Float) {
return (T)(Float) obj;
} else if (obj instanceof Character) {
return (T)(Character) obj;
} else if (obj instanceof Boolean) {
return (T)(Boolean) obj;
// Collection types: delegate to per-type handlers that recurse per element.
} else if (obj instanceof ArrayList<?>) {
return (T) arrayListHandler((ArrayList<?>) obj);
} else if (obj instanceof HashMap<?, ?>) {
return (T) mapHandler((Map<?, ?>) obj);
} else if (obj instanceof ConcurrentHashMap<?, ?>) {
return (T) concurrentMapHandler((Map<?, ?>) obj);
} else if (obj instanceof TreeMap<?, ?>) {
return (T) treeMapHandler((Map<?, ?>) obj);
} else if (obj instanceof LinkedList<?>) {
return (T) linkedListHandler((LinkedList<?>) obj);
} else if (obj instanceof HashSet<?>) {
return (T) hashSetHandler((HashSet<?>) obj);
} else if (isPrimitiveArray(obj)) {
return getPrimitiveArray(obj);
}
// General case: instantiate via a constructor and copy fields reflectively.
T finObj = null;
Class rezClass = obj.getClass();
// NOTE(review): this cast of a null reference is a no-op whose result is
// discarded — it has no effect and looks like dead code.
rezClass.cast(finObj);
try {
Constructor<T> constructor = getCompleteConstructor(rezClass);
finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass));
copyFields(rezClass, obj, finObj);
} catch (Exception e) {
// NOTE(review): failure is only printed, and a partially-built or null
// object is returned — consider throwing CopyException instead.
e.printStackTrace();
}
return finObj;
}
@SuppressWarnings({"unchecked", "rawtypes"})
// Private variant of deepCopy carrying the parent object of the field being
// copied. NOTE(review): "parrent" is only forwarded to copyFields here; its
// exact role depends on copyFields (not visible in this chunk) — confirm.
private static <T> T deepCopy(T obj, Object parrent) {
if (obj == null) {
return null;
// Immutable types: valueOf on an already-boxed value returns an equal
// (often the same cached) instance.
} else if (obj instanceof String) {
return (T)String.valueOf((String) obj);
} else if (obj instanceof Integer) {
return (T)Integer.valueOf((Integer) obj);
} else if (obj instanceof Double) {
return (T)Double.valueOf((Double) obj);
} else if (obj instanceof Byte) {
return (T)Byte.valueOf((Byte) obj);
} else if (obj instanceof Short) {
return (T)Short.valueOf((Short) obj);
} else if (obj instanceof Long) {
return (T)Long.valueOf((Long) obj);
} else if (obj instanceof Float) {
return (T)Float.valueOf((Float) obj);
} else if (obj instanceof Character) {
return (T)Character.valueOf((Character) obj);
} else if (obj instanceof Boolean) {
return (T)Boolean.valueOf((Boolean) obj);
// Collection types: same handlers as the public overload (parent not passed).
} else if (obj instanceof ArrayList<?>) {
return (T) arrayListHandler((ArrayList<?>) obj);
} else if (obj instanceof HashMap<?, ?>) {
return (T) mapHandler((Map<?, ?>) obj);
} else if (obj instanceof ConcurrentHashMap<?, ?>) {
return (T) concurrentMapHandler((Map<?, ?>) obj);
} else if (obj instanceof TreeMap<?, ?>) {
return (T) treeMapHandler((Map<?, ?>) obj);
} else if (obj instanceof LinkedList<?>) {
return (T) linkedListHandler((LinkedList<?>) obj);
} else if (obj instanceof HashSet<?>) {
return (T) hashSetHandler((HashSet<?>) obj);
} else if (isPrimitiveArray(obj)) {
return getPrimitiveArray(obj);
}
// General case: reflective construction plus field copy, threading parrent.
T finObj = null;
Class rezClass = obj.getClass();
// NOTE(review): no-op cast of null, same dead code as the public overload.
rezClass.cast(finObj);
try {
Constructor<T> constructor = getCompleteConstructor(rezClass);
finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass));
copyFields(rezClass, obj, finObj, parrent);
} catch (Exception e) {
// NOTE(review): printed-and-swallowed; consider throwing CopyException.
e.printStackTrace();
}
return finObj;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private static ArrayList<?> arrayListHandler(ArrayList<?> obj) {
ArrayList srcList = obj;
ArrayList finList = new ArrayList();
for (int i = 0; i < srcList.size(); i++) {
finList.add(CopyUtils.deepCopy(srcList.get(i)));
}
return finList;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <K, V> Map<K, V> mapHandler(Map<K, V> obj) {
Map<K, V> src = obj;
Map<K, V> fin = new HashMap<K, V>();
for (Map.Entry entry : src.entrySet()) {
K key = (K) CopyUtils.deepCopy(entry.getKey());
V value = (V) CopyUtils.deepCopy(entry.getValue());
fin.put(key, value);
}
return fin;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <K, V> Map<K, V> concurrentMapHandler(Map<K, V> obj) {
Map<K, V> src = obj;
Map<K, V> fin = new ConcurrentHashMap<K, V>();
for (Map.Entry entry : src.entrySet()) {
K key = (K) CopyUtils.deepCopy(entry.getKey());
V value = (V) CopyUtils.deepCopy(entry.getValue());
fin.put(key, value);
}
return fin;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <K, V> Map<K, V> treeMapHandler(Map<K, V> obj) {
Map<K, V> src = obj;
Map<K, V> fin = new TreeMap<K, V>();
for (Map.Entry entry : src.entrySet()) {
K key = (K) CopyUtils.deepCopy(entry.getKey());
V value = (V) CopyUtils.deepCopy(entry.getValue());
fin.put(key, value);
}
return fin;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private static LinkedList<?> linkedListHandler(LinkedList<?> obj) {
LinkedList srcList = obj;
LinkedList finList = new LinkedList<>();
for (int i = 0; i < srcList.size(); i++) {
finList.add(CopyUtils.deepCopy(srcList.get(i)));
}
return finList;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private static HashSet<?> hashSetHandler(HashSet<?> obj) {
HashSet srcList = obj;
HashSet finList = new HashSet<>();
for (Object o : srcList) {
finList.add(CopyUtils.deepCopy(o));
}
return finList;
}
private static boolean isPrimitiveArray(Object obj) {
if (obj instanceof byte[] ||
obj instanceof short[] ||
obj instanceof int[] ||
obj instanceof long[] ||
obj instanceof float[] ||
obj instanceof double[] ||
obj instanceof char[] ||
obj instanceof boolean[]) {
return true;
} else {
return false;
}
}
private static boolean isPrimitiveArray(String type) {
if ("byte[]".equals(type) ||
"short[]".equals(type) ||
"int[]".equals(type) ||
"long[]".equals(type) ||
"float[]".equals(type) ||
"double[]".equals(type) ||
"char[]".equals(type) ||
"boolean[]".equals(type)) {
return true;
} else {
return false;
}
}
@SuppressWarnings("unchecked")
private static <T> T getPrimitiveArray(T obj) {
if (obj instanceof int[]) {
int[] arr = new int[((int[]) obj).length];
for (int i = 0; i < ((int[]) obj).length; i++) {
arr[i] = ((int[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof byte[]) {
byte[] arr = new byte[((byte[]) obj).length];
for (int i = 0; i < ((byte[]) obj).length; i++) {
arr[i] = ((byte[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof short[]) {
short[] arr = new short[((short[]) obj).length];
for (int i = 0; i < ((short[]) obj).length; i++) {
arr[i] = ((short[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof long[]) {
long[] arr = new long[((long[]) obj).length];
for (int i = 0; i < ((long[]) obj).length; i++) {
arr[i] = ((long[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof float[]) {
float[] arr = new float[((float[]) obj).length];
for (int i = 0; i < ((float[]) obj).length; i++) {
arr[i] = ((float[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof double[]) {
double[] arr = new double[((double[]) obj).length];
for (int i = 0; i < ((double[]) obj).length; i++) {
arr[i] = ((double[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof char[]) {
char[] arr = new char[((char[]) obj).length];
for (int i = 0; i < ((char[]) obj).length; i++) {
arr[i] = ((char[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof boolean[]) {
boolean[] arr = new boolean[((boolean[]) obj).length];
for (int i = 0; i < ((boolean[]) obj).length; i++) {
arr[i] = ((boolean[]) obj)[i];
}
return (T) arr;
}
return null;
}
@SuppressWarnings("unchecked")
private static <T> T getPrimitiveArray(T obj, String type) {
if ("int[]".equals(type)) {
int[] arr = new int[1];
arr[0] = 0;
return (T) arr;
} else if ("byte[]".equals(type)) {
byte[] arr = new byte[1];
arr[0] = 0;
return (T) arr;
} else if ("short[]".equals(type)) {
short[] arr = new short[1];
arr[0] = 0;
return (T) arr;
} else if ("long[]".equals(type)) {
long[] arr = new long[1];
arr[0] = 0;
return (T) arr;
} else if ("float[]".equals(type)) {
float[] arr = new float[1];
arr[0] = 0;
return (T) arr;
} else if ("double[]".equals(type)) {
double[] arr = new double[1];
arr[0] = 0;
return (T) arr;
} else if ("char[]".equals(type)) {
char[] arr = new char[1];
arr[0] = 0;
return (T) arr;
} else if ("boolean[]".equals(type)) {
boolean[] arr = new boolean[1];
arr[0] = false;
return (T) arr;
}
return null;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static Constructor getCompleteConstructor(Class ourClass)
throws NoSuchMethodException, SecurityException {
Constructor constructor = null;
Class[] params = new Class[ourClass.getDeclaredConstructors()[0].getParameterTypes().length];
for (int i = 0; i < ourClass.getDeclaredConstructors()[0].getParameterTypes().length; i++) {
params[i] = ourClass.getDeclaredConstructors()[0].getParameterTypes()[i];
}
constructor = ourClass.getConstructor(params);
constructor.setAccessible(true);
return constructor;
}
@SuppressWarnings("rawtypes")
private static Object[] getParamsObjForConstructor(Class ourClass)
throws NoSuchMethodException, SecurityException {
Constructor constuctor = null;
constuctor = ourClass.getDeclaredConstructors()[0];
constuctor.setAccessible(true);
Object[] objParams = new Object[constuctor.getParameterTypes().length];
for (int i = 0; i < constuctor.getParameterTypes().length; i++) {
String fieldType = constuctor.getParameterTypes()[i].toString();
if ("int".equalsIgnoreCase(fieldType) ||
"double".toString().equalsIgnoreCase(fieldType) ||
"float".equalsIgnoreCase(fieldType) ||
"byte".toString().equalsIgnoreCase(fieldType) ||
"char".equalsIgnoreCase(fieldType) ||
"long".equalsIgnoreCase(fieldType)) {
objParams[i] = 0;
} else if ("boolean".equalsIgnoreCase(fieldType)) {
objParams[i] = false;
} else if (isPrimitiveArray(constuctor.getParameterTypes()[i].getCanonicalName())) {
objParams[i] = getPrimitiveArray(constuctor.getParameterTypes()[i],
constuctor.getParameterTypes()[i].getCanonicalName()
);
} else {
objParams[i] = null;
}
}
return objParams;
}
@SuppressWarnings("rawtypes")
private static <T> void copyFields(Class ourClass, T srcObj, T finObj)
throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
Field[] fields = ourClass.getDeclaredFields();
for (int i = 0; i < fields.length; i++) {
fields[i].setAccessible(true);
Field modField = Field.class.getDeclaredField("modifiers");
modField.setAccessible(true);
modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL);
String fieldType = fields[i].getType().toString();
if ("int".equalsIgnoreCase(fieldType) ||
"double".equalsIgnoreCase(fieldType) ||
"float".equalsIgnoreCase(fieldType) ||
"byte".equalsIgnoreCase(fieldType) ||
"char".equalsIgnoreCase(fieldType) ||
"boolean".equalsIgnoreCase(fieldType) ||
"short".equalsIgnoreCase(fieldType) ||
"long".equalsIgnoreCase(fieldType)) {
fields[i].set(finObj, fields[i].get(srcObj));
} else {
fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj));
}
}
}
@SuppressWarnings("rawtypes")
private static <T> void copyFields(Class ourClass, T srcObj, T finObj, Object parent)
throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
Field[] fields = ourClass.getDeclaredFields();
for (int i = 0; i < fields.length; i++) {
fields[i].setAccessible(true);
Field modField = Field.class.getDeclaredField("modifiers");
modField.setAccessible(true);
modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL);
String fieldType = fields[i].getType().toString();
if ("int".equalsIgnoreCase(fieldType) ||
"double".equalsIgnoreCase(fieldType) ||
"float".equalsIgnoreCase(fieldType) ||
"byte".equalsIgnoreCase(fieldType) ||
"char".equalsIgnoreCase(fieldType) ||
"boolean".equalsIgnoreCase(fieldType) ||
"short".equalsIgnoreCase(fieldType) ||
"long".equalsIgnoreCase(fieldType)) {
fields[i].set(finObj, fields[i].get(srcObj));
} else {
if (fields[i].get(srcObj).toString().equals(parent.toString())) {
fields[i].set(finObj, fields[i].get(srcObj));
} else {
fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj));
}
}
}
}
static void setFinalStaticField(Field field, Object newValue) throws Exception {
field.setAccessible(true);
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
field.set(null, newValue);
}
public static Object copyProperties(Object target, Object orig) {
if (target == null || orig == null) {
return target;
}
PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(target);
try {
for (int i = 0; i < destDesc.length; i++) {
Class destType = destDesc[i].getPropertyType();
Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName());
if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) {
if (!Collection.class.isAssignableFrom(origType)) {
try {
Object value = PropertyUtils.getProperty(orig, destDesc[i].getName());
PropertyUtils.setProperty(target, destDesc[i].getName(), value);
} catch (Exception ex) {
}
}
}
}
return target;
} catch (Exception ex) {
throw new CopyException(ex);
}
}
public static Object copyProperties(Object dest, Object orig, String[] ignores) {
if (dest == null || orig == null) {
return dest;
}
PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(dest);
try {
for (int i = 0; i < destDesc.length; i++) {
if (contains(ignores, destDesc[i].getName())) {
continue;
}
Class destType = destDesc[i].getPropertyType();
Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName());
if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) {
if (!Collection.class.isAssignableFrom(origType)) {
Object value = PropertyUtils.getProperty(orig, destDesc[i].getName());
PropertyUtils.setProperty(dest, destDesc[i].getName(), value);
}
}
}
return dest;
} catch (Exception ex) {
throw new CopyException(ex);
}
}
static boolean contains(String[] ignores, String name) {
boolean ignored = false;
for (int j = 0; ignores != null && j < ignores.length; j++) {
if (ignores[j].equals(name)) {
ignored = true;
break;
}
}
return ignored;
}
}

View File

@@ -1,15 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.util.Calendar;
import java.util.Date;
/**
* 日期工具
* @author huangyiminghappy@163.com
* @date 2019-03-20
*/
/**
 * Date conversion helpers.
 * @author huangyiminghappy@163.com
 * @date 2019-03-20
 */
public class DateUtils {
    /**
     * Converts a millisecond epoch timestamp to a {@link Date}.
     *
     * @param time milliseconds since the epoch; may be null
     * @return the corresponding Date, or null when {@code time} is null
     */
    public static Date long2Date(Long time){
        // FIX: auto-unboxing a null Long previously threw NullPointerException.
        if (time == null) {
            return null;
        }
        return new Date(time);
    }
}

View File

@@ -1,57 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Created by limeng on 2017/12/22
*/
/**
 * ThreadFactory that names threads "{poolName}-{poolId}-{threadId}" and applies
 * a configurable daemon flag and priority.
 *
 * Created by limeng on 2017/12/22
 */
public class DefaultThreadFactory implements ThreadFactory {
    // Global counter so every factory instance gets a unique pool id in names.
    private static final AtomicInteger POOL_ID = new AtomicInteger();

    // Per-factory counter for the thread suffix.
    private final AtomicInteger nextId;
    // Thread name prefix, "{poolName}-{poolId}-".
    private final String prefix;
    // Daemon flag applied to created threads.
    private final boolean daemon;
    // Priority applied to created threads.
    private final int priority;

    /**
     * Creates a factory producing non-daemon threads at normal priority.
     * @param poolName logical pool name used in thread names; must not be null
     */
    public DefaultThreadFactory(String poolName) {
        // FIX: the default priority was the magic number 5; use the JDK constant.
        this(poolName, false, Thread.NORM_PRIORITY);
    }

    /**
     * @param poolName logical pool name used in thread names; must not be null
     * @param daemon   whether created threads are daemons
     * @param priority thread priority, in [Thread.MIN_PRIORITY, Thread.MAX_PRIORITY]
     * @throws NullPointerException     when poolName is null
     * @throws IllegalArgumentException when priority is out of range
     */
    public DefaultThreadFactory(String poolName, boolean daemon, int priority) {
        this.nextId = new AtomicInteger();
        if (poolName == null) {
            throw new NullPointerException("poolName");
        } else if (priority >= 1 && priority <= 10) {
            this.prefix = poolName + '-' + POOL_ID.incrementAndGet() + '-';
            this.daemon = daemon;
            this.priority = priority;
        } else {
            throw new IllegalArgumentException(
                    "priority: " + priority
                            + " (expected: Thread.MIN_PRIORITY <= priority <= Thread.MAX_PRIORITY)");
        }
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, this.prefix + this.nextId.incrementAndGet());
        try {
            // Only touch the daemon flag/priority when they differ from the
            // thread's defaults, to avoid needless SecurityManager checks.
            if (t.isDaemon()) {
                if (!this.daemon) {
                    t.setDaemon(false);
                }
            } else if (this.daemon) {
                t.setDaemon(true);
            }
            if (t.getPriority() != this.priority) {
                t.setPriority(this.priority);
            }
        } catch (Exception e) {
            // Deliberate best-effort: a SecurityException here must not prevent
            // thread creation; the thread keeps its default daemon/priority.
        }
        return t;
    }
}

View File

@@ -1,36 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.security.MessageDigest;
/**
* @author zengqiao
* @date 20/3/17
*/
/**
 * MD5 digest helper.
 * @author zengqiao
 * @date 20/3/17
 */
public class EncryptUtil {
    // Lookup table for lower-case hex encoding of the digest bytes.
    private static final char[] HEX_DIGITS = {
            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
    };

    /**
     * Computes the lower-case hex MD5 digest of {@code key}.
     *
     * @param key input string; UTF-8 encoded before hashing
     * @return 32-char lower-case hex digest, or null when {@code key} is null
     *         or the digest cannot be computed
     */
    public static String md5(String key) {
        try {
            // FIX: getBytes() used the platform default charset, so the digest
            // of non-ASCII input differed between deployments. Pin to UTF-8.
            // (UnsupportedEncodingException is covered by the catch below.)
            byte[] btInput = key.getBytes("UTF-8");
            MessageDigest mdInst = MessageDigest.getInstance("MD5");
            mdInst.update(btInput);
            byte[] md = mdInst.digest();
            // Hex-encode: two chars per digest byte, high nibble first.
            char[] str = new char[md.length * 2];
            for (int i = 0, k = 0; i < md.length; i++) {
                str[k++] = HEX_DIGITS[md[i] >>> 4 & 0xf];
                str[k++] = HEX_DIGITS[md[i] & 0xf];
            }
            return new String(str);
        } catch (Exception e) {
            // Deliberate: any failure (including null key) degrades to null.
            return null;
        }
    }
}

View File

@@ -1,72 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import java.io.IOException;
import java.net.MalformedURLException;
/**
* JMXConnector包装类
* @author tukun
* @date 2015/11/9.
*/
/**
 * Lazily-connecting wrapper around a {@link JMXConnector} for one broker's
 * JMX endpoint.
 *
 * @author tukun
 * @date 2015/11/9.
 */
public class JmxConnectorWrap {
    private final static Logger logger = LoggerFactory.getLogger(JmxConnectorWrap.class);

    // Underlying connector; null until the first successful connect.
    private JMXConnector jmxConnector;

    // Host of the remote JMX endpoint.
    private String host;

    // Port of the remote JMX endpoint; -1 means JMX is disabled.
    private int port;

    public JmxConnectorWrap(String host, int port) {
        this.host = host;
        this.port = port;
    }

    /**
     * Returns the connector, connecting on first use.
     *
     * @return the connector, or null when the port is -1 or connecting failed
     */
    public JMXConnector getJmxConnector() {
        if (jmxConnector == null && port != -1) {
            createJMXConnector();
        }
        return jmxConnector;
    }

    private synchronized void createJMXConnector() {
        // Another thread may have connected while we waited for the monitor.
        if (jmxConnector != null) {
            return;
        }
        String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
        try {
            JMXServiceURL url = new JMXServiceURL(jmxUrl);
            jmxConnector = JMXConnectorFactory.connect(url, null);
            // FIX: the success message was previously logged unconditionally,
            // even after connect() had thrown; log it only on actual success.
            logger.info("JMX connect success, host:{} port:{}.", host, port);
        } catch (MalformedURLException e) {
            logger.error("JMX url exception, host:{} port:{} jmxUrl:{}", host, port, jmxUrl, e);
        } catch (IOException e) {
            logger.error("JMX connect exception, host:{} port:{}.", host, port, e);
        }
    }

    /** Closes the underlying connector, ignoring (but logging) I/O failures. */
    public void close() {
        if (jmxConnector == null) {
            return;
        }
        try {
            jmxConnector.close();
        } catch (IOException e) {
            logger.warn("close JmxConnector exception, host:{} port:{}.", host, port, e);
        }
    }
}

View File

@@ -1,62 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
/**
* Mbean的对象封装
* @author tukun
* @date 2015/11/9.
*/
/**
 * Descriptor for one monitored JMX MBean attribute: the MBean object name,
 * the attribute to read, and the Java type its value is expected to have.
 *
 * @author tukun
 * @date 2015/11/9.
 */
public class Mbean {
    // JMX object name of the MBean.
    private String objectName;
    // Name of the monitored attribute on that MBean.
    private String property;
    // Expected runtime type of the attribute value.
    private Class propertyClass;

    public Mbean(String objectName, String property, Class propertyClass) {
        this.objectName = objectName;
        this.property = property;
        this.propertyClass = propertyClass;
    }

    public String getObjectName() { return objectName; }

    public void setObjectName(String objectName) { this.objectName = objectName; }

    public String getProperty() { return property; }

    public void setProperty(String property) { this.property = property; }

    public Class getPropertyClass() { return propertyClass; }

    public void setPropertyClass(Class propertyClass) { this.propertyClass = propertyClass; }

    @Override
    public String toString() {
        // Same rendering as the original concatenation-based implementation.
        return String.format("Mbean{objectName='%s', property='%s', propertyClass=%s}",
                objectName, property, propertyClass);
    }
}

View File

@@ -1,93 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import java.util.HashMap;
import java.util.Map;
/**
* kafka集群的mbean的object name集合
* @author tukun, zengqiao
* @date 2015/11/5.
*/
/**
 * Static registry mapping metric names to the Kafka JMX {@link Mbean}
 * descriptors used to read them from brokers.
 *
 * @author tukun, zengqiao
 * @date 2015/11/5.
 */
public class MbeanNameUtil {
    // Broker-level metric object names.
    private static final String MESSAGE_IN_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec";
    private static final String BYTES_IN_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec";
    private static final String BYTES_OUT_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec";
    private static final String BYTES_REJECTED_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec";
    private static final String FAILED_FETCH_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=FailedFetchRequestsPerSec";
    private static final String FAILED_PRODUCE_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=FailedProduceRequestsPerSec";
    private static final String PRODUCE_REQUEST_PER_SEC = "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce";
    private static final String CONSUMER_REQUEST_PER_SEC = "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=FetchConsumer";
    private static final String TOTAL_PRODUCE_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=TotalProduceRequestsPerSec";
    private static final String TOTAL_FETCH_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=TotalFetchRequestsPerSec";
    private static final String REQUEST_HANDLER_AVG_IDLE_PERCENT = "kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent";
    private static final String NETWORK_PROCESSOR_AVG_IDLE_PERCENT = "kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent";
    private static final String REQUEST_QUEUE_SIZE = "kafka.network:type=RequestChannel,name=RequestQueueSize";
    private static final String RESPONSE_QUEUE_SIZE = "kafka.network:type=RequestChannel,name=ResponseQueueSize";
    private static final String LOG_FLUSH_RATE_AND_TIME_MS = "kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs";
    private static final String TOTAL_TIME_PRODUCE = "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce";
    private static final String TOTAL_TIME_FETCH_CONSUMER = "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer";
    private static final String PART_COUNT = "kafka.server:type=ReplicaManager,name=PartitionCount";
    private static final String PARTITION_OFFSET_PULL = "kafka.log:type=Log,name=LogEndOffset,topic=${topic},partition=${partition}";
    private static final String UNDER_REPLICATED_PARTITIONS = "kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions";
    private static final String LEADER_COUNT = "kafka.server:type=ReplicaManager,name=LeaderCount";
    // Kept for reference, currently unused:
    // private static final String PRODUCE_REQUEST_TIME = "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=Produce";
    // private static final String FETCH_REQUEST_TIME = "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=FetchConsumer";

    // Metric name -> MBean descriptor (object name + attribute + value type).
    private static Map<String, Mbean> mbeanNameMap = new HashMap<String, Mbean>();

    // Registers one metric descriptor under the given lookup key.
    private static void register(String key, String objectName, String attribute, Class clazz) {
        mbeanNameMap.put(key, new Mbean(objectName, attribute, clazz));
    }

    static {
        register("MessagesInPerSec", MESSAGE_IN_PER_SEC, "OneMinuteRate", Double.class);
        register("BytesInPerSec", BYTES_IN_PER_SEC, "OneMinuteRate", Double.class);
        register("BytesOutPerSec", BYTES_OUT_PER_SEC, "OneMinuteRate", Double.class);
        register("BytesRejectedPerSec", BYTES_REJECTED_PER_SEC, "OneMinuteRate", Double.class);
        register("FailFetchRequestPerSec", FAILED_FETCH_REQUEST_PER_SEC, "OneMinuteRate", Double.class);
        register("FailProduceRequestPerSec", FAILED_PRODUCE_REQUEST_PER_SEC, "OneMinuteRate", Double.class);
        register("ProduceRequestPerSec", PRODUCE_REQUEST_PER_SEC, "OneMinuteRate", Double.class);
        register("FetchConsumerRequestPerSec", CONSUMER_REQUEST_PER_SEC, "OneMinuteRate", Double.class);
        register("TotalProduceRequestsPerSec", TOTAL_PRODUCE_REQUEST_PER_SEC, "OneMinuteRate", Double.class);
        register("TotalFetchRequestsPerSec", TOTAL_FETCH_REQUEST_PER_SEC, "OneMinuteRate", Double.class);
        register("PartitionOffset", PARTITION_OFFSET_PULL, "Value", int.class);
        register("PartitionCount", PART_COUNT, "Value", int.class);
        register("UnderReplicatedPartitions", UNDER_REPLICATED_PARTITIONS, "Value", int.class);
        register("LeaderCount", LEADER_COUNT, "Value", int.class);
        register("RequestHandlerAvgIdlePercent", REQUEST_HANDLER_AVG_IDLE_PERCENT, "OneMinuteRate", Double.class);
        register("NetworkProcessorAvgIdlePercent", NETWORK_PROCESSOR_AVG_IDLE_PERCENT, "Value", Double.class);
        register("RequestQueueSize", REQUEST_QUEUE_SIZE, "Value", int.class);
        register("ResponseQueueSize", RESPONSE_QUEUE_SIZE, "Value", int.class);
        register("LogFlushRateAndTimeMs", LOG_FLUSH_RATE_AND_TIME_MS, "OneMinuteRate", Double.class);
        register("TotalTimeProduceMean", TOTAL_TIME_PRODUCE, "Mean", Double.class);
        register("TotalTimeProduce99Th", TOTAL_TIME_PRODUCE, "99thPercentile", Double.class);
        register("TotalTimeFetchConsumerMean", TOTAL_TIME_FETCH_CONSUMER, "Mean", Double.class);
        register("TotalTimeFetchConsumer99Th", TOTAL_TIME_FETCH_CONSUMER, "99thPercentile", Double.class);
    }

    /**
     * Resolves the descriptor registered under {@code name}; when a non-empty
     * {@code topic} is given, returns a copy whose object name is suffixed
     * with ",topic=&lt;topic&gt;".
     *
     * @return the descriptor, or null when {@code name} is unknown
     */
    public static Mbean getMbean(String name, String topic) {
        Mbean base = mbeanNameMap.get(name);
        if (base == null) {
            return null;
        }
        if (topic == null || topic.isEmpty()) {
            return base;
        }
        return new Mbean(base.getObjectName() + ",topic=" + topic, base.getProperty(), base.getPropertyClass());
    }
}

View File

@@ -1,172 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import org.apache.zookeeper.data.Stat;
import java.util.List;
/**
* Created by limeng on 2017/12/22
*/
public interface ConfigClient {
    /**
     * Registers a listener that is notified on connection state changes.
     *
     * @param listener listener to add
     */
    void addStateChangeListener(StateChangeListener listener);
    /**
     * Checks whether a node exists at the given path.
     *
     * @param path node path
     * @return true when the node exists
     * @throws ConfigException on store access failure
     */
    boolean checkPathExists(String path) throws ConfigException;
    /**
     * Fetches the node's Stat metadata.
     *
     * @param path node path
     * @return the node's Stat; null when the node does not exist
     * @throws ConfigException on store access failure
     */
    Stat getNodeStat(String path) throws ConfigException;
    /**
     * Overwrites the data of an existing node.
     *
     * @param path node path
     * @param data new node payload
     * @throws ConfigException on store access failure
     */
    Stat setNodeStat(String path, String data) throws ConfigException;
    /**
     * Writes data at the path, creating a persistent node first when missing.
     *
     * @param path node path
     * @param data node payload
     * @throws ConfigException on store access failure
     */
    Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException;
    /**
     * Creates a persistent sequential node holding {@code data}.
     *
     * @param path base node path
     * @param data node payload
     * @return the actual (sequence-suffixed) path that was created
     * @throws ConfigException on store access failure
     */
    String createPersistentSequential(String path, String data) throws ConfigException;
    /**
     * Historical persistent-save API, kept for reference. save() was the
     * durable variant; register() was for ephemeral data re-registered after
     * reconnect.
     */
    // <T> void save(String path, T data) throws ConfigException;
    // <T> void saveIfNotExisted(String path, T data) throws ConfigException;
    // <T> void register(String path, T data) throws ConfigException;
    /**
     * Reads the node data and converts it to {@code clazz}.
     *
     * @param path  node path
     * @param clazz target type to deserialize into
     * @return the deserialized value
     * @throws ConfigException on store access failure
     */
    <T> T get(String path, Class<T> clazz) throws ConfigException;
    /**
     * Deletes the node; child nodes are deleted as well.
     *
     * @param path node path
     * @throws ConfigException on store access failure
     */
    void delete(String path) throws ConfigException;
    /**
     * Reads the node data as a raw string.
     *
     * @param path node path
     * @return the node payload
     * @throws ConfigException on store access failure
     */
    String get(String path) throws ConfigException;
    /**
     * Watches the node for data changes.
     *
     * @param path     node path
     * @param listener callback for change events
     */
    void watch(String path, StateChangeListener listener) throws ConfigException;
    /**
     * Lists the child node names under the path.
     *
     * @param path parent node path
     * @return child node names
     * @throws ConfigException on store access failure
     */
    List<String> getChildren(String path) throws ConfigException;
    /**
     * Watches the path for child additions, updates, and removals.
     *
     * @param path     parent node path
     * @param listener callback for child events
     * @throws ConfigException on store access failure
     */
    void watchChildren(String path, StateChangeListener listener) throws ConfigException;
    /**
     * Stops watching the path's children.
     *
     * @param path parent node path
     */
    void cancelWatchChildren(String path);
    /**
     * Acquires a lock on the node, waiting up to {@code timeoutMS} milliseconds.
     *
     * @param path      node path to lock
     * @param timeoutMS maximum time to wait for the lock, in milliseconds
     * @param data      payload associated with the lock holder
     * @throws ConfigException on store access failure or lock timeout
     */
    <T> void lock(String path, long timeoutMS, T data) throws ConfigException;
    /**
     * Releases the lock held on the node.
     *
     * @param path node path whose lock is released
     */
    void unLock(String path);
    /**
     * Releases client resources.
     */
    void close();
    // void setConfigClientTracer(ConfigClientTracer configClientTracer);
}

View File

@@ -1,17 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
/**
* Created by limeng on 2017/12/22
*/
public interface StateChangeListener {
    /** Event kinds reported to a listener. */
    enum State {
        CONNECTION_RECONNECT, // connection (re-)established
        CONNECTION_DISCONNECT, NODE_DATA_CHANGED, CHILD_UPDATED, CHILD_ADDED, CHILD_DELETED,
        // end of event kinds
        ;
    }
    /**
     * Invoked when an event occurs.
     *
     * @param state the event kind
     * @param path  the affected node path; null for connection-level events
     */
    void onChange(State state, String path);
}

View File

@@ -1,532 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import com.google.common.base.Preconditions;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.*;
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.ThreadUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* @author limeng
* @date 2017/12/22
*/
public class ZkConfigImpl implements ConfigClient, ConnectionStateListener {
private static final int DEFAULT_SESSION_TIMEOUT_MS = 12000;
private static final int DEFAULT_CONNECTION_TIMEOUT_MS = 3000;
private static final int DEFAULT_THREAD_POOL_SIZE = Math.max(Runtime.getRuntime().availableProcessors(), 16);
private final static Logger logger = LoggerFactory.getLogger(ZkConfigImpl.class);
final byte[] EMPTY = new byte[0];
/**
* 监听连接状态
*/
private final Map<String, java.util.concurrent.locks.Lock> registerLocks = new ConcurrentHashMap<>();
private Map<String, StateChangeListener> connectionListenerMap = new ConcurrentHashMap<>();
private Set<StateChangeListener> connectionStateListeners = new HashSet<>();
/**
* 监听节点数据变化的缓存
*/
private final Map<String, java.util.concurrent.locks.Lock> dataPathLocks = new ConcurrentHashMap<>();
private final Map<String, NodeCache> dataWatchers = new ConcurrentHashMap<>();
private final Map<String, List<StateChangeListener>> dataListeners = new ConcurrentHashMap<>();
/**
* 监听子节点变化的缓存
*/
private final Map<String, java.util.concurrent.locks.Lock> childrenPathLocks = new ConcurrentHashMap<>();
private final Map<String, PathChildrenCache> childrenWatcher = new ConcurrentHashMap<>();
private final Map<String, List<StateChangeListener>> childrenListeners = new ConcurrentHashMap<>();
/**
* 所有持有的锁
*/
private final Map<String, Lock> lockMap = new ConcurrentHashMap<>();
private final CuratorFramework curator;
private final ExecutorService executor;
public ZkConfigImpl(String zkAddress) {
this(zkAddress, DEFAULT_SESSION_TIMEOUT_MS, DEFAULT_CONNECTION_TIMEOUT_MS);
}
public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs) {
this(zkAddress, sessionTimeoutMs, connectionTimeoutMs, DEFAULT_THREAD_POOL_SIZE);
}
public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs, int threadPoolSize) {
ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().connectString(zkAddress);
builder.retryPolicy(retryPolicy);
builder.sessionTimeoutMs(sessionTimeoutMs).connectionTimeoutMs(connectionTimeoutMs);
curator = builder.build();
curator.getConnectionStateListenable().addListener(this);
curator.start();
executor = Executors.newFixedThreadPool(threadPoolSize, ThreadUtils.newThreadFactory("PathChildrenCache"));
}
private synchronized java.util.concurrent.locks.Lock getRegisterLock(String registerPath) {
registerLocks.putIfAbsent(registerPath, new ReentrantLock());
return registerLocks.get(registerPath);
}
private synchronized java.util.concurrent.locks.Lock getDataPathLock(String dataPath) {
dataPathLocks.putIfAbsent(dataPath, new ReentrantLock());
return dataPathLocks.get(dataPath);
}
private synchronized java.util.concurrent.locks.Lock getChildrenPathLock(String childrenPath) {
childrenPathLocks.putIfAbsent(childrenPath, new ReentrantLock());
return childrenPathLocks.get(childrenPath);
}
@Override
public void stateChanged(CuratorFramework client, ConnectionState newState) {
StateChangeListener.State state;
switch (newState) {
case LOST:
logger.error("[zk] current connection status is {}", newState);
releaseLocks();
state = StateChangeListener.State.CONNECTION_DISCONNECT;
break;
case CONNECTED:
case RECONNECTED:
logger.warn("[zk] current connection status is {}", newState);
state = StateChangeListener.State.CONNECTION_RECONNECT;
break;
default:
logger.info("[zk] current connection status is {}", newState);
return;
}
for (StateChangeListener listener : connectionListenerMap.values()) {
listener.onChange(state, null);
}
for (StateChangeListener listener : connectionStateListeners) {
listener.onChange(state, null);
}
}
@Override
public void addStateChangeListener(StateChangeListener listener) {
connectionStateListeners.add(listener);
}
/**
 * Checks whether the node at {@code path} exists.
 *
 * @return true if the node exists
 * @throws ConfigException if the ZK operation fails
 */
@Override
public boolean checkPathExists(String path) throws ConfigException {
    try {
        return curator.checkExists().forPath(path) != null;
    } catch (Exception e) {
        // Include the exception so the cause/stack trace is not lost.
        logger.warn("[zk] Failed to check EXIST for path [{}]", path, e);
        throw new ConfigException(e);
    }
}
/**
 * Returns the Stat of the node at {@code path}, or null if it does not exist.
 *
 * @throws ConfigException if the ZK operation fails
 */
@Override
public Stat getNodeStat(String path) throws ConfigException {
    try {
        return curator.checkExists().forPath(path);
    } catch (Exception e) {
        // Include the exception so the cause/stack trace is not lost.
        logger.warn("[zk] Failed to get node stat for path [{}]", path, e);
        throw new ConfigException(e);
    }
}
/**
 * Writes {@code data} to the existing node at {@code path} and returns the
 * resulting Stat.
 *
 * @throws ConfigException if the node is missing or the write fails
 */
@Override
public Stat setNodeStat(String path, String data) throws ConfigException {
    try {
        // NOTE(review): getBytes() uses the platform default charset — confirm UTF-8 is intended.
        byte[] payload = data.getBytes();
        return curator.setData().forPath(path, payload);
    } catch (Exception e) {
        throw new ConfigException(e);
    }
}
/**
 * Writes {@code data} to the node at {@code path}, creating it as a
 * PERSISTENT node first if it does not exist yet.
 *
 * @return the Stat from the final setData call
 * @throws ConfigException if any ZK operation fails
 */
@Override
public Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException {
try {
// Optimistic path: assume the node already exists.
return curator.setData().forPath(path, data.getBytes());
} catch (KeeperException.NoNodeException e) {
try {
// Node missing: create it, then retry the write.
curator.create().withMode(CreateMode.PERSISTENT).forPath(path);
return setNodeStat(path, data);
} catch (KeeperException.NodeExistsException nee) {
// Lost the create race to another writer — the node exists now, just write.
return setNodeStat(path, data);
} catch (Exception e2) {
throw new ConfigException(e2);
}
} catch (Exception e) {
throw new ConfigException(e);
}
}
/**
 * Creates a PERSISTENT_SEQUENTIAL node under {@code path} holding
 * {@code data} and returns the actual (sequence-suffixed) node path.
 *
 * @throws ConfigException if the create fails
 */
@Override
public String createPersistentSequential(String path, String data) throws ConfigException {
    try {
        return curator.create()
                .withMode(CreateMode.PERSISTENT_SEQUENTIAL)
                .forPath(path, data.getBytes());
    } catch (Exception e) {
        throw new ConfigException(e);
    }
}
//
// @Override
// public <T> void save(String path, T data) throws ConfigException {
// try {
// byte[] bytes = EMPTY;
// if (data != null) {
// bytes = JSON.toJSONBytes(data);
// }
// Stat stat = curator.checkExists().forPath(path);
// if (stat == null) {
// curator.create().creatingParentsIfNeeded().forPath(path, bytes);
// } else {
// curator.setData().forPath(path, bytes);
// }
// } catch (Exception e) {
// logger.warn("create {} failed", path);
// throw new ConfigException(e);
// }
// }
//
// @Override
// public <T> void saveIfNotExisted(String path, T data) throws ConfigException {
// try {
// byte[] bytes = EMPTY;
// if (data != null) {
// bytes = JSON.toJSONBytes(data);
// }
// Stat stat = curator.checkExists().forPath(path);
// if (stat == null) {
// curator.create().creatingParentsIfNeeded().forPath(path, bytes);
// }
// } catch (Exception e) {
// logger.warn("create {} failed", path, e);
// throw new ConfigException(e);
// }
// }
// @Override
// public <T> void register(final String path, final T data) throws ConfigException {
// java.util.concurrent.locks.Lock registerLock = getRegisterLock(path);
// registerLock.lock();
// try {
// byte[] bytes = EMPTY;
// if (data != null) {
// bytes = JSON.toJSONBytes(data);
// }
// if (!connectionListenerMap.containsKey(path)) {
// connectionListenerMap.put(path, new StateChangeListener() {
// @Override
// public void onChange(State state, String stateChangePath) {
// logger.warn("on state change " + state);
// switch (state) {
// case CONNECTION_RECONNECT:
// try {
// register(path, data);
// } catch (ConfigException e) {
// logger.warn("register {} failed", path);
// }
// break;
// default:
// break;
// }
// }
// });
// }
// try {
// deletePath(path);
// logger.warn("register reconnect delete {} succeed", path);
// } catch (ConfigException e) {
// logger.warn("register reconnect delete {} failed", path);
// }
// curator.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(path, bytes);
// logger.info("register reconnect create {} succeed", path);
// } catch (Exception e) {
// logger.warn("register reconnect create {} failed", path);
// throw new ConfigException(e);
// } finally {
// registerLock.unlock();
// }
// }
/**
 * Reads the node at {@code path} and deserializes its JSON content into an
 * instance of {@code clazz}.
 *
 * @throws ConfigException if the read or parse fails
 */
@Override
public <T> T get(String path, Class<T> clazz) throws ConfigException {
    try {
        return JSON.parseObject(curator.getData().forPath(path), clazz);
    } catch (Exception e) {
        throw new ConfigException(e);
    }
}
/**
 * Reads the node at {@code path} and returns its raw bytes as a String.
 *
 * @throws ConfigException if the read fails (e.g. node does not exist)
 */
@Override
public String get(String path) throws ConfigException {
    try {
        byte[] raw = curator.getData().forPath(path);
        // NOTE(review): decoded with the platform default charset — presumably UTF-8; confirm.
        return new String(raw);
    } catch (Exception e) {
        throw new ConfigException(e);
    }
}
/**
 * Deletes the node at {@code path} (recursively, children included) and stops
 * re-registering it on reconnect.
 *
 * @throws ConfigException if the existence check or delete fails
 */
@Override
public void delete(String path) throws ConfigException {
    try {
        // Drop the reconnect re-register hook for this path first.
        connectionListenerMap.remove(path);
        if (curator.checkExists().forPath(path) == null) {
            return;
        }
        curator.delete().deletingChildrenIfNeeded().forPath(path);
    } catch (Exception e) {
        throw new ConfigException(e);
    }
}
// private void deletePath(String path) throws ConfigException {
// try {
// if (curator.checkExists().forPath(path) != null) {
// curator.delete().deletingChildrenIfNeeded().forPath(path);
// }
// } catch (Exception e) {
// throw new ConfigException(e);
// }
// }
/**
 * Watches the data of the node at {@code path}, firing
 * {@code NODE_DATA_CHANGED} on the given listener whenever it changes.
 * A NodeCache is created lazily per path; additional listeners on the same
 * path reuse it. Guarded by the per-path data lock.
 *
 * @throws ConfigException if starting the cache fails
 */
@SuppressWarnings("all")
@Override
public void watch(final String path, final StateChangeListener listener) throws ConfigException {
java.util.concurrent.locks.Lock dataLock = getDataPathLock(path);
dataLock.lock();
try {
NodeCache nodeCache = dataWatchers.get(path);
if (nodeCache == null) {
// First watcher for this path: create and start the cache.
nodeCache = new NodeCache(curator, path);
nodeCache.start();
dataWatchers.put(path, nodeCache);
nodeCache.getListenable().addListener(new NodeCacheListener() {
@Override
public void nodeChanged() throws Exception {
listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path);
}
});
List<StateChangeListener> listeners = new ArrayList<>();
listeners.add(listener);
dataListeners.put(path, listeners);
} else {
// Cache already exists: attach this listener once (dedup by contains()).
List<StateChangeListener> listeners = dataListeners.get(path);
Preconditions.checkState(listeners != null);
if (!listeners.contains(listener)) {
listeners.add(listener);
nodeCache.getListenable().addListener(new NodeCacheListener() {
@Override
public void nodeChanged() throws Exception {
listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path);
}
});
}
}
} catch (Exception e) {
throw new ConfigException(e);
} finally {
dataLock.unlock();
}
}
/**
 * Returns the names of the direct children of the node at {@code path}.
 *
 * @throws ConfigException if the listing fails
 */
@Override
public List<String> getChildren(String path) throws ConfigException {
    try {
        return curator.getChildren().forPath(path);
    } catch (Exception e) {
        throw new ConfigException(e);
    }
}
/**
 * Watches the children of the node at {@code path}, forwarding child
 * add/update/remove events to the given listener (see
 * {@code PathChildrenCacheListenerImpl}). A PathChildrenCache is created
 * lazily per path; additional listeners reuse it. Guarded by the per-path
 * children lock.
 *
 * @throws ConfigException if starting the cache fails
 */
@Override
public void watchChildren(final String path, final StateChangeListener listener) throws ConfigException {
java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path);
childrenLock.lock();
try {
PathChildrenCache pathChildrenCache = childrenWatcher.get(path);
if (pathChildrenCache == null) {
// First watcher for this path: create the cache (no data caching, no compression)
// and deliver events on the shared executor.
pathChildrenCache = new PathChildrenCache(curator, path, false, false, executor);
pathChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
childrenWatcher.put(path, pathChildrenCache);
pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener));
List<StateChangeListener> listeners = new ArrayList<>();
listeners.add(listener);
childrenListeners.put(path, listeners);
} else {
// Cache already exists: attach this listener once (dedup by contains()).
List<StateChangeListener> listeners = childrenListeners.get(path);
Preconditions.checkState(listeners != null);
if (!listeners.contains(listener)) {
listeners.add(listener);
pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener));
}
}
} catch (Exception e) {
throw new ConfigException(e);
} finally {
childrenLock.unlock();
}
}
/**
 * Stops watching the children of {@code path}: closes its PathChildrenCache
 * (best effort) and forgets all listeners registered for it.
 */
@Override
public void cancelWatchChildren(String path) {
    java.util.concurrent.locks.Lock guard = getChildrenPathLock(path);
    guard.lock();
    try {
        PathChildrenCache cache = childrenWatcher.get(path);
        if (cache != null) {
            try {
                cache.close();
            } catch (IOException e) {
                // Best effort: log and still drop the bookkeeping below.
                logger.warn("close node cache for path {} error", path, e);
            }
        }
        childrenWatcher.remove(path);
        childrenListeners.remove(path);
    } finally {
        guard.unlock();
    }
}
private static class PathChildrenCacheListenerImpl implements PathChildrenCacheListener {
StateChangeListener listener;
public PathChildrenCacheListenerImpl(StateChangeListener listener) {
this.listener = listener;
}
@Override
public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
String path = event.getData() == null ? null : event.getData().getPath();
switch (event.getType()) {
case CHILD_ADDED:
listener.onChange(StateChangeListener.State.CHILD_ADDED, path);
break;
case CHILD_UPDATED:
listener.onChange(StateChangeListener.State.CHILD_UPDATED, path);
break;
case CHILD_REMOVED:
listener.onChange(StateChangeListener.State.CHILD_DELETED, path);
break;
default:
break;
}
}
}
/**
 * Acquires a distributed lock on {@code path} within {@code timeoutMS}
 * milliseconds, optionally writing {@code t} as JSON into the lock node.
 * If this process already holds the lock, the call is a no-op; a stale
 * (not-held) entry is released and re-acquired.
 *
 * @throws ConfigException if acquisition times out or any ZK operation fails
 */
@Override
public <T> void lock(String path, long timeoutMS, T t) throws ConfigException {
try {
Lock lock = lockMap.get(path);
if (lock != null) {
if (lock.isAcquiredInThisProcess()) {
// Already held by this process: nothing to do.
return;
}
// Stale entry: release and fall through to re-acquire.
lock.release();
lockMap.remove(path);
}
InterProcessSemaphoreMutex mutex = new InterProcessSemaphoreMutex(curator, path);
boolean locked = mutex.acquire(timeoutMS, TimeUnit.MILLISECONDS);
if (!locked) {
throw new ConfigException("lock " + path + " failed " + timeoutMS);
}
if (t != null) {
// Store the payload on the lock node so other processes can inspect the holder.
curator.setData().forPath(path, JSON.toJSONBytes(t));
}
lock = new Lock(mutex, path);
lockMap.put(path, lock);
} catch (Exception e) {
logger.warn("lock {} failed", path, e);
throw new ConfigException(e);
}
}
/**
 * Releases the distributed lock held on {@code path}, if any.
 */
@Override
public void unLock(String path) {
    Lock held = lockMap.remove(path);
    if (held == null) {
        return;
    }
    held.release();
}
public class Lock {
InterProcessSemaphoreMutex mutex;
String path;
public Lock(InterProcessSemaphoreMutex mutex, String path) {
this.mutex = mutex;
this.path = path;
}
public void release() {
lockMap.remove(path);
try {
mutex.release();
} catch (Exception e) {
logger.warn("release path {} lock error {}", path, e.getMessage());
}
}
public boolean isAcquiredInThisProcess() {
return mutex.isAcquiredInThisProcess();
}
}
/**
 * Shuts everything down: drops all listeners, closes every node/children
 * cache (best effort), releases held locks, closes the Curator client and
 * stops the watcher thread pool.
 */
@Override
public void close() {
    connectionListenerMap.clear();
    connectionStateListeners.clear();
    for (NodeCache cache : dataWatchers.values()) {
        try {
            cache.close();
        } catch (Exception e) {
            logger.warn("close node cache error", e);
        }
    }
    dataWatchers.clear();
    for (PathChildrenCache cache : childrenWatcher.values()) {
        try {
            cache.close();
        } catch (IOException e) {
            logger.warn("close children cache error", e);
        }
    }
    childrenWatcher.clear();
    releaseLocks();
    curator.close();
    executor.shutdown();
}
/**
 * Releases every held distributed lock and empties the lock registry.
 */
private void releaseLocks() {
    // Lock.release() removes its own entry from lockMap, so iterate over a
    // snapshot to avoid a ConcurrentModificationException while the map shrinks.
    for (Lock lock : new ArrayList<>(lockMap.values())) {
        lock.release();
    }
    lockMap.clear();
}
}

View File

@@ -1,165 +0,0 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
import java.util.HashMap;
import java.util.Map;
/**
* 存储结构:
*
* <pre>
* /consumers
* consumer-group
* ids
* consumerId
* offsets
* topic-0
* 0(partition编号节点内容表示)
* 1
* 2
* topic-1
* owners
* /brokers
* topics
* topic-0 (节点内容是 ("0",[0,1,2]))
* partitions
* 0
* state节点内容是leader的brokerId同步副本信息等
* 1
* 2
* topic-x
* ids
* 1(临时节点broker编号节点信息为broker相关信息如JMX端口host和port等)
* 2
* n
* </pre>
*
* @author tukun @ 2015-11-5
* @version 1.0.0
*/
/**
 * Builders for the Kafka-related ZooKeeper node paths laid out in the
 * class-level comment ("/brokers", "/consumers", "/config", ...).
 */
public class ZkPathUtil {

    public static final String ZOOKEEPER_SEPARATOR = "/";

    public static final String BROKER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "brokers";
    public static final String CONTROLLER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "controller";
    public static final String BROKER_IDS_ROOT = BROKER_ROOT_NODE
            + ZOOKEEPER_SEPARATOR + "ids";
    public static final String BROKER_TOPICS_ROOT = BROKER_ROOT_NODE
            + ZOOKEEPER_SEPARATOR + "topics";
    public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";
    public static final String CONFIG_ROOT_NODE = ZOOKEEPER_SEPARATOR + "config";
    public static final String CONFIG_TOPICS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "topics";

    // Maps a monitored metric name to the ZK path template it is read from.
    // NOTE(review): the key "ConusmerPartitionOffset" is misspelled, but it is a
    // runtime lookup key — kept as-is so existing callers keep working.
    private static Map<String, String> zkPathMap = new HashMap<String, String>();

    static {
        zkPathMap.put("ConusmerPartitionOffset", CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR
                + "${consumerGroup}" + ZOOKEEPER_SEPARATOR
                + "offsets" + ZOOKEEPER_SEPARATOR + "${topic}"
                + ZOOKEEPER_SEPARATOR + "${partition}");
    }

    // ---- /brokers subtree ----

    /** Path of a broker registration node: /brokers/ids/{brokerId}. */
    public static String getBrokerIdNodePath(long brokerId) {
        return String.format(BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + "%d", brokerId);
    }

    /** Path of a topic node: /brokers/topics/{topic}. */
    public static String getBrokerTopicRoot(String topic) {
        return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topic;
    }

    /** Path of a topic's partitions node: /brokers/topics/{topic}/partitions. */
    public static String getBrokerTopicPartitionRoot(String topic) {
        return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topic + ZOOKEEPER_SEPARATOR
                + "partitions";
    }

    /** Path of a partition's state node: .../partitions/{partitionId}/state. */
    public static String getBrokerTopicPartitionStatePath(String topic, int partitionId) {
        return String.format(getBrokerTopicPartitionRoot(topic) + ZOOKEEPER_SEPARATOR + "%d"
                + ZOOKEEPER_SEPARATOR + "state", partitionId);
    }

    // ---- /consumers subtree ----

    /**
     * Path of one partition's committed-offset node:
     * /consumers/{group}/offsets/{topic}/{partitionId}.
     * Bug fix: the previous format string produced
     * "/consumers/{group}/offset{topic}{partitionId}" — wrong node name
     * ("offset" vs "offsets") and no separators. Delegate to the canonical
     * builder so both methods always agree.
     */
    public static String getConsumerTopicPartitionOffsetNodePath(String consumerGroup,
                                                                 String topic, int partitionId) {
        return getConsumerGroupOffsetTopicPartitionNode(consumerGroup, topic, partitionId);
    }

    public static String getConsumerGroupRoot(String consumerGroup) {
        return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup;
    }

    public static String getConsumerGroupIdsRoot(String consumerGroup) {
        return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
                + "ids";
    }

    public static String getConsumerGroupOffsetRoot(String consumerGroup) {
        return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
                + "offsets";
    }

    public static String getConsumerGroupOwnersRoot(String consumerGroup) {
        return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
                + "owners";
    }

    public static String getConsumerGroupConsumerIdsNodePath(String consumerGroup, String consumerId) {
        return getConsumerGroupIdsRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + consumerId;
    }

    public static String getConsumerGroupOffsetTopicNode(String consumerGroup, String topic) {
        return getConsumerGroupOffsetRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic;
    }

    public static String getConsumerGroupOffsetTopicPartitionNode(String consumerGroup,
                                                                  String topic, int partitionId) {
        return getConsumerGroupOffsetTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR
                + partitionId;
    }

    public static String getConsumerGroupOwnersTopicNode(String consumerGroup, String topic) {
        return getConsumerGroupOwnersRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic;
    }

    public static String getConsumerGroupOwnersTopicPartitionNode(String consumerGroup,
                                                                  String topic, int partitionId) {
        return getConsumerGroupOwnersTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR
                + partitionId;
    }

    // ---- /config subtree ----

    public static String getConfigTopicNode(String topicName) {
        return CONFIG_TOPICS_ROOT_NODE + ZOOKEEPER_SEPARATOR + topicName;
    }

    /** Returns the last path segment, e.g. "/a/b/c" -> "c". */
    public static String parseLastPartFromZkPath(String zkPath) {
        return zkPath.substring(zkPath.lastIndexOf("/") + 1);
    }

    public static Map<String, String> getZkPathMap() {
        return zkPathMap;
    }

    // NOTE(review): replaces shared static state — callers should treat this as
    // initialization-time configuration only.
    public static void setZkPathMap(Map<String, String> zkPathMap) {
        ZkPathUtil.zkPathMap = zkPathMap;
    }

    public static String getControllerRootNode() {
        return CONTROLLER_ROOT_NODE;
    }

    public static String getEntityConfigPath(String entityType, String entity) {
        return getEntityConfigRootPath(entityType) + "/" + entity;
    }

    public static String getEntityConfigRootPath(String entityType) {
        return CONFIG_ROOT_NODE + "/" + entityType;
    }
}

9535
console/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,47 +0,0 @@
{
"name": "mobx-ts-example",
"version": "1.0.0",
"description": "",
"scripts": {
"start": "webpack-dev-server",
"daily-build": "cross-env NODE_ENV=production webpack",
"pre-build": "cross-env NODE_ENV=production webpack",
"prod-build": "cross-env NODE_ENV=production webpack"
},
"author": "",
"license": "ISC",
"devDependencies": {
"@hot-loader/react-dom": "^16.8.6",
"@types/echarts": "^4.1.9",
"@types/react": "^16.8.8",
"@types/react-dom": "^16.8.2",
"@types/react-router-dom": "^4.3.1",
"antd": "^3.16.1",
"clean-webpack-plugin": "^3.0.0",
"cross-env": "^7.0.2",
"css-loader": "^2.1.0",
"echarts": "^4.2.1",
"file-loader": "^5.0.2",
"html-webpack-plugin": "^3.2.0",
"less": "^3.9.0",
"less-loader": "^4.1.0",
"mini-css-extract-plugin": "^0.6.0",
"mobx": "^5.9.4",
"mobx-react": "^5.4.3",
"moment": "^2.24.0",
"optimize-css-assets-webpack-plugin": "^5.0.1",
"react": "^16.8.4",
"react-hot-loader": "^4.8.4",
"react-router-dom": "^5.0.0",
"style-loader": "^0.23.1",
"terser-webpack-plugin": "^1.2.3",
"ts-loader": "^5.3.3",
"tsconfig-paths-webpack-plugin": "^3.2.0",
"tslint": "^5.13.1",
"tslint-react": "^3.6.0",
"typescript": "^3.3.3333",
"webpack": "^4.29.6",
"webpack-cli": "^3.2.3",
"webpack-dev-server": "^3.2.1"
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.7 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.5 KiB

View File

@@ -1,5 +0,0 @@
// Ambient module declarations so static assets can be imported from TS code;
// the bundler's asset loaders resolve these at build time.
declare module '*.svg';
declare module '*.png';
declare module '*.jpg';
declare module '*.jpeg';
declare module '*.gif';

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 117 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 KiB

View File

@@ -1,104 +0,0 @@
// Barrel module for antd: imports each component from 'antd/es/<name>' together
// with its stylesheet (per-component imports keep the bundle small), then
// re-exports everything the app uses from one place.
import message from 'antd/es/message';
import 'antd/es/message/style';
import Input from 'antd/es/input';
import 'antd/es/input/style';
import InputNumber from 'antd/es/input-number';
import 'antd/es/input-number/style';
import Table from 'antd/es/table';
import 'antd/es/table/style';
import Tabs from 'antd/es/tabs';
import 'antd/es/tabs/style';
import Select from 'antd/es/select';
import 'antd/es/select/style';
import DatePicker from 'antd/es/date-picker';
import 'antd/es/date-picker/style';
import Button from 'antd/es/button';
import 'antd/es/button/style';
import Modal from 'antd/es/modal';
import 'antd/es/modal/style';
import Form from 'antd/es/form';
import 'antd/es/form/style';
import Row from 'antd/es/row';
import 'antd/es/row/style';
import Col from 'antd/es/col';
import 'antd/es/col/style';
import Switch from 'antd/es/switch';
import 'antd/es/switch/style';
import Alert from 'antd/es/alert';
import 'antd/es/alert/style';
import { PaginationConfig, ColumnProps } from 'antd/es/table/interface';
import notification from 'antd/es/notification';
import 'antd/es/notification/style';
import Tooltip from 'antd/es/tooltip';
import 'antd/es/tooltip/style';
import Radio from 'antd/es/radio';
// Fix: was `import 'antd/es/radio';` — the `/style` suffix was missing, so the
// Radio component's stylesheet was never loaded (every other component here
// imports its `/style` entry).
import 'antd/es/radio/style';
import { RadioChangeEvent } from 'antd/es/radio';
import Collapse from 'antd/es/collapse';
import 'antd/es/collapse/style';
import Icon from 'antd/es/icon';
import 'antd/es/icon/style';
import Dropdown from 'antd/es/dropdown';
import 'antd/es/dropdown/style';
import Spin from 'antd/es/spin';
import 'antd/es/spin/style';
import Drawer from 'antd/es/drawer';
import 'antd/es/drawer/style';
import Checkbox from 'antd/es/checkbox';
import 'antd/es/checkbox/style';
import Affix from 'antd/es/affix';
import 'antd/es/affix/style';
export {
PaginationConfig,
notification,
ColumnProps,
DatePicker,
message,
Tooltip,
Button,
Select,
Switch,
Modal,
Input,
Table,
Radio,
Alert,
Tabs,
Form,
Row,
Col,
RadioChangeEvent,
InputNumber,
Collapse,
Icon,
Dropdown,
Spin,
Drawer,
Checkbox,
Affix,
};

View File

@@ -1,82 +0,0 @@
import * as React from 'react';
import { Table } from 'component/antd';
// Shape of one row in the broker flow-metrics table.
interface IFlow {
  key: string;
  avr: number;
  pre1: number;
  pre5: number;
  pre15: number;
}

// Comparator factory for the numeric metric columns.
const numericSorter = (field: 'avr' | 'pre1' | 'pre5' | 'pre15') =>
  (a: IFlow, b: IFlow) => a[field] - b[field];

// Renders the metric name with its unit suffix (KB/s for byte rates,
// B/s for rejected bytes, bare name otherwise).
const renderMetricName = (t: string) => {
  if (t === 'byteRejected') {
    return 'byteRejected(B/s)';
  }
  return t === 'byteIn' || t === 'byteOut' ? `${t}(KB/s)` : t;
};

const flowColumns = [
  {
    title: '名称',
    dataIndex: 'key',
    key: 'name',
    // Orders rows by the first character of the metric name.
    sorter: (a: IFlow, b: IFlow) => a.key.charCodeAt(0) - b.key.charCodeAt(0),
    render: renderMetricName,
  },
  { title: '平均数', dataIndex: 'avr', key: 'partition_num', sorter: numericSorter('avr') },
  { title: '前1分钟', dataIndex: 'pre1', key: 'byte_input', sorter: numericSorter('pre1') },
  { title: '前5分钟', dataIndex: 'pre5', key: 'byte_output', sorter: numericSorter('pre5') },
  { title: '前15分钟', dataIndex: 'pre15', key: 'message', sorter: numericSorter('pre15') },
];
// Metric arrays for one broker; each array holds
// [average, last 1 min, last 5 min, last 15 min] samples (see render() below
// for how the four positions are consumed).
export interface IFlowInfo {
byteIn: number[];
byteOut: number[];
byteRejected: number[];
failedFetchRequest: number[];
failedProduceRequest: number[];
messageIn: number[];
// Index signature so metrics can be looked up by name.
[key: string]: number[];
}
/**
 * Base component rendering the flow-metrics table. Subclasses override
 * getData() to supply the metric arrays; each array is
 * [average, last 1 min, last 5 min, last 15 min].
 */
export class StatusGraghCom<T extends IFlowInfo> extends React.Component {
  public getData(): T {
    // The base class has no data source; subclasses must override.
    return null;
  }

  public render() {
    const statusData = this.getData();
    if (!statusData) return null;
    // Build one table row per metric. byteIn/byteOut are converted to KB
    // (divide by 1024); all values are formatted to 2 decimal places.
    // (Was a .map() used only for its side effect of pushing into an any[] —
    // now the mapped rows are the result.)
    const data = Object.keys(statusData).map((key) => {
      const samples = statusData[key];
      const v = key === 'byteIn' || key === 'byteOut'
        ? samples.map(i => (i / 1024).toFixed(2))
        : samples.map(i => i.toFixed(2));
      return { key, avr: v[0], pre1: v[1], pre5: v[2], pre15: v[3] };
    });
    return (
      <Table columns={flowColumns} dataSource={data} pagination={false} />
    );
  }
}

Some files were not shown because too many files have changed in this diff Show More