Compare commits

..

53 Commits

Author SHA1 Message Date
EricZeng
b74612fa41 Merge pull request #134 from didi/dev_2.2.0
merge dev 2.2.0
2021-01-25 17:30:26 +08:00
EricZeng
22e0c20dcd Merge pull request #133 from lucasun/dev_2.2.0_fe
fix txt
2021-01-25 17:21:42 +08:00
孙超
08f92e1100 fix txt 2021-01-25 17:02:07 +08:00
zengqiao
bb12ece46e modify zk example 2021-01-25 17:01:54 +08:00
EricZeng
0065438305 Merge pull request #132 from lucasun/dev_2.2.0_fe
add fe page
2021-01-25 16:47:02 +08:00
孙超
7f115c1b3e add fe page 2021-01-25 15:34:07 +08:00
EricZeng
0ef64fa4bd Merge pull request #126 from ZHAOYINRUI/patch-8
Create alarm_rules.md
2021-01-25 11:09:21 +08:00
ZHAOYINRUI
84dbc17c22 Update alarm_rules.md 2021-01-25 11:04:30 +08:00
EricZeng
16e16e356d Merge pull request #130 from xuehaipeng/patch-1
Update faq.md
2021-01-25 10:35:12 +08:00
xuehaipeng
978ee885c4 Update faq.md 2021-01-24 20:06:29 +08:00
zengqiao
850d43df63 add v2.2.0 feature & fix 2021-01-23 13:19:29 +08:00
zengqiao
fc109fd1b1 bump version to 2.2.0 2021-01-23 12:41:38 +08:00
EricZeng
2829947b93 Merge pull request #129 from didi/master
merge master
2021-01-23 11:09:52 +08:00
EricZeng
0c2af89a1c Merge pull request #125 from ZHAOYINRUI/patch-7
create kafka_metrics_desc.md
2021-01-23 11:03:14 +08:00
EricZeng
14c2dc9624 update kafka_metrics.md 2021-01-23 11:01:44 +08:00
EricZeng
4f35d710a6 Update and rename metric.md to kafka_metrics_desc.md 2021-01-23 10:58:11 +08:00
EricZeng
fdb5e018e5 Merge pull request #122 from ZHAOYINRUI/patch-4
Update README.md
2021-01-23 10:51:26 +08:00
EricZeng
6001fde25c Update dynamic_config_manager.md 2021-01-23 10:21:47 +08:00
EricZeng
ae63c0adaf Merge pull request #128 from didi/dev
add sync topic to db doc
2021-01-23 10:20:27 +08:00
zengqiao
ad1539c8f6 add sync topic to db doc 2021-01-23 10:17:59 +08:00
EricZeng
634a0c8cd0 Update faq.md 2021-01-22 20:42:13 +08:00
ZHAOYINRUI
773f9a0c63 Create alarm_rules.md 2021-01-22 18:16:51 +08:00
ZHAOYINRUI
e4e320e9e3 Create metric.md 2021-01-22 18:06:35 +08:00
ZHAOYINRUI
3b4b400e6b Update README.md 2021-01-22 15:56:53 +08:00
mike.zhangliang
f3a5e3f5ed Update README.md 2021-01-18 19:06:43 +08:00
mike.zhangliang
e685e621f3 Update README.md 2021-01-18 19:05:44 +08:00
EricZeng
2cd2be9b67 Merge pull request #112 from didi/dev
监控告警系统对接说明文档
2021-01-17 18:21:16 +08:00
zengqiao
e73d9e8a03 add monitor_system_integrate_with_self file 2021-01-17 18:18:07 +08:00
zengqiao
476f74a604 rename file 2021-01-17 16:49:02 +08:00
EricZeng
ab0d1d99e6 Merge pull request #111 from didi/dev
Dev
2021-01-17 16:11:08 +08:00
zengqiao
d5680ffd5d 增加Topic同步任务&Bug修复 2021-01-16 16:26:38 +08:00
EricZeng
3c091a88d4 Merge pull request #110 from didi/master
合并master分支上的改动
2021-01-16 13:37:31 +08:00
EricZeng
49b70b33de Merge pull request #108 from didi/dev
增加application.yml文件说明 & 修改版本
2021-01-16 13:34:07 +08:00
zengqiao
c5ff2716fb 优化build.sh & yaml 2021-01-16 12:39:56 +08:00
ZQKC
400fdf0896 修复图片地址错误问题
修复图片地址错误问题
2021-01-16 12:04:20 +08:00
ZQKC
cbb8c7323c Merge pull request #109 from ZHAOYINRUI/master
架构图更新、钉钉群ID更新
2021-01-16 09:33:19 +08:00
ZHAOYINRUI
60e79f8f77 Update README.md 2021-01-16 00:25:06 +08:00
ZHAOYINRUI
0e829d739a Add files via upload 2021-01-16 00:22:31 +08:00
ZQKC
62abb274e0 增加application.yml文件说明
增加application.yml文件说明
2021-01-15 19:14:48 +08:00
ZQKC
e4028785de Update README.md
change km address
2021-01-09 15:30:30 +08:00
mrazkong
2bb44bcb76 Update Intergration_n9e_monitor.md 2021-01-07 17:09:15 +08:00
mike.zhangliang
684599f81b Update README.md 2021-01-07 15:44:17 +08:00
mike.zhangliang
b56d28f5df Update README.md 2021-01-07 15:43:07 +08:00
ZHAOYINRUI
02b9ac04c8 Update user_guide_cn.md 2020-12-30 22:44:23 +08:00
ZQKC
abb652ebd5 Merge pull request #104 from didi/dev
v2.1版本合并
2020-12-19 01:14:26 +08:00
ZQKC
ff78a9cc35 Merge pull request #101 from didi/dev
use mysql 8
2020-12-11 11:49:06 +08:00
ZQKC
aea63cad52 Merge pull request #94 from didi/dev
增加FAQ
2020-11-22 21:49:48 +08:00
ZQKC
dd6069e41a Merge pull request #93 from didi/dev
夜莺Mon集成配置说明
2020-11-22 20:09:34 +08:00
ZQKC
4d9a327b1f Merge pull request #92 from didi/dev
FIX N9e Mon
2020-11-22 18:15:49 +08:00
ZQKC
76c2477387 Merge pull request #91 from didi/dev
修复上报夜莺功能
2020-11-22 17:00:39 +08:00
ZQKC
edfd84a8e3 Merge pull request #88 from didi/dev
增加build.sh
2020-11-15 17:02:26 +08:00
ZQKC
abbe47f6b9 Merge pull request #87 from didi/dev
初始化SQL优化&KCM修复&连接信息修复
2020-11-15 16:55:42 +08:00
ZQKC
f70cfabede Merge pull request #84 from didi/dev
fix 前端资源加载问题
2020-11-14 16:56:16 +08:00
177 changed files with 3464 additions and 882 deletions

View File

@@ -9,6 +9,8 @@
## 主要功能特性
### 快速体验
- 体验地址 http://117.51.146.109:8080 账号密码 admin/admin
### 集群监控维度
@@ -32,7 +34,7 @@
## kafka-manager架构图
![kafka-manager-arch](./docs/assets/images/common/arch.png)
![kafka-manager-arch](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgDHNDLj2ChKxctSuf72)
## 相关文档
@@ -45,13 +47,17 @@
## 钉钉交流群
![dingding_group](./docs/assets/images/common/dingding_group.jpg)
钉钉群ID32821440
## OCE认证
OCE是一个认证机制和交流平台为Logi-KafkaManager生产用户量身打造我们会为OCE企业提供更好的技术支持比如专属的技术沙龙、企业一对一的交流机会、专属的答疑群等如果贵司Logi-KafkaManager上了生产[快来加入吧](http://obsuite.didiyun.com/open/openAuth)
## 项目成员
### 内部核心人员
`iceyuhui``liuyaguang``limengmonty``zhangliangmike``nullhuangyiming``zengqiao``eilenexuzhe``huangjiaweihjw`
`iceyuhui``liuyaguang``limengmonty``zhangliangmike``nullhuangyiming``zengqiao``eilenexuzhe``huangjiaweihjw``zhaoyinrui``marzkonglingxu``joysunchao`
### 外部贡献者

View File

@@ -3,72 +3,53 @@ workspace=$(cd $(dirname $0) && pwd -P)
cd $workspace
## constant
km_version=2.1.0
app_name=kafka-manager-$km_version
OUTPUT_DIR=./output
KM_VERSION=2.2.0
APP_NAME=kafka-manager
APP_DIR=${APP_NAME}-${KM_VERSION}
gitversion=.gitversion
control=./control.sh
create_mysql_table=./docs/install_guide/create_mysql_table.sql
app_config_file=./kafka-manager-web/src/main/resources/application.yml
MYSQL_TABLE_SQL_FILE=./docs/install_guide/create_mysql_table.sql
CONFIG_FILE=./kafka-manager-web/src/main/resources/application.yml
## function
function build() {
# 进行编译
# # cmd 设置使用的JDK, 按需选择, 默认已安装了JDK 8
# JVERSION=`java -version 2>&1 | awk 'NR==1{gsub(/"/,"");print $3}'`
# major=`echo $JVERSION | awk -F. '{print $1}'`
# mijor=`echo $JVERSION | awk -F. '{print $2}'`
# if [ $major -le 1 ] && [ $mijor -lt 8 ]; then
# export JAVA_HOME=/usr/local/jdk1.8.0_65 #(使用jdk8请设置)
# export PATH=$JAVA_HOME/bin:$PATH
# fi
# 编译命令
mvn -U clean package -Dmaven.test.skip=true
mvn -U clean package -Dmaven.test.skip=true
local sc=$?
if [ $sc -ne 0 ];then
## 编译失败, 退出码为 非0
echo "$app_name build error"
echo "$APP_NAME build error"
exit $sc
else
echo -n "$app_name build ok, vsn="`gitversion`
echo "$APP_NAME build ok"
fi
}
function make_output() {
# 新建output目录
rm -rf $app_name &>/dev/null
mkdir -p $app_name &>/dev/null
# 新建output目录
rm -rf ${OUTPUT_DIR} &>/dev/null
mkdir -p ${OUTPUT_DIR}/${APP_DIR} &>/dev/null
# 填充output目录, output内的内容 即为 线上部署内容
(
# cp -rf $control $output_dir && # 拷贝 control.sh 脚本 至output目录
cp -rf $create_mysql_table $app_name && # 拷贝 sql 初始化脚本 至output目录
cp -rf $app_config_file $app_name && # 拷贝 application.yml 至output目录
# 填充output目录, output内的内容
(
cp -rf ${MYSQL_TABLE_SQL_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 sql 初始化脚本 至output目录
cp -rf ${CONFIG_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 application.yml 至output目录
# 拷贝程序包到output路径
cp kafka-manager-web/target/kafka-manager-web-$km_version-SNAPSHOT.jar ${app_name}/${app_name}-SNAPSHOT.jar
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
# 拷贝程序包到output路径
cp kafka-manager-web/target/kafka-manager-web-${KM_VERSION}-SNAPSHOT.jar ${OUTPUT_DIR}/${APP_DIR}/${APP_NAME}.jar
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
}
function make_package() {
# 压缩output目录
(
tar cvzf ${app_name}.tar.gz ${app_name}
echo -e "make package ok."
cd ${OUTPUT_DIR} && tar cvzf ${APP_DIR}.tar.gz ${APP_DIR}
echo -e "make package ok."
) || { echo -e "make package error"; exit 2; } # 压缩output目录失败后, 退出码为 非0
}
## internals
function gitversion() {
git log -1 --pretty=%h > $gitversion
local gv=`cat $gitversion`
echo "$gv"
}
##########################################
## main
## 其中,
@@ -88,4 +69,4 @@ make_package
# 编译成功
echo -e "build done"
exit 0
exit 0

Binary file not shown.

After

Width:  |  Height:  |  Size: 270 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 589 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 652 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 511 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 672 KiB

View File

@@ -0,0 +1,65 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 动态配置管理
## 1、Topic定时同步任务
### 1.1、配置的用途
`Logi-KafkaManager`在设计上,所有的资源都是挂在应用(app)下面。 如果接入的Kafka集群已经存在Topic了那么会导致这些Topic不属于任何的应用从而导致很多管理上的不便。
因此需要有一个方式将这些无主的Topic挂到某个应用下面。
这里提供了一个配置会定时自动将集群无主的Topic挂到某个应用下面。
### 1.2、相关实现
就是一个定时任务,该任务会定期做同步的工作。具体代码的位置在`com.xiaojukeji.kafka.manager.task.dispatch.op`包下面的`SyncTopic2DB`类。
### 1.3、配置说明
**步骤一:开启该功能**
在application.yml文件中增加如下配置已经有该配置的话直接把false修改为true即可
```yml
# 任务相关的开关
task:
op:
sync-topic-enabled: true # 无主的Topic定期同步到DB中
```
**步骤二:配置管理中指定挂在哪个应用下面**
配置的位置:
![sync_topic_to_db](./assets/dynamic_config_manager/sync_topic_to_db.jpg)
配置键:`SYNC_TOPIC_2_DB_CONFIG_KEY`
配置值(JSON数组)
- clusterId需要进行定时同步的集群ID
- defaultAppId该集群无主的Topic将挂在哪个应用下面
- addAuthority是否需要加上权限, 默认是false。因为考虑到这个挂载只是临时的我们不希望用户使用这个App同时后续可能移交给真正的所属的应用因此默认是不加上权限。
**注意这里的集群ID或者是应用ID不存在的话会导致配置不生效。该任务对已经在DB中的Topic不会进行修改**
```json
[
{
"clusterId": 1234567,
"defaultAppId": "ANONYMOUS",
"addAuthority": false
},
{
"clusterId": 7654321,
"defaultAppId": "ANONYMOUS",
"addAuthority": false
}
]
```

View File

@@ -7,7 +7,7 @@
---
# 夜莺监控集成
# 监控系统集成——夜莺
- `Kafka-Manager`通过将 监控的数据 以及 监控的规则 都提交给夜莺,然后依赖夜莺的监控系统从而实现监控告警功能。
@@ -22,10 +22,13 @@ monitor:
n9e:
nid: 2
user-token: 123456
# 夜莺 mon监控服务 地址
mon:
base-url: http://127.0.0.1:8032
sink:
base-url: http://127.0.0.1:8006
# 夜莺 transfer上传服务 地址
sink:
base-url: http://127.0.0.1:8008
# 夜莺 rdb资源服务 地址
rdb:
base-url: http://127.0.0.1:80

View File

@@ -0,0 +1,54 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 监控系统集成
- 监控系统默认与 [夜莺](https://github.com/didi/nightingale) 进行集成;
- 对接自有的监控系统需要进行简单的二次开发,即实现部分监控告警模块的相关接口即可;
- 集成会有两块内容,一个是指标数据上报的集成,还有一个是监控告警规则的集成;
## 1、指标数据上报集成
仅完成这一步的集成之后,即可将监控数据上报到监控系统中,此时已能够在自己的监控系统进行监控告警规则的配置了。
**步骤一:实现指标上报的接口**
- 按照自己内部监控系统的数据格式要求,将数据进行组装成符合自己内部监控系统要求的数据进行上报,具体的可以参考夜莺集成的实现代码。
- 至于会上报哪些指标,可以查看有哪些地方调用了该接口。
![sink_metrics](./assets/monitor_system_integrate_with_self/sink_metrics.jpg)
**步骤二:相关配置修改**
![change_config](./assets/monitor_system_integrate_with_self/change_config.jpg)
**步骤三:开启上报任务**
![open_sink_schedule](./assets/monitor_system_integrate_with_self/open_sink_schedule.jpg)
## 2、监控告警规则集成
完成**1、指标数据上报集成**之后,即可在自己的监控系统进行监控告警规则的配置了。完成该步骤的集成之后,可以在`Logi-KafkaManager`中进行监控告警规则的增删改查等等。
大体上和**1、指标数据上报集成**一致,
**步骤一:实现相关接口**
![integrate_ms](./assets/monitor_system_integrate_with_self/integrate_ms.jpg)
实现完成步骤一之后,接下来的步骤和**1、指标数据上报集成**中的步骤二、步骤三一致,都需要进行相关配置的修改即可。
## 3、总结
简单介绍了一下监控告警的集成,嫌麻烦的同学可以仅做 **1、指标数据上报集成** 这一节的内容即可满足一定场景下的需求。
**集成过程中有任何觉得文档没有说清楚的地方或者建议欢迎入群交流也欢迎贡献代码觉得好也辛苦给个star。**

View File

@@ -0,0 +1,27 @@
---
![kafka-manager-logo](../../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 升级至`2.2.0`版本
`2.2.0`版本在`cluster`表及`logical_cluster`表各增加了一个字段因此需要执行下面的sql进行字段的增加。
```sql
# cluster表中增加jmx_properties字段, 这个字段会用于存储jmx相关的认证以及配置信息
ALTER TABLE `cluster` ADD COLUMN `jmx_properties` TEXT NULL COMMENT 'JMX配置' AFTER `security_properties`;
# logical_cluster中增加identification字段, 同时数据和原先name数据相同, 最后增加一个唯一键.
# 此后, name字段还是表示集群名称, identification字段表示的是集群标识, 只能是字母数字及下划线组成,
# 数据上报到监控系统时, 集群这个标识采用的字段就是identification字段, 之前使用的是name字段.
ALTER TABLE `logical_cluster` ADD COLUMN `identification` VARCHAR(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识' AFTER `name`;
UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0;
ALTER TABLE `logical_cluster` ADD UNIQUE INDEX `uniq_identification` (`identification` ASC);
```

View File

@@ -0,0 +1,104 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 配置说明
```yaml
server:
port: 8080 # 服务端口
tomcat:
accept-count: 1000
max-connections: 10000
max-threads: 800
min-spare-threads: 100
spring:
application:
name: kafkamanager
datasource:
kafka-manager: # 数据库连接配置
jdbc-url: jdbc:mysql://127.0.0.1:3306/kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8 #数据库的地址
username: admin # 用户名
password: admin # 密码
driver-class-name: com.mysql.jdbc.Driver
main:
allow-bean-definition-overriding: true
profiles:
active: dev # 启用的配置
servlet:
multipart:
max-file-size: 100MB
max-request-size: 100MB
logging:
config: classpath:logback-spring.xml
custom:
idc: cn # 部署的数据中心, 忽略该配置, 后续会进行删除
jmx:
max-conn: 10 # 和单台 broker 的最大JMX连接数
store-metrics-task:
community:
broker-metrics-enabled: true # 社区部分broker metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB
topic-metrics-enabled: true # 社区部分topic的metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB
didi:
app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
topic-throttled-metrics: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
save-days: 7 #指标在DB中保持的天数-1表示永久保存7表示保存近7天的数据
# 任务相关的开关
task:
op:
sync-topic-enabled: false # 无主的Topic定期同步到DB中
account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化
ldap:
kcm: # 集群升级部署相关的功能需要配合夜莺及S3进行使用这块我们后续专门补充一个文档细化一下牵扯到kcm_script.sh脚本的修改
enabled: false # 默认关闭
storage:
base-url: http://127.0.0.1 # 存储地址
n9e:
base-url: http://127.0.0.1:8004 # 夜莺任务中心的地址
user-token: 12345678 # 夜莺用户的token
timeout: 300 # 集群任务的超时时间,单位秒
account: root # 集群任务使用的账号
script-file: kcm_script.sh # 集群任务的脚本
monitor: # 监控告警相关的功能,需要配合夜莺进行使用
enabled: false # 默认关闭true就是开启
n9e:
nid: 2
user-token: 1234567890
mon:
# 夜莺 mon监控服务 地址
base-url: http://127.0.0.1:8032
sink:
# 夜莺 transfer上传服务 地址
base-url: http://127.0.0.1:8006
rdb:
# 夜莺 rdb资源服务 地址
base-url: http://127.0.0.1:80
# enabled: 表示是否开启监控告警的功能, true: 开启, false: 不开启
# n9e.nid: 夜莺的节点ID
# n9e.user-token: 用户的密钥,在夜莺的个人设置中
# n9e.mon.base-url: 监控地址
# n9e.sink.base-url: 数据上报地址
# n9e.rdb.base-url: 用户资源中心地址
notify: # 通知的功能
kafka: # 默认通知发送到kafka的指定Topic中
cluster-id: 95 # Topic的集群ID
topic-name: didi-kafka-notify # Topic名称
order: # 部署的KM的地址
detail-url: http://127.0.0.1
```

View File

@@ -1,3 +1,8 @@
-- create database
CREATE DATABASE logi_kafka_manager;
USE logi_kafka_manager;
--
-- Table structure for table `account`
--
@@ -104,7 +109,8 @@ CREATE TABLE `cluster` (
`zookeeper` varchar(512) NOT NULL DEFAULT '' COMMENT 'zk地址',
`bootstrap_servers` varchar(512) NOT NULL DEFAULT '' COMMENT 'server地址',
`kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本',
`security_properties` text COMMENT '安全认证参数',
`security_properties` text COMMENT 'Kafka安全认证参数',
`jmx_properties` text COMMENT 'JMX配置',
`status` tinyint(4) NOT NULL DEFAULT '1' COMMENT ' 监控标记, 0表示未监控, 1表示监控中',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
@@ -302,20 +308,22 @@ INSERT INTO kafka_user(app_id, password, user_type, operation) VALUES ('dkm_admi
-- Table structure for table `logical_cluster`
--
-- DROP TABLE IF EXISTS `logical_cluster`;
CREATE TABLE `logical_cluster` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
`description` text COMMENT '备注说明',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
`identification` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识',
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
`description` text COMMENT '备注说明',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_name` (`name`),
UNIQUE KEY `uniq_identification` (`identification`)
) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
--
-- Table structure for table `monitor_rule`

View File

@@ -9,19 +9,39 @@
# 安装手册
## 1、环境依赖
## 环境依赖
如果是以Release包进行安装的则仅安装`Java``MySQL`即可。如果是要先进行源码包进行打包,然后再使用,则需要安装`Maven``Node`环境。
- `Maven 3.5+`(后端打包依赖)
- `node v12+`(前端打包依赖)
- `Java 8+`(运行环境需要)
- `MySQL 5.7`(数据存储)
- `Maven 3.5+`(后端打包依赖)
- `Node 10+`(前端打包依赖)
---
## 环境初始化
## 2、获取安装包
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令从而创建所需的MySQL库及表默认创建的库名是`kafka_manager`
**1、Release直接下载**
这里如果觉得麻烦然后也不想进行二次开发则可以直接下载Release包下载地址[Github Release包下载地址](https://github.com/didi/Logi-KafkaManager/releases)
如果觉得Github的下载地址太慢了也可以进入`Logi-KafkaManager`的用户群获取群地址在README中。
**2、源代码进行打包**
下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`sh build.sh`命令即可,执行完成之后会在`output/kafka-manager-xxx`目录下面生成一个jar包。
对于`windows`环境的用户,估计执行不了`sh build.sh`命令,因此可以直接执行`mvn install`,然后在`kafka-manager-web/target`目录下生成一个kafka-manager-web-xxx.jar的包。
获取到jar包之后我们继续下面的步骤。
---
## 3、MySQL-DB初始化
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令从而创建所需的MySQL库及表默认创建的库名是`logi_kafka_manager`
```
# 示例:
@@ -30,29 +50,15 @@ mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
---
## 打包
```bash
# 一次性打包
cd ..
mvn install
## 4、启动
```
# application.yml 是配置文件最简单的是仅修改MySQL相关的配置即可启动
---
## 启动
```
# application.yml 是配置文件
cp kafka-manager-web/src/main/resources/application.yml kafka-manager-web/target/
cd kafka-manager-web/target/
nohup java -jar kafka-manager-web-2.1.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
nohup java -jar kafka-manager.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
```
## 使用
### 5、使用
本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md)

View File

@@ -0,0 +1,25 @@
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
## 报警策略-报警函数介绍
| 类别 | 函数 | 含义 |函数文案 |备注 |
| --- | --- | --- | --- | --- |
| 发生次数 |alln | 最近$n个周期内全发生 | 连续发生(all) | |
| 发生次数 | happen, n, m | 最近$n个周期内发生m次 | 出现(happen) | null点也计算在n内 |
| 数学统计 | sum, n | 最近$n个周期取值 的 和 | 求和(sum) | sum_over_time |
| 数学统计 | avg, n | 最近$n个周期取值 的 平均值 | 平均值(avg) | avg_over_time |
| 数学统计 | min, n | 最近$n个周期取值 的 最小值 | 最小值(min) | min_over_time |
| 数学统计 | max, n | 最近$n个周期取值 的 最大值 | 最大值(max) | max_over_time |
| 变化率 | pdiff, n | 最近$n个点的变化率, 有一个满足 则触发 | 突增突降率(pdiff) | 假设, 最近3个周期的值分别为 v, v2, v3v为最新值那么计算公式为 any( (v-v2)/v2, (v-v3)/v3 )**区分正负** |
| 变化量 | diff, n | 最近$n个点的变化量, 有一个满足 则触发 | 突增突降值(diff) | 假设, 最近3个周期的值分别为 v, v2, v3v为最新值那么计算公式为 any( (v-v2), (v-v3) )**区分正负** |
| 变化量 | ndiff | 最近n个周期发生m次 v(t) - v(t-1) $OP threshold其中 v(t) 为最新值 | 连续变化(区分正负) - ndiff | |
| 数据中断 | nodata, t | 最近 $t 秒内 无数据上报 | 数据上报中断(nodata) | |
| 同环比 | c_avg_rate_abs, n | 最近$n个周期的取值相比 1天或7天前取值 的变化率 的绝对值 | 同比变化率(c_avg_rate_abs) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为abs((avg(v1,v2,v3) / avg(v1',v2') -1)* 100%) |
| 同环比 | c_avg_rate, n | 最近$n个周期的取值相比 1天或7天前取值 的变化率(**区分正负**) | 同比变化率(c_avg_rate) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为(avg(v1,v2,v3) / avg(v1',v2') -1)* 100% |

View File

@@ -29,7 +29,7 @@
主要用途是进行大集群的管理 & 集群细节的屏蔽。
- 逻辑集群通过逻辑集群概念将集群Broker按业务进行归类方便管理
- Region通过引入Region同时Topic按Region度创建减少Broker间的连接
- Region通过引入Region同时Topic按Region度创建减少Broker间的连接
---
@@ -53,13 +53,13 @@
- 3、数据库时区问题。
检查MySQL的topic表查看是否有数据如果有数据那么再检查设置的时区是否正确。
检查MySQL的topic_metrics、broker_metrics表,查看是否有数据,如果有数据,那么再检查设置的时区是否正确。
---
### 5、如何对接夜莺的监控告警功能
- 参看 [kafka-manager 对接夜莺监控](../dev_guide/Intergration_n9e_monitor.md) 说明。
- 参看 [kafka-manager 对接夜莺监控](../dev_guide/monitor_system_integrate_with_n9e.md) 说明。
---

View File

@@ -0,0 +1,72 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# Topic 指标说明
## 1. 实时流量指标说明
| 指标名称| 单位| 指标含义|
|-- |---- |---|
| messagesIn| 条/s | 每秒发送到kafka的消息条数 |
| byteIn| B/s | 每秒发送到kafka的字节数 |
| byteOut| B/s | 每秒流出kafka的字节数所有消费组消费的流量如果是Kafka版本较低这个还包括副本同步的流量 |
| byteRejected| B/s | 每秒被拒绝的字节数 |
| failedFetchRequest| qps | 每秒拉取失败的请求数 |
| failedProduceRequest| qps | 每秒发送失败的请求数 |
| totalProduceRequest| qps | 每秒总共发送的请求数与messagesIn的区别是一个是发送请求里面可能会有多条消息 |
| totalFetchRequest| qps | 每秒总共拉取消息的请求数 |
&nbsp;
## 2. 历史流量指标说明
| 指标名称| 单位| 指标含义|
|-- |---- |---|
| messagesIn| 条/s | 近一分钟每秒发送到kafka的消息条数 |
| byteIn| B/s | 近一分钟每秒发送到kafka的字节数 |
| byteOut| B/s | 近一分钟每秒流出kafka的字节数所有消费组消费的流量如果是Kafka版本较低副本同步的流量 |
| byteRejected| B/s | 近一分钟每秒被拒绝的字节数 |
| totalProduceRequest| qps | 近一分钟每秒总共发送的请求数与messagesIn的区别是一个是发送请求里面可能会有多条消息 |
&nbsp;
## 3. 实时耗时指标说明
**基于滴滴加强版Kafka引擎的特性可以获取Broker的实时耗时信息和历史耗时信息**
| 指标名称| 单位 | 指标含义 | 耗时高原因 | 解决方案|
|-- |-- |-- |-- |--|
| RequestQueueTimeMs| ms | 请求队列排队时间 | 请求多,服务端处理不过来 | 联系运维人员处理 |
| LocalTimeMs| ms | Broker本地处理时间 | 服务端读写数据慢,可能是读写锁竞争 | 联系运维人员处理 |
| RemoteTimeMs| ms | 请求等待远程完成时间对于发送请求如果ack=-1该时间表示副本同步时间对于消费请求如果当前没有数据该时间为等待新数据时间如果请求的版本与topic存储的版本不同需要做版本转换也会拉高该时间 | 对于生产ack=-1必然会导致该指标耗时高对于消费如果topic数据写入很慢该指标高也正常。如果需要版本转换该指标耗时也会高 | 对于生产可以考虑修改ack=1消费端问题可以联系运维人员具体分析 |
| ThrottleTimeMs| ms | 请求限流时间 | 生产/消费被限流 | 申请提升限流值 |
| ResponseQueueTimeMs| ms | 响应队列排队时间 | 响应多,服务端处理不过来 | 联系运维人员处理 |
| ResponseSendTimeMs| ms | 响应返回客户端时间 | 1下游消费能力差导致向consumer发送数据时写网络缓冲区过慢2消费lag过大一直从磁盘读取数据 | 1:提升客户端消费性能2: 联系运维人员确认是否读取磁盘问题 |
| TotalTimeMs| ms | 接收到请求到完成总时间,理论上该时间等于上述六项时间之和,但由于各时间都是单独统计,总时间只是约等于上述六部分时间之和 | 上面六项有些耗时高 | 具体针对高的指标解决 |
**备注由于kafka消费端实现方式消费端一次会发送多个Fetch请求在接收到一个Response之后就会开始处理数据使Broker端返回其他Response等待因此ResponseSendTimeMs并不完全是服务端发送时间有时会包含一部分消费端处理数据时间**
## 4. 历史耗时指标说明
**基于滴滴加强版Kafka引擎的特性可以获取Broker的实时耗时信息和历史耗时信息**
| 指标名称| 单位| 指标含义|
|-- | ---- |---|
| produceRequestTime99thPercentile|ms|Topic近一分钟发送99分位耗时|
| fetchRequestTime99thPercentile|ms|Topic近一分钟拉取99分位耗时|
| produceRequestTime95thPercentile|ms|Topic近一分钟发送95分位耗时|
| fetchRequestTime95thPercentile|ms|Topic近一分钟拉取95分位耗时|
| produceRequestTime75thPercentile|ms|Topic近一分钟发送75分位耗时|
| fetchRequestTime75thPercentile|ms|Topic近一分钟拉取75分位耗时|
| produceRequestTime50thPercentile|ms|Topic近一分钟发送50分位耗时|
| fetchRequestTime50thPercentile|ms|Topic近一分钟拉取50分位耗时|

View File

@@ -622,6 +622,9 @@ Lag表示该消费客户端是否有堆积等于 partition offset-consume
<font size=2>步骤3</font>填写完成后,点击提交即可提交申请。
备注说明集群创建后还需在此基础上创建region、逻辑集群。具体操作可参照 [集群接入手册](https://github.com/didi/Logi-KafkaManager/blob/master/docs/user_guide/add_cluster/add_cluster.md)
![applycluster](./assets/applycluster.png)
#### 申请集群下线 ####

View File

@@ -5,13 +5,13 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-common</artifactId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
</parent>
<properties>

View File

@@ -6,8 +6,6 @@ package com.xiaojukeji.kafka.manager.common.bizenum;
*/
public enum IDCEnum {
CN("cn", "国内"),
US("us", "美东"),
RU("ru", "俄罗斯"),
;
private String idc;

View File

@@ -21,6 +21,8 @@ public enum ModuleEnum {
PARTITION(5, "分区"),
GATEWAY_CONFIG(6, "Gateway配置"),
UNKNOWN(-1, "未知")
;
ModuleEnum(int code, String message) {

View File

@@ -10,6 +10,7 @@ public enum RebalanceDimensionEnum {
REGION(1, "Region维度"),
BROKER(2, "Broker维度"),
TOPIC(3, "Topic维度"),
PARTITION(4, "Partition维度"),
;
private Integer code;

View File

@@ -45,4 +45,13 @@ public enum GatewayConfigKeyEnum {
", configName='" + configName + '\'' +
'}';
}
/**
 * Resolves the enum constant whose configType matches the given string.
 *
 * @param configType the config-type string to look up
 * @return the matching constant, or null when no constant matches
 */
public static GatewayConfigKeyEnum getByConfigType(String configType) {
    // Linear scan over the declared constants; first match wins.
    for (GatewayConfigKeyEnum candidate : GatewayConfigKeyEnum.values()) {
        if (candidate.getConfigType().equals(configType)) {
            return candidate;
        }
    }
    return null;
}
}

View File

@@ -7,6 +7,8 @@ package com.xiaojukeji.kafka.manager.common.constant;
public class KafkaConstant {
public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets";
public static final String TRANSACTION_TOPIC_NAME = "__transaction_state";
public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com";
public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1";

View File

@@ -12,11 +12,6 @@ public class TopicCreationConstant {
*/
public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG";
/**
* 治理平台创建Topic配置KEY
*/
public static final String CHORUS_CREATE_TOPIC_CONFIG_KEY_NAME = "CHORUS_CREATE_TOPIC_CONFIG";
/**
* 内部创建Topic配置KEY
*/
@@ -30,6 +25,8 @@ public class TopicCreationConstant {
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L;
public static Properties createNewProperties(Long retentionTime) {
Properties properties = new Properties();
properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime));

View File

@@ -3,7 +3,6 @@ package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
@@ -16,17 +15,12 @@ public class ConsumerMetadata {
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
private Map<String, List<String>> consumerGroupAppMap = new ConcurrentHashMap<>();
public ConsumerMetadata(Set<String> consumerGroupSet,
Map<String, Set<String>> topicNameConsumerGroupMap,
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap,
Map<String, List<String>> consumerGroupAppMap) {
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
this.consumerGroupSet = consumerGroupSet;
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
this.consumerGroupAppMap = consumerGroupAppMap;
}
public Set<String> getConsumerGroupSet() {
@@ -40,8 +34,4 @@ public class ConsumerMetadata {
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
return consumerGroupSummaryMap;
}
public Map<String, List<String>> getConsumerGroupAppMap() {
return consumerGroupAppMap;
}
}

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import java.io.Serializable;
@@ -118,4 +119,9 @@ public class Result<T> implements Serializable {
result.setData(data);
return result;
}
public boolean failed() {
return !Constant.SUCCESS.equals(code);
}
}

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
/**
 * View object describing a broker that is a preferred candidate for the
 * controller role: its id, host, start time and status.
 * Plain mutable holder — not thread-safe.
 */
public class ControllerPreferredCandidate {
    /** ID of the candidate broker. */
    private Integer brokerId;

    /** Host of the candidate broker. */
    private String host;

    /** Broker start time; presumably epoch millis — TODO confirm against caller. */
    private Long startTime;

    /** Status code of the candidate; semantics defined by the caller, not visible here. */
    private Integer status;

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Integer getStatus() {
        return status;
    }

    public void setStatus(Integer status) {
        this.status = status;
    }

    @Override
    public String toString() {
        // Fix: previously printed the stale name "ControllerPreferredBroker",
        // which no longer matches this class and misleads log readers.
        return "ControllerPreferredCandidate{" +
                "brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", startTime=" + startTime +
                ", status=" + status +
                '}';
    }
}

View File

@@ -9,6 +9,8 @@ public class LogicalCluster {
private String logicalClusterName;
private String logicalClusterIdentification;
private Integer mode;
private Integer topicNum;
@@ -41,6 +43,14 @@ public class LogicalCluster {
this.logicalClusterName = logicalClusterName;
}
public String getLogicalClusterIdentification() {
return logicalClusterIdentification;
}
public void setLogicalClusterIdentification(String logicalClusterIdentification) {
this.logicalClusterIdentification = logicalClusterIdentification;
}
public Integer getMode() {
return mode;
}
@@ -81,6 +91,14 @@ public class LogicalCluster {
this.bootstrapServers = bootstrapServers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getGmtCreate() {
return gmtCreate;
}
@@ -97,19 +115,12 @@ public class LogicalCluster {
this.gmtModify = gmtModify;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicalCluster{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", logicalClusterIdentification='" + logicalClusterIdentification + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +

View File

@@ -2,30 +2,18 @@ package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
import java.util.Objects;
/**
* 消费组信息
* @author zengqiao
* @date 19/4/18
*/
public class ConsumerGroupDTO {
public class ConsumerGroup {
private Long clusterId;
private String consumerGroup;
private List<String> appIdList;
private OffsetLocationEnum offsetStoreLocation;
public ConsumerGroupDTO(Long clusterId,
String consumerGroup,
List<String> appIdList,
OffsetLocationEnum offsetStoreLocation) {
public ConsumerGroup(Long clusterId, String consumerGroup, OffsetLocationEnum offsetStoreLocation) {
this.clusterId = clusterId;
this.consumerGroup = consumerGroup;
this.appIdList = appIdList;
this.offsetStoreLocation = offsetStoreLocation;
}
@@ -45,14 +33,6 @@ public class ConsumerGroupDTO {
this.consumerGroup = consumerGroup;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
@@ -63,10 +43,9 @@ public class ConsumerGroupDTO {
@Override
public String toString() {
return "ConsumerGroupDTO{" +
return "ConsumerGroup{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", appIdList=" + appIdList +
", offsetStoreLocation=" + offsetStoreLocation +
'}';
}
@@ -79,7 +58,7 @@ public class ConsumerGroupDTO {
if (o == null || getClass() != o.getClass()) {
return false;
}
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
ConsumerGroup that = (ConsumerGroup) o;
return clusterId.equals(that.clusterId)
&& consumerGroup.equals(that.consumerGroup)
&& offsetStoreLocation == that.offsetStoreLocation;

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
/**
 * Summary of a consumer group: owning cluster, group name, where its offsets
 * are stored, the app IDs bound to it, and the group state string reported
 * by the broker. Plain mutable holder — not thread-safe.
 */
public class ConsumerGroupSummary {
    private Long clusterId;                          // physical cluster the group belongs to
    private String consumerGroup;                    // consumer group name
    private OffsetLocationEnum offsetStoreLocation;  // offset storage location, see OffsetLocationEnum
    private List<String> appIdList;                  // app IDs associated with this group
    private String state;                            // group state as reported upstream

    public Long getClusterId() { return clusterId; }

    public void setClusterId(Long clusterId) { this.clusterId = clusterId; }

    public String getConsumerGroup() { return consumerGroup; }

    public void setConsumerGroup(String consumerGroup) { this.consumerGroup = consumerGroup; }

    public OffsetLocationEnum getOffsetStoreLocation() { return offsetStoreLocation; }

    public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) { this.offsetStoreLocation = offsetStoreLocation; }

    public List<String> getAppIdList() { return appIdList; }

    public void setAppIdList(List<String> appIdList) { this.appIdList = appIdList; }

    public String getState() { return state; }

    public void setState(String state) { this.state = state; }

    @Override
    public String toString() {
        return "ConsumerGroupSummary{" +
                "clusterId=" + clusterId +
                ", consumerGroup='" + consumerGroup + '\'' +
                ", offsetStoreLocation=" + offsetStoreLocation +
                ", appIdList=" + appIdList +
                ", state='" + state + '\'' +
                '}';
    }
}

View File

@@ -25,7 +25,10 @@ public class RebalanceDTO {
@ApiModelProperty(value = "TopicName")
private String topicName;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度]")
@ApiModelProperty(value = "分区ID")
private Integer partitionId;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度, 4:Partition纬度]")
private Integer dimension;
public Long getClusterId() {
@@ -60,6 +63,14 @@ public class RebalanceDTO {
this.topicName = topicName;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Integer getDimension() {
return dimension;
}
@@ -68,22 +79,12 @@ public class RebalanceDTO {
this.dimension = dimension;
}
@Override
public String toString() {
return "RebalanceDTO{" +
"clusterId=" + clusterId +
", regionId=" + regionId +
", brokerId=" + brokerId +
", topicName='" + topicName + '\'' +
", dimension=" + dimension +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId)
|| RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId)
|| RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName) ) {
|| (RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId))
|| (RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId))
|| (RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName))
|| (RebalanceDimensionEnum.PARTITION.getCode().equals(dimension) && (ValidateUtils.isNull(topicName) || ValidateUtils.isNull(partitionId))) ) {
return false;
}
return true;

View File

@@ -27,9 +27,12 @@ public class ClusterDTO {
@ApiModelProperty(value="数据中心")
private String idc;
@ApiModelProperty(value="安全配置参数")
@ApiModelProperty(value="Kafka安全配置")
private String securityProperties;
@ApiModelProperty(value="Jmx配置")
private String jmxProperties;
public Long getClusterId() {
return clusterId;
}
@@ -78,6 +81,14 @@ public class ClusterDTO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
@Override
public String toString() {
return "ClusterDTO{" +
@@ -87,6 +98,7 @@ public class ClusterDTO {
", bootstrapServers='" + bootstrapServers + '\'' +
", idc='" + idc + '\'' +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
'}';
}

View File

@@ -21,6 +21,9 @@ public class LogicalClusterDTO {
@ApiModelProperty(value = "名称")
private String name;
@ApiModelProperty(value = "集群标识, 用于告警的上报")
private String identification;
@ApiModelProperty(value = "集群模式")
private Integer mode;
@@ -52,6 +55,14 @@ public class LogicalClusterDTO {
this.name = name;
}
public String getIdentification() {
return identification;
}
public void setIdentification(String identification) {
this.identification = identification;
}
public Integer getMode() {
return mode;
}
@@ -97,6 +108,7 @@ public class LogicalClusterDTO {
return "LogicalClusterDTO{" +
"id=" + id +
", name='" + name + '\'' +
", identification='" + identification + '\'' +
", mode=" + mode +
", clusterId=" + clusterId +
", regionIdList=" + regionIdList +
@@ -117,6 +129,7 @@ public class LogicalClusterDTO {
}
appId = ValidateUtils.isNull(appId)? "": appId;
description = ValidateUtils.isNull(description)? "": description;
identification = ValidateUtils.isNull(identification)? name: identification;
return true;
}
}

View File

@@ -17,6 +17,8 @@ public class ClusterDO implements Comparable<ClusterDO> {
private String securityProperties;
private String jmxProperties;
private Integer status;
private Date gmtCreate;
@@ -31,30 +33,6 @@ public class ClusterDO implements Comparable<ClusterDO> {
this.id = id;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
public String getClusterName() {
return clusterName;
}
@@ -87,6 +65,38 @@ public class ClusterDO implements Comparable<ClusterDO> {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "ClusterDO{" +
@@ -95,6 +105,7 @@ public class ClusterDO implements Comparable<ClusterDO> {
", zookeeper='" + zookeeper + '\'' +
", bootstrapServers='" + bootstrapServers + '\'' +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +

View File

@@ -11,6 +11,8 @@ public class LogicalClusterDO {
private String name;
private String identification;
private Integer mode;
private String appId;
@@ -41,6 +43,14 @@ public class LogicalClusterDO {
this.name = name;
}
public String getIdentification() {
return identification;
}
public void setIdentification(String identification) {
this.identification = identification;
}
public Integer getMode() {
return mode;
}
@@ -102,6 +112,7 @@ public class LogicalClusterDO {
return "LogicalClusterDO{" +
"id=" + id +
", name='" + name + '\'' +
", identification='" + identification + '\'' +
", mode=" + mode +
", appId='" + appId + '\'' +
", clusterId=" + clusterId +

View File

@@ -15,6 +15,9 @@ public class LogicClusterVO {
@ApiModelProperty(value="逻辑集群名称")
private String clusterName;
@ApiModelProperty(value="逻辑标识")
private String clusterIdentification;
@ApiModelProperty(value="逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群")
private Integer mode;
@@ -24,9 +27,6 @@ public class LogicClusterVO {
@ApiModelProperty(value="集群版本")
private String clusterVersion;
@ApiModelProperty(value="物理集群ID")
private Long physicalClusterId;
@ApiModelProperty(value="集群服务地址")
private String bootstrapServers;
@@ -55,6 +55,22 @@ public class LogicClusterVO {
this.clusterName = clusterName;
}
public String getClusterIdentification() {
return clusterIdentification;
}
public void setClusterIdentification(String clusterIdentification) {
this.clusterIdentification = clusterIdentification;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public Integer getTopicNum() {
return topicNum;
}
@@ -71,14 +87,6 @@ public class LogicClusterVO {
this.clusterVersion = clusterVersion;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public void setPhysicalClusterId(Long physicalClusterId) {
this.physicalClusterId = physicalClusterId;
}
public String getBootstrapServers() {
return bootstrapServers;
}
@@ -87,6 +95,14 @@ public class LogicClusterVO {
this.bootstrapServers = bootstrapServers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getGmtCreate() {
return gmtCreate;
}
@@ -103,32 +119,15 @@ public class LogicClusterVO {
this.gmtModify = gmtModify;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicClusterVO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", clusterIdentification='" + clusterIdentification + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +
", physicalClusterId=" + physicalClusterId +
", bootstrapServers='" + bootstrapServers + '\'' +
", description='" + description + '\'' +
", gmtCreate=" + gmtCreate +

View File

@@ -0,0 +1,67 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.List;
/**
* @author zengqiao
* @date 21/01/14
*/
/**
 * View object carrying the summary of a topic's consumer group:
 * group name, the app IDs using it, offset storage location, and state.
 */
@ApiModel(value = "Topic消费组概要信息")
public class ConsumerGroupSummaryVO {
    // Consumer group name.
    @ApiModelProperty(value = "消费组名称")
    private String consumerGroup;

    // App IDs using this group, already flattened into a single string.
    @ApiModelProperty(value = "使用的AppID")
    private String appIds;

    // Offset storage location — NOTE(review): presumably "zk"/"broker"; confirm at the mapper that fills this VO.
    @ApiModelProperty(value = "offset存储位置")
    private String location;

    // Consumer group state.
    @ApiModelProperty(value = "消费组状态")
    private String state;

    public String getConsumerGroup() {
        return consumerGroup;
    }

    public void setConsumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
    }

    public String getAppIds() {
        return appIds;
    }

    public void setAppIds(String appIds) {
        this.appIds = appIds;
    }

    public String getLocation() {
        return location;
    }

    public void setLocation(String location) {
        this.location = location;
    }

    public String getState() {
        return state;
    }

    public void setState(String state) {
        this.state = state;
    }

    @Override
    public String toString() {
        // Fix: appIds is a String — quote it like every other String field for consistent output.
        return "ConsumerGroupSummaryVO{" +
                "consumerGroup='" + consumerGroup + '\'' +
                ", appIds='" + appIds + '\'' +
                ", location='" + location + '\'' +
                ", state='" + state + '\'' +
                '}';
    }
}

View File

@@ -0,0 +1,103 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.rd;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Date;
/**
* @author zengqiao
* @date 20/3/19
*/
/**
 * View object describing a single Gateway configuration entry:
 * a typed, named, versioned value plus its create/modify timestamps.
 */
@ApiModel(value = "GatewayConfigVO", description = "Gateway配置信息")
public class GatewayConfigVO {
    @ApiModelProperty(value="ID")
    private Long id;

    @ApiModelProperty(value="配置类型")
    private String type;

    @ApiModelProperty(value="配置名称")
    private String name;

    @ApiModelProperty(value="配置值")
    private String value;

    @ApiModelProperty(value="版本")
    private Long version;

    @ApiModelProperty(value="创建时间")
    private Date createTime;

    @ApiModelProperty(value="修改时间")
    private Date modifyTime;

    public Long getId() { return id; }

    public void setId(Long id) { this.id = id; }

    public String getType() { return type; }

    public void setType(String type) { this.type = type; }

    public String getName() { return name; }

    public void setName(String name) { this.name = name; }

    public String getValue() { return value; }

    public void setValue(String value) { this.value = value; }

    public Long getVersion() { return version; }

    public void setVersion(Long version) { this.version = version; }

    public Date getCreateTime() { return createTime; }

    public void setCreateTime(Date createTime) { this.createTime = createTime; }

    public Date getModifyTime() { return modifyTime; }

    public void setModifyTime(Date modifyTime) { this.modifyTime = modifyTime; }

    @Override
    public String toString() {
        // Produces the identical representation as direct string concatenation.
        StringBuilder sb = new StringBuilder("GatewayConfigVO{");
        sb.append("id=").append(id);
        sb.append(", type='").append(type).append('\'');
        sb.append(", name='").append(name).append('\'');
        sb.append(", value='").append(value).append('\'');
        sb.append(", version=").append(version);
        sb.append(", createTime=").append(createTime);
        sb.append(", modifyTime=").append(modifyTime);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -32,9 +32,12 @@ public class ClusterBaseVO {
@ApiModelProperty(value="集群类型")
private Integer mode;
@ApiModelProperty(value="安全配置参数")
@ApiModelProperty(value="Kafka安全配置")
private String securityProperties;
@ApiModelProperty(value="Jmx配置")
private String jmxProperties;
@ApiModelProperty(value="1:监控中, 0:暂停监控")
private Integer status;
@@ -108,6 +111,14 @@ public class ClusterBaseVO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
@@ -141,8 +152,9 @@ public class ClusterBaseVO {
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", mode=" + mode +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +

View File

@@ -0,0 +1,61 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
 * View object for a broker that is a preferred controller candidate:
 * broker id, host, start time, and online status.
 */
@ApiModel(description = "Broker基本信息")
public class ControllerPreferredCandidateVO {
    @ApiModelProperty(value = "brokerId")
    private Integer brokerId;

    @ApiModelProperty(value = "主机名")
    private String host;

    // Broker start time — NOTE(review): presumably epoch millis; confirm at the producer of this value.
    @ApiModelProperty(value = "启动时间")
    private Long startTime;

    // 0: online, -1: offline (per the swagger description above).
    @ApiModelProperty(value = "broker状态[0:在线, -1:不在线]")
    private Integer status;

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Integer getStatus() {
        return status;
    }

    public void setStatus(Integer status) {
        this.status = status;
    }

    @Override
    public String toString() {
        // Fix: previously printed "ControllerPreferredBrokerVO", which does not match this class's name.
        return "ControllerPreferredCandidateVO{" +
                "brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", startTime=" + startTime +
                ", status=" + status +
                '}';
    }
}

View File

@@ -18,6 +18,9 @@ public class LogicalClusterVO {
@ApiModelProperty(value = "逻辑集群名称")
private String logicalClusterName;
@ApiModelProperty(value = "逻辑集群标识")
private String logicalClusterIdentification;
@ApiModelProperty(value = "物理集群ID")
private Long physicalClusterId;
@@ -55,6 +58,14 @@ public class LogicalClusterVO {
this.logicalClusterName = logicalClusterName;
}
public String getLogicalClusterIdentification() {
return logicalClusterIdentification;
}
public void setLogicalClusterIdentification(String logicalClusterIdentification) {
this.logicalClusterIdentification = logicalClusterIdentification;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
@@ -116,6 +127,7 @@ public class LogicalClusterVO {
return "LogicalClusterVO{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", logicalClusterIdentification='" + logicalClusterIdentification + '\'' +
", physicalClusterId=" + physicalClusterId +
", regionIdList=" + regionIdList +
", mode=" + mode +

View File

@@ -9,6 +9,7 @@ import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.TopicConnectionDO
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
@@ -52,7 +53,14 @@ public class JsonUtils {
return JSON.toJSONString(obj);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject) {
public static <T> T stringToObj(String src, Class<T> clazz) {
if (ValidateUtils.isBlank(src)) {
return null;
}
return JSON.parseObject(src, clazz);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
JSONObject topicObject = jsonObject.getJSONObject(clientType);
@@ -73,6 +81,7 @@ public class JsonUtils {
connectionDO.setClusterId(clusterId);
connectionDO.setTopicName(topicName);
connectionDO.setType(clientType);
connectionDO.setCreateTime(new Date(postTime));
connectionDOList.add(connectionDO);
}
}

View File

@@ -1,7 +1,5 @@
package com.xiaojukeji.kafka.manager.common.utils;
import com.xiaojukeji.kafka.manager.common.bizenum.IDCEnum;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import org.apache.commons.lang.StringUtils;
import java.util.List;
@@ -83,23 +81,4 @@ public class ValidateUtils {
public static boolean isNullOrLessThanZero(Double value) {
return value == null || value < 0;
}
public static boolean topicNameLegal(String idc, String topicName) {
if (ValidateUtils.isNull(idc) || ValidateUtils.isNull(topicName)) {
return false;
}
// 校验Topic的长度
if (topicName.length() >= TopicCreationConstant.TOPIC_NAME_MAX_LENGTH) {
return false;
}
// 校验前缀
if (IDCEnum.CN.getIdc().equals(idc) ||
(IDCEnum.US.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_US)) ||
(IDCEnum.RU.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_RU))) {
return true;
}
return false;
}
}

View File

@@ -0,0 +1,65 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
/**
 * JMX connection settings for one broker host: connection cap,
 * optional credentials, and whether to connect over SSL.
 */
public class JmxConfig {
    /**
     * Maximum number of JMX connections per host.
     */
    private Integer maxConn;

    /**
     * JMX username (optional).
     */
    private String username;

    /**
     * JMX password (optional).
     */
    private String password;

    /**
     * Whether SSL is enabled for the JMX connection.
     */
    private Boolean openSSL;

    public Integer getMaxConn() {
        return maxConn;
    }

    public void setMaxConn(Integer maxConn) {
        this.maxConn = maxConn;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    // Kept as "is"-prefixed Boolean getter: existing callers invoke isOpenSSL().
    public Boolean isOpenSSL() {
        return openSSL;
    }

    public void setOpenSSL(Boolean openSSL) {
        this.openSSL = openSSL;
    }

    @Override
    public String toString() {
        // Security fix: never expose the raw JMX password in logs/diagnostics — mask it.
        return "JmxConfig{" +
                "maxConn=" + maxConn +
                ", username='" + username + '\'' +
                ", password='******'" +
                ", openSSL=" + openSSL +
                '}';
    }
}

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -7,8 +8,14 @@ import javax.management.*;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;
import javax.naming.Context;
import javax.rmi.ssl.SslRMIClientSocketFactory;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
@@ -28,13 +35,19 @@ public class JmxConnectorWrap {
private AtomicInteger atomicInteger;
public JmxConnectorWrap(String host, int port, int maxConn) {
private JmxConfig jmxConfig;
public JmxConnectorWrap(String host, int port, JmxConfig jmxConfig) {
this.host = host;
this.port = port;
if (maxConn <= 0) {
maxConn = 1;
this.jmxConfig = jmxConfig;
if (ValidateUtils.isNull(this.jmxConfig)) {
this.jmxConfig = new JmxConfig();
}
this.atomicInteger = new AtomicInteger(maxConn);
if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) {
this.jmxConfig.setMaxConn(1);
}
this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn());
}
public boolean checkJmxConnectionAndInitIfNeed() {
@@ -64,8 +77,18 @@ public class JmxConnectorWrap {
}
String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
try {
JMXServiceURL url = new JMXServiceURL(jmxUrl);
jmxConnector = JMXConnectorFactory.connect(url, null);
Map<String, Object> environment = new HashMap<String, Object>();
if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) {
environment.put(javax.management.remote.JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
}
if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
environment.put(Context.SECURITY_PROTOCOL, "ssl");
SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory();
environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory);
environment.put("com.sun.jndi.rmi.factory.socket", clientSocketFactory);
}
jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment);
LOGGER.info("JMX connect success, host:{} port:{}.", host, port);
return true;
} catch (MalformedURLException e) {

View File

@@ -18,6 +18,8 @@ public class ZkPathUtil {
public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
/**
* config
*/
@@ -27,11 +29,11 @@ public class ZkPathUtil {
public static final String CONFIG_CLIENTS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "clients";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
public static final String D_CONTROLLER_CANDIDATES = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension/candidates";
public static String getBrokerIdNodePath(Integer brokerId) {
return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);

View File

@@ -8,7 +8,7 @@
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
</parent>
<build>

View File

@@ -2,6 +2,7 @@ import * as React from 'react';
import { Select, Input, InputNumber, Form, Switch, Checkbox, DatePicker, Radio, Upload, Button, Icon, Tooltip } from 'component/antd';
import Monacoeditor from 'component/editor/monacoEditor';
import { searchProps } from 'constants/table';
import { version } from 'store/version';
import './index.less';
const TextArea = Input.TextArea;
@@ -189,7 +190,7 @@ class XForm extends React.Component<IXFormProps> {
case FormItemType.upload:
return (
<Upload beforeUpload={(file: any) => false} {...item.attrs}>
<Button><Icon type="upload" /></Button>
<Button><Icon type="upload" /></Button>{version.fileSuffix && <span style={{ color: '#fb3939', padding: '0 0 0 10px' }}>{`请上传${version.fileSuffix}文件`}</span>}
</Upload>
);
}

View File

@@ -19,7 +19,7 @@ export const cellStyle = {
overflow: 'hidden',
whiteSpace: 'nowrap',
textOverflow: 'ellipsis',
cursor: 'pointer',
// cursor: 'pointer',
};
export const searchProps = {

View File

@@ -38,7 +38,7 @@ export class ClusterConsumer extends SearchAndFilterContainer {
key: 'operation',
width: '10%',
render: (t: string, item: IOffset) => {
return (<a onClick={() => this.getConsumeDetails(item)}></a>);
return (<a onClick={() => this.getConsumeDetails(item)}></a>);
},
}];
private xFormModal: IXFormWrapper;
@@ -110,7 +110,7 @@ export class ClusterConsumer extends SearchAndFilterContainer {
/>
</div>
<Modal
title="消费的Topic"
title="消费详情"
visible={this.state.detailsVisible}
onOk={() => this.handleDetailsOk()}
onCancel={() => this.handleDetailsCancel()}

View File

@@ -2,7 +2,7 @@ import * as React from 'react';
import Url from 'lib/url-parser';
import { region } from 'store';
import { admin } from 'store/admin';
import { topic } from 'store/topic';
import { app } from 'store/app';
import { Table, notification, Tooltip, Popconfirm } from 'antd';
import { pagination, cellStyle } from 'constants/table';
import { observer } from 'mobx-react';
@@ -56,8 +56,6 @@ export class ClusterTopic extends SearchAndFilterContainer {
public expandPartition(item: IClusterTopics) {
// getTopicBasicInfo
admin.getTopicsBasicInfo(item.clusterId, item.topicName).then(data => {
console.log(admin.topicsBasic);
console.log(admin.basicInfo);
this.clusterTopicsFrom = item;
this.setState({
expandVisible: true,
@@ -114,6 +112,7 @@ export class ClusterTopic extends SearchAndFilterContainer {
public componentDidMount() {
admin.getClusterTopics(this.clusterId);
app.getAdminAppList()
}
public renderClusterTopicList() {

View File

@@ -159,7 +159,6 @@ export class ExclusiveCluster extends SearchAndFilterContainer {
public handleDeleteRegion = (record: IBrokersRegions) => {
const filterRegion = admin.logicalClusters.filter(item => item.regionIdList.includes(record.id));
if (!filterRegion) {
return;
}
@@ -335,6 +334,7 @@ export class ExclusiveCluster extends SearchAndFilterContainer {
{this.renderSearch('', '请输入Region名称broker ID')}
</ul>
{this.renderRegion()}
{this.renderDeleteRegionModal()}
</div >
);
}

View File

@@ -40,15 +40,15 @@ export class LogicalCluster extends SearchAndFilterContainer {
key: 'logicalClusterId',
},
{
title: '逻辑集群中文名称',
title: '逻辑集群名称',
dataIndex: 'logicalClusterName',
key: 'logicalClusterName',
width: '150px'
},
{
title: '逻辑集群英文名称',
dataIndex: 'logicalClusterName',
key: 'logicalClusterName1',
title: '逻辑集群标识',
dataIndex: 'logicalClusterIdentification',
key: 'logicalClusterIdentification',
width: '150px'
},
{

View File

@@ -1,5 +1,5 @@
import * as React from 'react';
import { Modal, Table, Button, notification, message, Tooltip, Icon, Popconfirm, Alert } from 'component/antd';
import { Modal, Table, Button, notification, message, Tooltip, Icon, Popconfirm, Alert, Popover } from 'component/antd';
import { wrapper } from 'store';
import { observer } from 'mobx-react';
import { IXFormWrapper, IMetaData, IRegister } from 'types/base-type';
@@ -58,7 +58,7 @@ export class ClusterList extends SearchAndFilterContainer {
message: '请输入zookeeper地址',
}],
attrs: {
placeholder: '请输入zookeeper地址',
placeholder: '请输入zookeeper地址例如192.168.0.1:2181,192.168.0.2:2181/logi-kafka',
rows: 2,
disabled: item ? true : false,
},
@@ -72,7 +72,7 @@ export class ClusterList extends SearchAndFilterContainer {
message: '请输入bootstrapServers',
}],
attrs: {
placeholder: '请输入bootstrapServers',
placeholder: '请输入bootstrapServers例如192.168.1.1:9092,192.168.1.2:9092',
rows: 2,
disabled: item ? true : false,
},
@@ -131,7 +131,7 @@ export class ClusterList extends SearchAndFilterContainer {
{
"security.protocol": "SASL_PLAINTEXT",
"sasl.mechanism": "PLAIN",
"sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xxxxxx\" password=\"xxxxxx\";"
"sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\\"xxxxxx\\" password=\\"xxxxxx\\";"
}`,
rows: 8,
},
@@ -271,11 +271,13 @@ export class ClusterList extends SearchAndFilterContainer {
cancelText="取消"
okText="确认"
>
<a
className="action-button"
>
{item.status === 1 ? '暂停监控' : '开始监控'}
</a>
<Tooltip title="暂停监控将无法正常监控指标信息,建议开启监控">
<a
className="action-button"
>
{item.status === 1 ? '暂停监控' : '开始监控'}
</a>
</Tooltip>
</Popconfirm>
<a onClick={this.showMonitor.bind(this, item)}>

View File

@@ -79,7 +79,7 @@ export class IndividualBill extends React.Component {
}
public renderTableList() {
const adminUrl=`${urlPrefix}/admin/bill-detail`
const adminUrl = `${urlPrefix}/admin/bill-detail`
return (
<Table
rowKey="key"
@@ -89,11 +89,11 @@ export class IndividualBill extends React.Component {
/>
);
}
public renderChart() {
return (
<div className="chart-box">
<BarChartComponet ref={(ref) => this.chart = ref } getChartData={this.getData.bind(this, null)} />
<BarChartComponet ref={(ref) => this.chart = ref} getChartData={this.getData.bind(this, null)} />
</div>
);
}
@@ -132,7 +132,7 @@ export class IndividualBill extends React.Component {
<>
<div className="container">
<Tabs defaultActiveKey="1" type="card">
<TabPane
<TabPane
tab={<>
<span></span>&nbsp;
<a
@@ -142,7 +142,7 @@ export class IndividualBill extends React.Component {
>
<Icon type="question-circle" />
</a>
</>}
</>}
key="1"
>
{this.renderDatePick()}

View File

@@ -11,6 +11,7 @@ import { filterKeys } from 'constants/strategy';
import { VirtualScrollSelect } from 'component/virtual-scroll-select';
import { IsNotNaN } from 'lib/utils';
import { searchProps } from 'constants/table';
import { toJS } from 'mobx';
interface IDynamicProps {
form?: any;
@@ -33,6 +34,7 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
public monitorType: string = null;
public clusterId: number = null;
public clusterName: string = null;
public clusterIdentification: string | number = null;
public topicName: string = null;
public consumerGroup: string = null;
public location: string = null;
@@ -45,16 +47,18 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
this.props.form.validateFields((err: Error, values: any) => {
if (!err) {
monitorType = values.monitorType;
const index = cluster.clusterData.findIndex(item => item.clusterId === values.cluster);
const index = cluster.clusterData.findIndex(item => item.clusterIdentification === values.cluster);
if (index > -1) {
values.clusterIdentification = cluster.clusterData[index].clusterIdentification;
values.clusterName = cluster.clusterData[index].clusterName;
}
for (const key of Object.keys(values)) {
if (filterKeys.indexOf(key) > -1) { // 只有这几种值可以设置
filterList.push({
tkey: key === 'clusterName' ? 'cluster' : key, // 传参需要将clusterName转成cluster
tkey: key === 'clusterName' ? 'cluster' : key, // clusterIdentification
topt: '=',
tval: [values[key]],
clusterIdentification: values.clusterIdentification
});
}
}
@@ -74,13 +78,13 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
public resetFormValue(
monitorType: string = null,
clusterId: number = null,
clusterIdentification: any = null,
topicName: string = null,
consumerGroup: string = null,
location: string = null) {
const { setFieldsValue } = this.props.form;
setFieldsValue({
cluster: clusterId,
cluster: clusterIdentification,
topic: topicName,
consumerGroup,
location,
@@ -88,18 +92,18 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
});
}
public getClusterId = (clusterName: string) => {
public getClusterId = async (clusterIdentification: any) => {
let clusterId = null;
const index = cluster.clusterData.findIndex(item => item.clusterName === clusterName);
const index = cluster.clusterData.findIndex(item => item.clusterIdentification === clusterIdentification);
if (index > -1) {
clusterId = cluster.clusterData[index].clusterId;
}
if (clusterId) {
cluster.getClusterMetaTopics(clusterId);
await cluster.getClusterMetaTopics(clusterId);
this.clusterId = clusterId;
return this.clusterId;
}
return this.clusterId = clusterName as any;
};
return this.clusterId = clusterId as any;
}
public async initFormValue(monitorRule: IRequestParams) {
@@ -108,17 +112,19 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
const topicFilter = strategyFilterList.filter(item => item.tkey === 'topic')[0];
const consumerFilter = strategyFilterList.filter(item => item.tkey === 'consumerGroup')[0];
const clusterName = clusterFilter ? clusterFilter.tval[0] : null;
const clusterIdentification = clusterFilter ? clusterFilter.tval[0] : null;
const topic = topicFilter ? topicFilter.tval[0] : null;
const consumerGroup = consumerFilter ? consumerFilter.tval[0] : null;
const location: string = null;
const monitorType = monitorRule.strategyExpressionList[0].metric;
alarm.changeMonitorStrategyType(monitorType);
await this.getClusterId(clusterName);
//增加clusterIdentification替代原来的clusterName
this.clusterIdentification = clusterIdentification;
await this.getClusterId(this.clusterIdentification);
//
await this.handleSelectChange(topic, 'topic');
await this.handleSelectChange(consumerGroup, 'consumerGroup');
this.resetFormValue(monitorType, this.clusterId, topic, consumerGroup, location);
this.resetFormValue(monitorType, this.clusterIdentification, topic, consumerGroup, location);
}
public clearFormData() {
@@ -130,11 +136,12 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
this.resetFormValue();
}
public async handleClusterChange(e: number) {
this.clusterId = e;
public async handleClusterChange(e: any) {
this.clusterIdentification = e;
this.topicName = null;
topic.setLoading(true);
await cluster.getClusterMetaTopics(e);
const clusterId = await this.getClusterId(e);
await cluster.getClusterMetaTopics(clusterId);
this.resetFormValue(this.monitorType, e, null, this.consumerGroup, this.location);
topic.setLoading(false);
}
@@ -170,7 +177,7 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
}
this.consumerGroup = null;
this.location = null;
this.resetFormValue(this.monitorType, this.clusterId, this.topicName);
this.resetFormValue(this.monitorType, this.clusterIdentification, this.topicName);
topic.setLoading(false);
}
@@ -213,17 +220,24 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
},
rules: [{ required: true, message: '请选择监控指标' }],
} as IVritualScrollSelect;
const clusterData = toJS(cluster.clusterData);
const options = clusterData?.length ? clusterData.map(item => {
return {
label: `${item.clusterName}${item.description ? '' + item.description + '' : ''}`,
value: item.clusterIdentification
}
}) : null;
const clusterItem = {
label: '集群',
options: cluster.clusterData,
defaultValue: this.clusterId,
options,
defaultValue: this.clusterIdentification,
rules: [{ required: true, message: '请选择集群' }],
attrs: {
placeholder: '请选择集群',
className: 'middle-size',
className: 'large-size',
disabled: this.isDetailPage,
onChange: (e: number) => this.handleClusterChange(e),
onChange: (e: any) => this.handleClusterChange(e),
},
key: 'cluster',
} as unknown as IVritualScrollSelect;
@@ -241,7 +255,7 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
}),
attrs: {
placeholder: '请选择Topic',
className: 'middle-size',
className: 'large-size',
disabled: this.isDetailPage,
onChange: (e: string) => this.handleSelectChange(e, 'topic'),
},
@@ -329,7 +343,7 @@ export class DynamicSetFilter extends React.Component<IDynamicProps> {
key={v.value || v.key || index}
value={v.value}
>
{v.label.length > 25 ? <Tooltip placement="bottomLeft" title={v.label}>
{v.label?.length > 25 ? <Tooltip placement="bottomLeft" title={v.label}>
{v.label}
</Tooltip> : v.label}
</Select.Option>

View File

@@ -43,21 +43,23 @@
Icon {
margin-left: 8px;
}
.ant-form-item-label {
// padding-left: 10px;
width: 118px;
text-align: right !important;
}
&.type-form {
padding-top: 10px;
padding-top: 10px;
.ant-form{
min-width: 755px;
}
.ant-form-item {
width: 30%;
width: 45%;
min-width: 360px;
}
.ant-form-item-label {
padding-left: 10px;
}
.ant-form-item-control {
width: 220px;
width: 300px;
}
}

View File

@@ -12,7 +12,6 @@ import { alarm } from 'store/alarm';
import { app } from 'store/app';
import Url from 'lib/url-parser';
import { IStrategyExpression, IRequestParams } from 'types/alarm';
@observer
export class AddAlarm extends SearchAndFilterContainer {
public isDetailPage = window.location.pathname.includes('/alarm-detail'); // 判断是否为详情
@@ -90,8 +89,8 @@ export class AddAlarm extends SearchAndFilterContainer {
const filterObj = this.typeForm.getFormData().filterObj;
// tslint:disable-next-line:max-line-length
if (!actionValue || !timeValue || !typeValue || !strategyList.length || !filterObj || !filterObj.filterList.length) {
message.error('请正确填写必填项');
return null;
message.error('请正确填写必填项');
return null;
}
if (filterObj.monitorType === 'online-kafka-topic-throttled') {
@@ -101,13 +100,17 @@ export class AddAlarm extends SearchAndFilterContainer {
tval: [typeValue.app],
});
}
this.id && filterObj.filterList.forEach((item: any) => {
if (item.tkey === 'cluster') {
item.tval = [item.clusterIdentification]
}
})
strategyList = strategyList.map((row: IStrategyExpression) => {
return {
...row,
metric: filterObj.monitorType,
};
});
return {
appId: typeValue.app,
name: typeValue.alarmName,
@@ -129,7 +132,7 @@ export class AddAlarm extends SearchAndFilterContainer {
public renderAlarmStrategy() {
return (
<div className="config-wrapper">
<span className="span-tag"></span>
<span className="span-tag" data-set={alarm.monitorType}></span>
<div className="info-wrapper">
<WrappedDynamicSetStrategy wrappedComponentRef={(form: any) => this.strategyForm = form} />
</div>
@@ -139,9 +142,9 @@ export class AddAlarm extends SearchAndFilterContainer {
public renderTimeForm() {
return (
<>
<WrappedTimeForm wrappedComponentRef={(form: any) => this.timeForm = form} />
</>
<>
<WrappedTimeForm wrappedComponentRef={(form: any) => this.timeForm = form} />
</>
);
}
@@ -164,7 +167,7 @@ export class AddAlarm extends SearchAndFilterContainer {
{this.renderAlarmStrategy()}
{this.renderTimeForm()}
<ActionForm ref={(actionForm) => this.actionForm = actionForm} />
</div>
</div>
</Spin>
);
}

View File

@@ -5,6 +5,7 @@ import { IStringMap } from 'types/base-type';
import { IRequestParams } from 'types/alarm';
import { IFormSelect, IFormItem, FormItemType } from 'component/x-form';
import { searchProps } from 'constants/table';
import { alarm } from 'store/alarm';
interface IDynamicProps {
form: any;
@@ -27,6 +28,7 @@ class DynamicSetStrategy extends React.Component<IDynamicProps> {
public crudList = [] as ICRUDItem[];
public state = {
shouldUpdate: false,
monitorType: alarm.monitorType
};
public componentDidMount() {
@@ -130,7 +132,7 @@ class DynamicSetStrategy extends React.Component<IDynamicProps> {
if (lineValue.func === 'happen' && paramsArray.length > 1 && paramsArray[0] < paramsArray[1]) {
strategyList = []; // 清空赋值
return message.error('周期值应大于次数') ;
return message.error('周期值应大于次数');
}
lineValue.params = paramsArray.join(',');
@@ -292,8 +294,39 @@ class DynamicSetStrategy extends React.Component<IDynamicProps> {
}
return element;
}
public renderFormList(row: ICRUDItem) {
public unit(monitorType: string) {
let element = null;
switch (monitorType) {
case 'online-kafka-topic-msgIn':
element = "条/秒"
break;
case 'online-kafka-topic-bytesIn':
element = "字节/秒"
break;
case 'online-kafka-topic-bytesRejected':
element = "字节/秒"
break;
case 'online-kafka-topic-produce-throttled':
element = "1表示被限流"
break;
case 'online-kafka-topic-fetch-throttled':
element = "1表示被限流"
break;
case 'online-kafka-consumer-maxLag':
element = "条"
break;
case 'online-kafka-consumer-lag':
element = "条"
break;
case 'online-kafka-consumer-maxDelayTime':
element = "秒"
break;
}
return (
<span>{element}</span>
)
}
public renderFormList(row: ICRUDItem, monitorType: string) {
const key = row.id;
const funcType = row.func;
@@ -309,6 +342,7 @@ class DynamicSetStrategy extends React.Component<IDynamicProps> {
key: key + '-func',
} as IFormSelect)}
{this.getFuncItem(row)}
{row.func !== 'c_avg_rate_abs' && row.func !== 'pdiff' ? this.unit(monitorType) : null}
</div>
);
}
@@ -340,8 +374,8 @@ class DynamicSetStrategy extends React.Component<IDynamicProps> {
<Form>
{crudList.map((row, index) => {
return (
<div key={index}>
{this.renderFormList(row)}
<div key={`${index}-${this.state.monitorType}`}>
{this.renderFormList(row, alarm.monitorType)}
{
crudList.length > 1 ? (
<Icon

View File

@@ -50,23 +50,23 @@ export class TypeForm extends React.Component {
return (
<>
<div className="config-wrapper">
<span className="span-tag"></span>
<div className="alarm-x-form type-form">
<XFormComponent
ref={form => this.$form = form}
formData={formData}
formMap={xTypeFormMap}
layout="inline"
/>
</div>
</div >
<div className="config-wrapper">
<span className="span-tag"></span>
<div className="alarm-x-form type-form">
<WrappedDynamicSetFilter wrappedComponentRef={(form: any) => this.filterForm = form} />
</div>
</div >
<div className="config-wrapper">
<span className="span-tag"></span>
<div className="alarm-x-form type-form">
<XFormComponent
ref={form => this.$form = form}
formData={formData}
formMap={xTypeFormMap}
layout="inline"
/>
</div>
</div >
<div className="config-wrapper">
<span className="span-tag"></span>
<div className="alarm-x-form type-form">
<WrappedDynamicSetFilter wrappedComponentRef={(form: any) => this.filterForm = form} />
</div>
</div >
</>
);
}

View File

@@ -31,11 +31,11 @@ export class ClusterOverview extends React.Component<IOverview> {
const content = this.props.basicInfo as IBasicInfo;
const clusterContent = [{
value: content.clusterName,
label: '集群中文名称',
label: '集群名称',
},
{
value: content.clusterName,
label: '集群英文名称',
value: content.clusterIdentification,
label: '集群标识',
},
{
value: clusterTypeMap[content.mode],
@@ -44,8 +44,8 @@ export class ClusterOverview extends React.Component<IOverview> {
value: moment(content.gmtCreate).format(timeFormat),
label: '接入时间',
}, {
value: content.physicalClusterId,
label: '物理集群ID',
value: content.clusterId,
label: '集群ID',
}];
const clusterInfo = [{
value: content.clusterVersion,

View File

@@ -13,32 +13,14 @@ const { confirm } = Modal;
export const getClusterColumns = (urlPrefix: string) => {
return [
{
title: '逻辑集群ID',
title: '集群ID',
dataIndex: 'clusterId',
key: 'clusterId',
width: '9%',
sorter: (a: IClusterData, b: IClusterData) => b.clusterId - a.clusterId,
},
{
title: '逻辑集群中文名称',
dataIndex: 'clusterName',
key: 'clusterName',
width: '13%',
onCell: () => ({
style: {
maxWidth: 120,
...cellStyle,
},
}),
sorter: (a: IClusterData, b: IClusterData) => a.clusterName.charCodeAt(0) - b.clusterName.charCodeAt(0),
render: (text: string, record: IClusterData) => (
<Tooltip placement="bottomLeft" title={text} >
<a href={`${urlPrefix}/cluster/cluster-detail?clusterId=${record.clusterId}`}> {text} </a>
</Tooltip>
),
},
{
title: '逻辑集群英文名称',
title: '集群名称',
dataIndex: 'clusterName',
key: 'clusterName',
width: '13%',
@@ -55,6 +37,24 @@ export const getClusterColumns = (urlPrefix: string) => {
</Tooltip>
),
},
// {
// title: '逻辑集群英文名称',
// dataIndex: 'clusterName',
// key: 'clusterName',
// width: '13%',
// onCell: () => ({
// style: {
// maxWidth: 120,
// ...cellStyle,
// },
// }),
// sorter: (a: IClusterData, b: IClusterData) => a.clusterName.charCodeAt(0) - b.clusterName.charCodeAt(0),
// render: (text: string, record: IClusterData) => (
// <Tooltip placement="bottomLeft" title={text} >
// <a href={`${urlPrefix}/cluster/cluster-detail?clusterId=${record.clusterId}`}> {text} </a>
// </Tooltip>
// ),
// },
{
title: 'Topic数量',
dataIndex: 'topicNum',

View File

@@ -78,7 +78,7 @@ export class MyCluster extends SearchAndFilterContainer {
rules: [
{
required: true,
pattern: /^.{5,}.$/,
pattern: /^.{4,}.$/,
message: '请输入至少5个字符',
},
],
@@ -160,7 +160,7 @@ export class MyCluster extends SearchAndFilterContainer {
data = searchKey ? origin.filter((item: IClusterData) =>
(item.clusterName !== undefined && item.clusterName !== null) && item.clusterName.toLowerCase().includes(searchKey as string),
) : origin ;
) : origin;
return data;
}

View File

@@ -127,7 +127,7 @@ class DataMigrationFormTable extends React.Component<IFormTableProps> {
key: 'retentionTime', // originalRetentionTime
width: '132px',
sorter: (a: IRenderData, b: IRenderData) => b.retentionTime - a.retentionTime,
render: (time: any) => transMSecondToHour(time),
render: (time: any) => transMSecondToHour(time),
}, {
title: 'BrokerID',
dataIndex: 'brokerIdList',
@@ -254,7 +254,7 @@ class DataMigrationFormTable extends React.Component<IFormTableProps> {
dataSource={this.props.data}
columns={columns}
pagination={false}
scroll={{y: 520}}
scroll={{ y: 520 }}
className="migration-table"
/>
</EditableContext.Provider>
@@ -316,7 +316,7 @@ export class InfoForm extends React.Component<IFormTableProps> {
<Form.Item label="迁移说明" key={2} className="form-item">
{getFieldDecorator('description', {
initialValue: '',
rules: [{ required: true, message: '请输入至少5个字符', pattern: /^.{5,}.$/ }],
rules: [{ required: true, message: '请输入至少5个字符', pattern: /^.{4,}.$/ }],
})(
<Input.TextArea rows={5} placeholder="请输入至少5个字符" />,
)}

View File

@@ -23,13 +23,22 @@ export const showEditClusterTopic = (item: IClusterTopics) => {
{
key: 'appId',
label: '应用ID',
type: 'select',
options: app.adminAppData.map(item => {
return {
label: item.appId,
value: item.appId,
};
}),
rules: [{
required: true,
message: '请输入应用ID',
// message: '请输入应用ID',
// message: '请输入应用ID应用名称只支持字母、数字、下划线、短划线长度限制在3-64字符',
// pattern: /[_a-zA-Z0-9_-]{3,64}$/,
}],
attrs: {
placeholder: '请输入应用ID',
disabled: true,
// disabled: true,
},
},
{
@@ -104,7 +113,7 @@ export const showLogicalClusterOpModal = (clusterId: number, record?: ILogicalCl
}
const updateFormModal = (isShow: boolean) => {
const formMap = wrapper.xFormWrapper.formMap;
isShow ? formMap.splice(2, 0,
isShow ? formMap.splice(3, 0,
{
key: 'appId',
label: '所属应用',
@@ -119,7 +128,7 @@ export const showLogicalClusterOpModal = (clusterId: number, record?: ILogicalCl
attrs: {
placeholder: '请选择所属应用',
},
}) : formMap.splice(2, 1);
}) : formMap.splice(3, 1);
const formData = wrapper.xFormWrapper.formData;
wrapper.ref && wrapper.ref.updateFormMap$(formMap, formData || {});
};
@@ -129,30 +138,30 @@ export const showLogicalClusterOpModal = (clusterId: number, record?: ILogicalCl
formMap: [
{
key: 'logicalClusterName',
label: '逻辑集群中文名称',
label: '逻辑集群名称',
// defaultValue:'',
rules: [{
required: true,
message: '请输入逻辑集群中文名称,支持中文、字母、数字、下划线(_)和短划线(-)组成长度在3-128字符之间', // 不能以下划线_和短划线(-)开头和结尾
rules: [{
required: true,
message: '请输入逻辑集群名称,支持中文、字母、数字、下划线(_)和短划线(-)组成长度在3-128字符之间', // 不能以下划线_和短划线(-)开头和结尾
pattern: /^[a-zA-Z0-9_\-\u4e00-\u9fa5]{3,128}$/g, //(?!(_|\-))(?!.*?(_|\-)$)
}],
attrs: {
// disabled: record ? true : false,
placeholder:'请输入逻辑集群中文名称'
placeholder: '请输入逻辑集群名称'
},
},
{
key: 'logicalClusterName1',
label: '逻辑集群英文名称',
key: 'logicalClusterIdentification',
label: '逻辑集群标识',
// defaultValue:'',
rules: [{
required: true,
message: '请输入逻辑集群英文名称,支持字母、数字、下划线(_)和短划线(-)组成长度在3-128字符之间', //不能以下划线_和短划线(-)开头和结尾
pattern:/^[a-zA-Z0-9_\-]{3,128}$/g, //(?!(_|\-))(?!.*?(_|\-)$)
rules: [{
required: true,
message: '请输入逻辑集群标识,支持字母、数字、下划线(_)和短划线(-)组成长度在3-128字符之间', //不能以下划线_和短划线(-)开头和结尾
pattern: /^[a-zA-Z0-9_\-]{3,128}$/g, //(?!(_|\-))(?!.*?(_|\-)$)
}],
attrs: {
disabled: record ? true : false,
placeholder:'请输入逻辑集群英文名称,创建后无法修改'
placeholder: '请输入逻辑集标识,创建后无法修改'
},
},
{
@@ -233,7 +242,7 @@ export const showLogicalClusterOpModal = (clusterId: number, record?: ILogicalCl
id: record ? record.logicalClusterId : '',
mode: value.mode,
name: value.logicalClusterName,
englishName:value.logicalClusterEName, // 存储逻辑集群英文名称
identification: value.logicalClusterIdentification,
regionIdList: value.regionIdList,
} as INewLogical;
if (record) {
@@ -246,7 +255,25 @@ export const showLogicalClusterOpModal = (clusterId: number, record?: ILogicalCl
});
},
};
if (record && record.mode != 0) {
isShow = true;
let formMap: any = xFormModal.formMap
formMap.splice(3, 0, {
key: 'appId',
label: '所属应用',
rules: [{ required: true, message: '请选择所属应用' }],
type: 'select',
options: app.adminAppData.map(item => {
return {
label: item.name,
value: item.appId,
};
}),
attrs: {
placeholder: '请选择所属应用',
},
})
}
wrapper.open(xFormModal);
};

View File

@@ -50,7 +50,10 @@ class CustomForm extends React.Component<IXFormProps> {
notification.success({ message: '扩分成功' });
this.props.form.resetFields();
admin.getClusterTopics(this.props.clusterId);
});
}).catch(err => {
notification.error({ message: '扩分成功' });
})
}
});
}
@@ -93,7 +96,7 @@ class CustomForm extends React.Component<IXFormProps> {
{/* 运维管控-topic信息-扩分区操作 */}
<Form.Item label="所属region" >
{getFieldDecorator('regionNameList', {
initialValue: admin.topicsBasic ? admin.topicsBasic.regionNameList : '',
initialValue: admin.topicsBasic && admin.topicsBasic.regionNameList.length > 0 ? admin.topicsBasic.regionNameList.join(',') : ' ',
rules: [{ required: true, message: '请输入所属region' }],
})(<Input disabled={true} />)}
</Form.Item>

View File

@@ -186,10 +186,10 @@ export const createMigrationTasks = () => {
label: '初始限流',
rules: [{
required: true,
message: '请输入初始限流',
message: '请输入初始限流,并按照:“限流上限>初始限流>限流下限”的大小顺序',
}],
attrs: {
placeholder: '请输入初始限流',
placeholder: '请输入初始限流,并按照:“限流上限>初始限流>限流下限”的大小顺序',
suffix: 'MB/s',
},
},
@@ -198,10 +198,10 @@ export const createMigrationTasks = () => {
label: '限流上限',
rules: [{
required: true,
message: '请输入限流上限',
message: '请输入限流上限,并按照:“限流上限>初始限流>限流下限”的大小顺序',
}],
attrs: {
placeholder: '请输入限流上限',
placeholder: '请输入限流上限,并按照:“限流上限>初始限流>限流下限”的大小顺序',
suffix: 'MB/s',
},
},
@@ -210,10 +210,10 @@ export const createMigrationTasks = () => {
label: '限流下限',
rules: [{
required: true,
message: '请输入限流下限',
message: '请输入限流下限,并按照:“限流上限>初始限流>限流下限”的大小顺序',
}],
attrs: {
placeholder: '请输入限流下限',
placeholder: '请输入限流下限,并按照:“限流上限>初始限流>限流下限”的大小顺序',
suffix: 'MB/s',
},
},
@@ -224,7 +224,7 @@ export const createMigrationTasks = () => {
rules: [{
required: false,
message: '请输入至少5个字符',
pattern: /^.{5,}.$/,
pattern: /^.{4,}.$/,
}],
attrs: {
placeholder: '请输入备注',

View File

@@ -29,7 +29,7 @@ export const showEditModal = (record?: IAppItem, from?: string, isDisabled?: boo
rules: [{
required: isDisabled ? false : true,
message: '应用名称只支持中文、字母、数字、下划线、短划线长度限制在3-64字符',
pattern: /[\u4e00-\u9fa5_a-zA-Z0-9_-]{3,64}/,
pattern: /[\u4e00-\u9fa5_a-zA-Z0-9_-]{3,64}$/,
}],
attrs: { disabled: isDisabled },
}, {

View File

@@ -29,7 +29,7 @@ export const showCpacityModal = (item: IClusterData) => {
key: 'description',
label: '申请原因',
type: 'text_area',
rules: [{ required: true, pattern: /^.{5,}.$/, message: '请输入至少5个字符' }],
rules: [{ required: true, pattern: /^.{4,}.$/, message: '请输入至少5个字符' }],
attrs: {
placeholder: '请输入至少5个字符',
},
@@ -44,12 +44,12 @@ export const showCpacityModal = (item: IClusterData) => {
type: value.type,
applicant: users.currentUser.username,
description: value.description,
extensions: JSON.stringify({clusterId: item.clusterId}),
extensions: JSON.stringify({ clusterId: item.clusterId }),
};
cluster.applyCpacity(cpacityParams).then(data => {
notification.success({
message: `申请${value.type === 5 ? '扩容' : '缩容'}成功`,
});
});
window.location.href = `${urlPrefix}/user/order-detail/?orderId=${data.id}&region=${region.currentRegion}`;
});
},

View File

@@ -22,7 +22,7 @@ export const applyTopic = () => {
formMap: [
{
key: 'clusterId',
label: '所属逻辑集群:',
label: '所属集群:',
type: 'select',
options: cluster.clusterData,
rules: [{ required: true, message: '请选择' }],
@@ -75,7 +75,7 @@ export const applyTopic = () => {
key: 'description',
label: '申请原因',
type: 'text_area',
rules: [{ required: true, pattern: /^.{5,}.$/s, message: '5' }],
rules: [{ required: true, pattern: /^.{4,}.$/s, message: '5' }],
attrs: {
placeholder: `概要描述Topic的数据源, Topic数据的生产者/消费者, Topic的申请原因及备注信息等。最多100个字
例如:
@@ -180,13 +180,14 @@ export const showApplyQuatoModal = (item: ITopic | IAppsIdInfo, record: IQuotaQu
const isConsume = item.access === 0 || item.access === 2;
const xFormModal = {
formMap: [
// {
// key: 'clusterName',
// label: '逻辑集群名称',
// rules: [{ required: true, message: '' }],
// attrs: { disabled: true },
// invisible: !item.hasOwnProperty('clusterName'),
// },
{
key: 'clusterName',
label: '逻辑集群名称',
rules: [{ required: true, message: '' }],
attrs: { disabled: true },
invisible: !item.hasOwnProperty('clusterName'),
}, {
key: 'topicName',
label: 'Topic名称',
rules: [{ required: true, message: '' }],
@@ -225,7 +226,7 @@ export const showApplyQuatoModal = (item: ITopic | IAppsIdInfo, record: IQuotaQu
key: 'description',
label: '申请原因',
type: 'text_area',
rules: [{ required: true, pattern: /^.{5,}.$/, message: quotaRemarks }],
rules: [{ required: true, pattern: /^.{4,}.$/, message: quotaRemarks }],
attrs: {
placeholder: quotaRemarks,
},
@@ -292,13 +293,15 @@ const updateFormModal = (appId: string) => {
export const showTopicApplyQuatoModal = (item: ITopic) => {
const xFormModal = {
formMap: [
// {
// key: 'clusterName',
// label: '逻辑集群名称',
// rules: [{ required: true, message: '' }],
// attrs: { disabled: true },
// defaultValue: item.clusterName,
// // invisible: !item.hasOwnProperty('clusterName'),
// },
{
key: 'clusterName',
label: '逻辑集群名称',
rules: [{ required: true, message: '' }],
attrs: { disabled: true },
// invisible: !item.hasOwnProperty('clusterName'),
}, {
key: 'topicName',
label: 'Topic名称',
rules: [{ required: true, message: '' }],
@@ -530,7 +533,7 @@ const showAllPermission = (appId: string, item: ITopic, access: number) => {
rules: [{
required: true,
validator: (rule: any, value: string, callback: any) => {
const regexp = /^.{5,}.$/;
const regexp = /^.{4,}.$/;
value = value.trim();
if (!regexp.test(value)) {
callback('请输入至少5个字符');
@@ -629,7 +632,7 @@ export const showPermissionModal = (item: ITopic) => {
rules: [{
required: true,
validator: (rule: any, value: string, callback: any) => {
const regexp = /^.{5,}.$/;
const regexp = /^.{4,}.$/;
value = value.trim();
if (!regexp.test(value)) {
callback('请输入至少5个字符');
@@ -678,7 +681,7 @@ export const showTopicEditModal = (item: ITopic) => {
key: 'description',
label: '备注',
type: 'text_area',
rules: [{ required: false }, { pattern: /^.{5,}.$/, message: '请输入至少5个字符' }],
rules: [{ required: false }, { pattern: /^.{4,}.$/, message: '请输入至少5个字符' }],
},
],
formData: {

View File

@@ -85,7 +85,6 @@ export const applyQuotaQuery = (item: ITopic) => {
};
export const applyTopicQuotaQuery = async (item: ITopic) => {
console.log(item)
await app.getTopicAppQuota(item.clusterId, item.topicName);
await showTopicApplyQuatoModal(item);
};
@@ -142,7 +141,7 @@ export const getAllTopicColumns = (urlPrefix: string) => {
<Tooltip placement="bottomLeft" title={record.topicName} >
<a
// tslint:disable-next-line:max-line-length
href={`${urlPrefix}/topic/topic-detail?clusterId=${record.clusterId}&topic=${record.topicName}&region=${region.currentRegion}&needAuth=${record.needAuth}&clusterName=${record.clusterName}`}
href={`${urlPrefix}/topic/topic-detail?clusterId=${record.clusterId}&topic=${record.topicName}&region=${region.currentRegion}&needAuth=${record.needAuth}`}
>{text}</a>
</Tooltip>);
},

View File

@@ -60,7 +60,7 @@ export class AllTopic extends SearchAndFilterContainer {
if (cluster.allActive !== -1 || searchKey !== '') {
data = origin.filter(d =>
((d.topicName !== undefined && d.topicName !== null) && d.topicName.toLowerCase().includes(searchKey as string)
|| ((d.appPrincipals !== undefined && d.appPrincipals !== null) && d.appPrincipals.toLowerCase().includes(searchKey as string)))
|| ((d.appPrincipals !== undefined && d.appPrincipals !== null) && d.appPrincipals.toLowerCase().includes(searchKey as string)))
&& (cluster.allActive === -1 || d.clusterId === cluster.allActive),
);
} else {

View File

@@ -69,7 +69,7 @@ export class BaseInformation extends React.Component<IInfoProps> {
label: '压缩格式',
value: baseInfo.topicCodeC,
}, {
label: '所属物理集群ID',
label: '集群ID',
value: baseInfo.clusterId,
}, {
label: '所属region',

View File

@@ -95,23 +95,23 @@ export class BillInformation extends SearchAndFilterContainer {
}
public render() {
return(
return (
<>
<div className="k-row" >
<ul className="k-tab">
<li>&nbsp;
<div className="k-row" >
<ul className="k-tab">
<li>&nbsp;
<a
// tslint:disable-next-line:max-line-length
href="https://github.com/didi/kafka-manager"
target="_blank"
>
<Icon type="question-circle" />
</a>
</li>
{this.renderDatePick()}
</ul>
{this.renderChart()}
</div>
// tslint:disable-next-line:max-line-length
href="https://github.com/didi/kafka-manager"
target="_blank"
>
<Icon type="question-circle" />
</a>
</li>
{this.renderDatePick()}
</ul>
{this.renderChart()}
</div>
</>
);
}

View File

@@ -1,7 +1,7 @@
import * as React from 'react';
import './index.less';
import { wrapper, region } from 'store';
import { Tabs, PageHeader, Button, notification, Drawer, message, Icon } from 'antd';
import { Tabs, PageHeader, Button, notification, Drawer, message, Icon, Spin } from 'antd';
import { observer } from 'mobx-react';
import { BaseInformation } from './base-information';
import { StatusChart } from './status-chart';
@@ -44,6 +44,7 @@ export class TopicDetail extends React.Component<any> {
drawerVisible: false,
infoVisible: false,
infoTopicList: [] as IInfoData[],
isExecutionBtn: false
};
private $formRef: any;
@@ -54,7 +55,7 @@ export class TopicDetail extends React.Component<any> {
const url = Url();
this.clusterId = Number(url.search.clusterId);
this.needAuth = url.search.needAuth;
this.clusterName = url.search.clusterName;
this.clusterName = decodeURI(decodeURI(url.search.clusterName));
this.topicName = url.search.topic;
const isPhysical = Url().search.hasOwnProperty('isPhysicalClusterId');
this.isPhysicalTrue = isPhysical ? '&isPhysicalClusterId=true' : '';
@@ -197,7 +198,9 @@ export class TopicDetail extends React.Component<any> {
formData={formData}
formMap={formMap}
/>
<Button type="primary" onClick={this.drawerSubmit} className="sample-button"></Button>
<Button type="primary" onClick={this.drawerSubmit} className="sample-button" disabled={this.state.isExecutionBtn}>
{this.state.isExecutionBtn ? (<span><Spin indicator={this.antIcon} size="small" /></span>) : '采 样'}
</Button>
{infoVisible ? this.renderInfo() : null}
</Drawer>
</>
@@ -243,7 +246,11 @@ export class TopicDetail extends React.Component<any> {
);
}
// 执行加载图标
public antIcon = <Icon type="loading" style={{ fontSize: 12, color: '#cccccc', marginLeft: '5px' }} spin />
public drawerSubmit = (value: any) => {
this.setState({ isExecutionBtn: true })
this.$formRef.validateFields((error: Error, result: any) => {
if (error) {
return;
@@ -253,9 +260,12 @@ export class TopicDetail extends React.Component<any> {
this.setState({
infoTopicList: data,
infoVisible: true,
isExecutionBtn: false
});
message.success('采样成功');
});
}).catch(err => {
this.setState({ isExecutionBtn: false })
})
});
}
@@ -315,6 +325,7 @@ export class TopicDetail extends React.Component<any> {
public componentDidMount() {
topic.getTopicBasicInfo(this.clusterId, this.topicName);
topic.getTopicBusiness(this.clusterId, this.topicName);
app.getAppList();
}
public render() {
@@ -326,7 +337,6 @@ export class TopicDetail extends React.Component<any> {
topicName: this.topicName,
clusterName: this.clusterName
} as ITopic;
app.getAppList();
return (
<>
@@ -342,9 +352,9 @@ export class TopicDetail extends React.Component<any> {
{this.needAuth == "true" && <Button key="0" type="primary" onClick={() => showAllPermissionModal(topicRecord)} ></Button>}
<Button key="1" type="primary" onClick={() => applyTopicQuotaQuery(topicRecord)} ></Button>
<Button key="2" type="primary" onClick={() => applyExpandModal(topicRecord)} ></Button>
<Button key="3" type="primary" onClick={() => this.props.history.push(`/alarm/add`)} ></Button>
<Button key="4" type="primary" onClick={this.showDrawer.bind(this)} ></Button>
{showEditBtn && <Button key="5" onClick={() => this.compileDetails()} type="primary"></Button>}
<Button key="3" type="primary" onClick={() => this.props.history.push(`/alarm/add`)} ></Button>
<Button key="4" type="primary" onClick={this.showDrawer.bind(this)} ></Button>
{/* {showEditBtn && <Button key="5" onClick={() => this.compileDetails()} type="primary">编辑</Button>} */}
</>
}
/>

View File

@@ -30,7 +30,7 @@ export class MineTopic extends SearchAndFilterContainer {
if (cluster.active !== -1 || app.active !== '-1' || searchKey !== '') {
data = origin.filter(d =>
((d.topicName !== undefined && d.topicName !== null) && d.topicName.toLowerCase().includes(searchKey as string)
|| ((d.appName !== undefined && d.appName !== null) && d.appName.toLowerCase().includes(searchKey as string)))
|| ((d.appName !== undefined && d.appName !== null) && d.appName.toLowerCase().includes(searchKey as string)))
&& (cluster.active === -1 || d.clusterId === cluster.active)
&& (app.active === '-1' || d.appId === (app.active + '')),
);
@@ -152,18 +152,18 @@ export class MineTopic extends SearchAndFilterContainer {
public render() {
return (
<>
<div className="min-width">
<Tabs activeKey={location.hash.substr(1) || '1'} type="card" onChange={(key) => this.handleTabKey(key)}>
<TabPane tab="有效Topic" key="1" >
{this.renderOperationPanel(1)}
{this.renderMyTopicTable(this.getData(topic.mytopicData))}
</TabPane>
<TabPane tab="已过期Topic" key="2">
{this.renderOperationPanel(2)}
{this.renderDeprecatedTopicTable(this.getData(topic.expireData))}
</TabPane>
</Tabs>
</div>
<div className="min-width">
<Tabs activeKey={location.hash.substr(1) || '1'} type="card" onChange={(key) => this.handleTabKey(key)}>
<TabPane tab="有效Topic" key="1" >
{this.renderOperationPanel(1)}
{this.renderMyTopicTable(this.getData(topic.mytopicData))}
</TabPane>
<TabPane tab="已过期Topic" key="2">
{this.renderOperationPanel(2)}
{this.renderDeprecatedTopicTable(this.getData(topic.expireData))}
</TabPane>
</Tabs>
</div>
</>
);
}

View File

@@ -79,7 +79,7 @@ export class MyBill extends React.Component {
}
public renderTableList() {
const userUrl=`${urlPrefix}/user/bill-detail`
const userUrl = `${urlPrefix}/user/bill-detail`
return (
<Table
rowKey="key"
@@ -89,11 +89,11 @@ export class MyBill extends React.Component {
/>
);
}
public renderChart() {
return (
<div className="chart-box">
<BarChartComponet ref={(ref) => this.chart = ref } getChartData={this.getData.bind(this, null)} />
<BarChartComponet ref={(ref) => this.chart = ref} getChartData={this.getData.bind(this, null)} />
</div>
);
}
@@ -131,7 +131,7 @@ export class MyBill extends React.Component {
<>
<div className="container">
<Tabs defaultActiveKey="1" type="card">
<TabPane
<TabPane
tab={<>
<span></span>&nbsp;
<a
@@ -141,7 +141,7 @@ export class MyBill extends React.Component {
>
<Icon type="question-circle" />
</a>
</>}
</>}
key="1"
>
{this.renderDatePick()}

View File

@@ -33,6 +33,7 @@ const checkStatus = (res: Response) => {
};
const filter = (init: IInit) => (res: IRes) => {
if (res.code !== 0 && res.code !== 200) {
if (!init.errorNoTips) {
notification.error({

View File

@@ -96,7 +96,8 @@ class Alarm {
@action.bound
public setMonitorType(data: IMonitorMetricType) {
this.monitorTypeList = data.metricNames || [];
this.monitorType = this.monitorTypeList[0].metricName;
// this.monitorType = this.monitorTypeList[0].metricName;
this.monitorType = '';
}
@action.bound

View File

@@ -21,7 +21,7 @@ class Cluster {
public selectData: IClusterData[] = [{
value: -1,
label: '所有集群',
} as IClusterData,
} as IClusterData,
];
@observable
@@ -31,7 +31,7 @@ class Cluster {
public selectAllData: IClusterData[] = [{
value: -1,
label: '所有集群',
} as IClusterData,
} as IClusterData,
];
@observable
@@ -59,7 +59,7 @@ class Cluster {
public clusterMetrics: IClusterMetrics[] = [];
@observable
public type: IOptionType = 'byteIn/byteOut' ;
public type: IOptionType = 'byteIn/byteOut';
@observable
public clusterTopics: IClusterTopics[] = [];
@@ -130,11 +130,11 @@ class Cluster {
public setClusterCombos(data: IConfigInfo[]) {
this.clusterComboList = data || [];
this.clusterComboList = this.clusterComboList.map(item => {
return {
...item,
label: item.message,
value: item.code,
};
return {
...item,
label: item.message,
value: item.code,
};
});
}
@@ -148,7 +148,7 @@ class Cluster {
value: item.code,
};
});
this.clusterMode = (this.clusterModes && this.clusterModes.filter(ele => ele.code !== 0) ) || []; // 去除 0 共享集群
this.clusterMode = (this.clusterModes && this.clusterModes.filter(ele => ele.code !== 0)) || []; // 去除 0 共享集群
}
@action.bound
@@ -158,7 +158,7 @@ class Cluster {
@action.bound
public setClusterDetailRealTime(data: IClusterReal) {
this.clusterRealData = data;
this.clusterRealData = data;
this.setRealLoading(false);
}
@@ -192,9 +192,9 @@ class Cluster {
@action.bound
public setClusterDetailThrottles(data: IThrottles[]) {
this.clustersThrottles = data ? data.map((item, index) => {
item.key = index;
return item;
}) : [];
item.key = index;
return item;
}) : [];
}
@action.bound

View File

@@ -19,6 +19,7 @@ export interface IStrategyFilter {
tkey: string;
topt: string;
tval: string[];
clusterIdentification?: string;
}
export interface IRequestParams {
appId: string;

View File

@@ -23,6 +23,7 @@ export interface IBtn {
}
export interface IClusterData {
clusterIdentification: any;
clusterId: number;
mode: number;
clusterName: string;
@@ -598,10 +599,12 @@ export interface IClusterReal {
}
export interface IBasicInfo {
clusterIdentification: any;
bootstrapServers: string;
clusterId: number;
mode: number;
clusterName: string;
clusterNameCn: string;
clusterVersion: string;
gmtCreate: number;
gmtModify: number;
@@ -920,8 +923,9 @@ export interface INewLogical {
mode: number;
name: string;
logicalClusterName?: string;
logicalClusterEName?: string;
logicalClusterNameCn?: string;
regionIdList: number[];
logicalClusterIdentification?:string
}
export interface IPartitionsLocation {

View File

@@ -5,13 +5,13 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-core</artifactId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>2.1.0-SNAPSHOT</version>
<version>${kafka-manager.revision}</version>
</parent>
<properties>

View File

@@ -92,20 +92,4 @@ public class ConsumerMetadataCache {
}
return consumerMetadata.getTopicNameConsumerGroupMap().getOrDefault(topicName, new HashSet<>());
}
public static Map<String, List<String>> getConsumerGroupAppIdListInZk(Long clusterId) {
    // Consumer-group -> appId list, from the metadata discovered via ZooKeeper.
    // An absent cluster yields an empty map rather than null.
    ConsumerMetadata metadata = CG_METADATA_IN_ZK_MAP.get(clusterId);
    return metadata == null ? new HashMap<>(0) : metadata.getConsumerGroupAppMap();
}
public static Map<String, List<String>> getConsumerGroupAppIdListInBK(Long clusterId) {
    // Consumer-group -> appId list, from the metadata discovered via the brokers.
    // An absent cluster yields an empty map rather than null.
    ConsumerMetadata metadata = CG_METADATA_IN_BK_MAP.get(clusterId);
    return metadata == null ? new HashMap<>(0) : metadata.getConsumerGroupAppMap();
}
}

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.service.cache;
import com.google.common.collect.Sets;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
@@ -15,6 +16,7 @@ import org.springframework.stereotype.Service;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
/**
* 逻辑集群元信息
@@ -67,6 +69,19 @@ public class LogicalClusterMetadataManager {
return LOGICAL_CLUSTER_ID_BROKER_ID_MAP.getOrDefault(logicClusterId, new HashSet<>());
}
public Long getTopicLogicalClusterId(Long physicalClusterId, String topicName) {
    // Caches are filled lazily: force a load on first access.
    if (!LOADED.get()) {
        flush();
    }
    // TOPIC_LOGICAL_MAP: physical cluster id -> (topic name -> logical cluster id).
    Map<String, Long> topicToLogicalId = TOPIC_LOGICAL_MAP.get(physicalClusterId);
    return ValidateUtils.isNull(topicToLogicalId) ? null : topicToLogicalId.get(topicName);
}
public LogicalClusterDO getTopicLogicalCluster(Long physicalClusterId, String topicName) {
if (!LOADED.get()) {
flush();
@@ -144,9 +159,16 @@ public class LogicalClusterMetadataManager {
@Scheduled(cron="0/30 * * * * ?")
public void flush() {
List<LogicalClusterDO> logicalClusterDOList = logicalClusterService.listAll();
if (ValidateUtils.isEmptyList(logicalClusterDOList)) {
return;
if (ValidateUtils.isNull(logicalClusterDOList)) {
logicalClusterDOList = Collections.EMPTY_LIST;
}
Set<Long> inDbLogicalClusterIds = logicalClusterDOList.stream()
.map(LogicalClusterDO::getId)
.collect(Collectors.toSet());
// inCache 和 inDb 取差集,差集结果为已删除的、新增的.
Sets.SetView<Long> diffLogicalClusterIds = Sets.difference(LOGICAL_CLUSTER_MAP.keySet(), inDbLogicalClusterIds);
diffLogicalClusterIds.forEach(logicalClusterId -> delLogicalClusterInCache(logicalClusterId));
Map<Long, RegionDO> regionMap = new HashMap<>();
List<RegionDO> regionDOList = regionService.listAll();
@@ -197,4 +219,11 @@ public class LogicalClusterMetadataManager {
}
TOPIC_LOGICAL_MAP.put(logicalClusterDO.getClusterId(), subMap);
}
/**
 * Evict every cached view of a logical cluster that no longer exists in the DB.
 * Called from flush() for ids found in cache but absent from the DB.
 * @param logicalClusterId id of the deleted logical cluster
 */
private void delLogicalClusterInCache(Long logicalClusterId) {
    LOGICAL_CLUSTER_ID_TOPIC_NAME_MAP.remove(logicalClusterId);
    LOGICAL_CLUSTER_ID_BROKER_ID_MAP.remove(logicalClusterId);
    LOGICAL_CLUSTER_MAP.remove(logicalClusterId);
    // Bug fix: TOPIC_LOGICAL_MAP is keyed by the PHYSICAL cluster id (see
    // getTopicLogicalClusterId and the flush() population code), so the old
    // TOPIC_LOGICAL_MAP.remove(logicalClusterId) either removed the wrong
    // physical cluster's entry or removed nothing. Instead, strip the
    // topic -> logicalClusterId mappings that point at the deleted cluster.
    for (Map<String, Long> topicToLogicalId : TOPIC_LOGICAL_MAP.values()) {
        topicToLogicalId.values().removeIf(logicalClusterId::equals);
    }
}
}

View File

@@ -4,15 +4,19 @@ import com.xiaojukeji.kafka.manager.common.bizenum.KafkaBrokerRoleEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.entity.KafkaVersion;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConfig;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.ControllerData;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import com.xiaojukeji.kafka.manager.service.zookeeper.*;
@@ -48,6 +52,12 @@ public class PhysicalClusterMetadataManager {
@Autowired
private ConfigUtils configUtils;
@Autowired
private TopicDao topicDao;
@Autowired
private AuthorityDao authorityDao;
private final static Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();
private final static Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
@@ -110,13 +120,20 @@ public class PhysicalClusterMetadataManager {
return;
}
JmxConfig jmxConfig = null;
try {
jmxConfig = JsonUtils.stringToObj(clusterDO.getJmxProperties(), JmxConfig.class);
} catch (Exception e) {
LOGGER.error("class=PhysicalClusterMetadataManager||method=addNew||clusterDO={}||msg=parse jmx properties failed", JsonUtils.toJSONString(clusterDO));
}
//增加Broker监控
BrokerStateListener brokerListener = new BrokerStateListener(clusterDO.getId(), zkConfig, configUtils.getJmxMaxConn());
BrokerStateListener brokerListener = new BrokerStateListener(clusterDO.getId(), zkConfig, jmxConfig);
brokerListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);
//增加Topic监控
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig);
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, topicDao, authorityDao);
topicListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener);
@@ -272,7 +289,7 @@ public class PhysicalClusterMetadataManager {
//---------------------------Broker元信息相关--------------
public static void putBrokerMetadata(Long clusterId, Integer brokerId, BrokerMetadata brokerMetadata, Integer jmxMaxConn) {
public static void putBrokerMetadata(Long clusterId, Integer brokerId, BrokerMetadata brokerMetadata, JmxConfig jmxConfig) {
Map<Integer, BrokerMetadata> metadataMap = BROKER_METADATA_MAP.get(clusterId);
if (metadataMap == null) {
return;
@@ -280,7 +297,7 @@ public class PhysicalClusterMetadataManager {
metadataMap.put(brokerId, brokerMetadata);
Map<Integer, JmxConnectorWrap> jmxMap = JMX_CONNECTOR_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxMaxConn));
jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig));
JMX_CONNECTOR_MAP.put(clusterId, jmxMap);
Map<Integer, KafkaVersion> versionMap = KAFKA_VERSION_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());

View File

@@ -9,6 +9,19 @@ import java.util.List;
import java.util.Properties;
public interface AdminService {
/**
* 创建Topic
* @param clusterDO 集群DO
* @param topicDO TopicDO
* @param partitionNum 分区数
* @param replicaNum 副本数
* @param regionId RegionID
* @param brokerIdList BrokerId
* @param properties Topic属性
* @param applicant 申请人
* @param operator 操作人
* @return 操作状态
*/
ResultStatus createTopic(ClusterDO clusterDO,
TopicDO topicDO,
Integer partitionNum,
@@ -19,19 +32,86 @@ public interface AdminService {
String applicant,
String operator);
/**
* 删除Topic
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param operator 操作人
* @return 操作状态
*/
ResultStatus deleteTopic(ClusterDO clusterDO,
String topicName,
String operator);
/**
* 优先副本选举状态
* @param clusterDO 集群DO
* @return 任务状态
*/
TaskStatusEnum preferredReplicaElectionStatus(ClusterDO clusterDO);
/**
* 集群纬度优先副本选举
* @param clusterDO 集群DO
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String operator);
/**
* Broker纬度优先副本选举
* @param clusterDO 集群DO
* @param brokerId BrokerID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator);
/**
* Topic纬度优先副本选举
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, String operator);
/**
* 分区纬度优先副本选举
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param partitionId 分区ID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, Integer partitionId, String operator);
/**
* Topic扩分区
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param partitionNum 新增分区数
* @param regionId RegionID
* @param brokerIdList 集群ID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus expandPartitions(ClusterDO clusterDO, String topicName, Integer partitionNum, Long regionId, List<Integer> brokerIdList, String operator);
/**
* 获取Topic配置
* @param clusterDO 集群DO
* @param topicName Topic名称
* @return 任务状态
*/
Properties getTopicConfig(ClusterDO clusterDO, String topicName);
/**
* 修改Topic配置
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param properties 新的属性
* @param operator 操作人
* @return 任务状态
*/
ResultStatus modifyTopicConfig(ClusterDO clusterDO, String topicName, Properties properties, String operator);
}

View File

@@ -1,7 +1,9 @@
package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.ClusterNameDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
@@ -43,5 +45,10 @@ public interface ClusterService {
ResultStatus deleteById(Long clusterId);
ClusterDO selectSuitableCluster(Long clusterId, String dataCenter);
/**
* 获取优先被选举为controller的broker
* @param clusterId 集群ID
* @return 优先被选举为controller的broker列表
*/
Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId);
}

View File

@@ -2,14 +2,14 @@ package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* consumer相关的服务接口
@@ -20,33 +20,36 @@ public interface ConsumerService {
/**
* 获取消费组列表
*/
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId);
List<ConsumerGroup> getConsumerGroupList(Long clusterId);
/**
* 查询消费Topic的消费组
*/
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName);
List<ConsumerGroup> getConsumerGroupList(Long clusterId, String topicName);
/**
* 获取消费Topic的消费组概要信息
*/
List<ConsumerGroupSummary> getConsumerGroupSummaries(Long clusterId, String topicName);
/**
* 查询消费详情
*/
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO);
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);
/**
* 获取消费组消费的Topic列表
*/
List<String> getConsumerGroupConsumedTopicList(Long clusterId, String consumerGroup, String location);
Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO);
Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);
/**
* 重置offset
*/
List<Result> resetConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO,
ConsumerGroup consumerGroup,
List<PartitionOffsetDTO> partitionOffsetDTOList);
Map<Long, Integer> getConsumerGroupNumMap(List<ClusterDO> clusterDOList);

View File

@@ -66,6 +66,19 @@ public interface TopicManagerService {
*/
ResultStatus modifyTopic(Long clusterId, String topicName, String description, String operator);
/**
* 修改Topic
* @param clusterId 集群ID
* @param topicName Topic名称
* @param appId 所属应用
* @param description 备注
* @param operator 操作人
* @author zengqiao
* @date 20/5/12
* @return ResultStatus
*/
ResultStatus modifyTopicByOp(Long clusterId, String topicName, String appId, String description, String operator);
/**
* 通过Topic名称删除
* @param clusterId 集群id

View File

@@ -3,11 +3,27 @@ package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi.TopicJmxSwitch;
import java.util.List;
/**
 * ZooKeeper-backed operations (data read from / written to the cluster's ZK).
 * @author tukun
 * @date 2015/11/11.
 */
public interface ZookeeperService {
/**
 * Enable JMX collection for a topic by writing the switch to ZooKeeper.
 * @param clusterId cluster ID
 * @param topicName topic name
 * @param jmxSwitch which JMX collectors to turn on for the topic
 * @return operation result
 */
Result openTopicJmx(Long clusterId, String topicName, TopicJmxSwitch jmxSwitch);
/**
 * Fetch the brokers that are preferred to be elected as controller.
 * @param clusterId cluster ID
 * @return Result wrapping the candidate broker-id list
 */
Result<List<Integer>> getControllerPreferredCandidates(Long clusterId);
}

View File

@@ -60,4 +60,6 @@ public interface AuthorityService {
int addAuthorityAndQuota(AuthorityDO authorityDO, TopicQuota quota);
Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority();
int deleteAuthorityByTopic(Long clusterId, String topicName);
}

View File

@@ -1,18 +1,86 @@
package com.xiaojukeji.kafka.manager.service.service.gateway;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.*;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import java.util.List;
public interface GatewayConfigService {
/**
 * Fetch the bootstrap-server addresses of all clusters, for gateway service discovery.
 * @param requestVersion config version the caller currently holds
 * @return the bootstrap-server config, or null on failure
 */
KafkaBootstrapServerConfig getKafkaBootstrapServersConfig(Long requestVersion);
/**
 * Fetch the request-queue config used by service discovery.
 * @param requestVersion config version the caller currently holds
 * @return the request-queue config, or null on failure
 */
RequestQueueConfig getRequestQueueConfig(Long requestVersion);
/**
 * Fetch the per-app request-rate config used by service discovery.
 * @param requestVersion config version the caller currently holds
 * @return the app rate config, or null on failure
 */
AppRateConfig getAppRateConfig(Long requestVersion);
/**
 * Fetch the per-IP request-rate config used by service discovery.
 * @param requestVersion config version the caller currently holds
 * @return the IP rate config, or null on failure
 */
IpRateConfig getIpRateConfig(Long requestVersion);
/**
 * Fetch the specific per-IP / per-app rate-limit config used by service discovery.
 * @param requestVersion config version the caller currently holds
 * @return the specific rate config, or null on failure
 */
SpRateConfig getSpRateConfig(Long requestVersion);
/**
 * Look up a single config row by type and name.
 * @param configType config type
 * @param configName config name
 * @return the matching config, or null if absent
 */
GatewayConfigDO getByTypeAndName(String configType, String configName);
/**
 * List all gateway configs.
 * @return all configs; empty list on failure
 */
List<GatewayConfigDO> list();
/**
 * Create a new config entry.
 * @param gatewayConfigDO the config to insert
 * @return operation result
 */
Result insert(GatewayConfigDO gatewayConfigDO);
/**
 * Delete a config entry by id.
 * @param id config ID
 * @return operation result
 */
Result deleteById(Long id);
/**
 * Update an existing config entry.
 * @param gatewayConfigDO new config values (same id/type/name as the stored row)
 * @return operation result
 */
Result updateById(GatewayConfigDO gatewayConfigDO);
/**
 * Look up a single config row by id.
 * @param id config ID
 * @return the matching config, or null if absent
 */
GatewayConfigDO getById(Long id);
}

View File

@@ -196,8 +196,7 @@ public class AppServiceImpl implements AppService {
}
@Override
public List<AppTopicDTO> getAppTopicDTOList(String appId,
Boolean mine) {
public List<AppTopicDTO> getAppTopicDTOList(String appId, Boolean mine) {
// 查询AppID
AppDO appDO = appDao.getByAppId(appId);
if (ValidateUtils.isNull(appDO)) {
@@ -223,13 +222,17 @@ public class AppServiceImpl implements AppService {
TopicDO topicDO = topicMap
.getOrDefault(authorityDO.getClusterId(), new HashMap<>())
.get(authorityDO.getTopicName());
if (ValidateUtils.isNull(topicDO)) {
continue;
}
if (Boolean.TRUE.equals(mine)
&& (ValidateUtils.isNull(topicDO) || !topicDO.getAppId().equals(appId))) {
&& !topicDO.getAppId().equals(appId)) {
continue;
}
if (Boolean.FALSE.equals(mine)
&& !ValidateUtils.isNull(topicDO)
&& topicDO.getAppId().equals(appId)) {
continue;
}

View File

@@ -192,4 +192,10 @@ public class AuthorityServiceImpl implements AuthorityService {
public Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority() {
return authorityDao.getAllAuthority();
}
/**
 * Delete all authority (ACL) records bound to the given topic.
 * Pure delegation to the DAO; the return value is the number of rows removed.
 * @param clusterId cluster ID
 * @param topicName topic name
 * @return number of deleted rows
 */
@Override
public int deleteAuthorityByTopic(Long clusterId, String topicName) {
return authorityDao.deleteAuthorityByTopic(clusterId, topicName);
}
}

View File

@@ -2,6 +2,8 @@ package com.xiaojukeji.kafka.manager.service.service.gateway.impl;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.bizenum.gateway.GatewayConfigKeyEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.*;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -13,6 +15,7 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -21,7 +24,7 @@ import java.util.Map;
* @author zengqiao
* @date 20/7/28
*/
@Service("gatewayConfigService")
@Service
public class GatewayConfigServiceImpl implements GatewayConfigService {
private final Logger LOGGER = LoggerFactory.getLogger(GatewayConfigServiceImpl.class);
@@ -52,7 +55,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
? new KafkaBootstrapServerConfig(maxVersion, clusterIdBootstrapServersMap)
: new KafkaBootstrapServerConfig(requestVersion, new HashMap<>(0));
} catch (Exception e) {
LOGGER.error("get kafka bootstrap servers config failed, data:{}.", JSON.toJSONString(doList), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getKafkaBootstrapServersConfig||data={}||errMsg={}||msg=get kafka bootstrap servers config failed",
JSON.toJSONString(doList), e.getMessage());
}
return null;
}
@@ -71,7 +75,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
return new RequestQueueConfig(configDO.getVersion(), Long.valueOf(configDO.getValue()));
} catch (Exception e) {
LOGGER.error("get request queue config failed, data:{}.", JSON.toJSONString(configDO), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getRequestQueueConfig||data={}||errMsg={}||msg=get request queue config failed",
JSON.toJSONString(configDO), e.getMessage());
}
return null;
}
@@ -90,7 +95,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
return new AppRateConfig(configDO.getVersion(), Long.valueOf(configDO.getValue()));
} catch (Exception e) {
LOGGER.error("get app rate config failed, data:{}.", JSON.toJSONString(configDO), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getAppRateConfig||data={}||errMsg={}||msg=get app rate config failed",
JSON.toJSONString(configDO), e.getMessage());
}
return null;
}
@@ -153,4 +159,94 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
}
return null;
}
@Override
public List<GatewayConfigDO> list() {
    // Dump every gateway config; degrade to an empty list when the DB is unavailable.
    try {
        return gatewayConfigDao.list();
    } catch (Exception e) {
        LOGGER.debug("class=GatewayConfigServiceImpl||method=list||errMsg={}||msg=list failed", e.getMessage());
        return new ArrayList<>();
    }
}
/**
 * Create a new gateway config. The new row's version is set to
 * max(existing versions of the same type) + 1 so consumers polling by
 * version always observe the change.
 * @param gatewayConfigDO config to insert (type/name/value must all be valid)
 * @return SUCCESS, PARAM_ILLEGAL on bad input, or MYSQL_ERROR on DB failure
 */
@Override
public Result insert(GatewayConfigDO gatewayConfigDO) {
    try {
        GatewayConfigKeyEnum configKeyEnum = GatewayConfigKeyEnum.getByConfigType(gatewayConfigDO.getType());
        // Bug fix: the original used `&&`, which only rejected the request when the
        // type AND name AND value were all invalid at once. Any single invalid
        // field must reject the request.
        if (ValidateUtils.isNull(configKeyEnum)
                || ValidateUtils.isBlank(gatewayConfigDO.getName())
                || ValidateUtils.isBlank(gatewayConfigDO.getValue())) {
            // 参数错误
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        // Bug fix: the original version computation could duplicate an existing
        // version (e.g. existing versions [5, 6] yielded 6). Use max + 1.
        List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(gatewayConfigDO.getType());
        long maxVersion = 0L;
        for (GatewayConfigDO elem : gatewayConfigDOList) {
            if (elem.getVersion() > maxVersion) {
                maxVersion = elem.getVersion();
            }
        }
        gatewayConfigDO.setVersion(maxVersion + 1L);
        if (gatewayConfigDao.insert(gatewayConfigDO) > 0) {
            return Result.buildSuc();
        }
        return Result.buildFrom(ResultStatus.MYSQL_ERROR);
    } catch (Exception e) {
        // Log at error level: a swallowed insert failure at debug level is invisible in prod.
        LOGGER.error("class=GatewayConfigServiceImpl||method=insert||data={}||errMsg={}||msg=insert failed", gatewayConfigDO, e.getMessage());
    }
    return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
@Override
public Result deleteById(Long id) {
    // Remove one gateway config row; distinguish "not found" from a DB failure.
    try {
        return gatewayConfigDao.deleteById(id) > 0
                ? Result.buildSuc()
                : Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
    } catch (Exception e) {
        LOGGER.debug("class=GatewayConfigServiceImpl||method=deleteById||id={}||errMsg={}||msg=delete failed", id, e.getMessage());
        return Result.buildFrom(ResultStatus.MYSQL_ERROR);
    }
}
/**
 * Update an existing gateway config. Name and type are immutable; only the
 * value may change, and the stored version is bumped by one.
 * @param newGatewayConfigDO the new config values (id selects the row)
 * @return SUCCESS, RESOURCE_NOT_EXIST if the id is unknown, PARAM_ILLEGAL on
 *         a name/type change or blank value, MYSQL_ERROR on DB failure
 */
@Override
public Result updateById(GatewayConfigDO newGatewayConfigDO) {
    try {
        GatewayConfigDO oldGatewayConfigDO = this.getById(newGatewayConfigDO.getId());
        if (ValidateUtils.isNull(oldGatewayConfigDO)) {
            return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
        }
        // Name and type must match the stored row, and the new value must not be blank.
        if (!oldGatewayConfigDO.getName().equals(newGatewayConfigDO.getName())
                || !oldGatewayConfigDO.getType().equals(newGatewayConfigDO.getType())
                || ValidateUtils.isBlank(newGatewayConfigDO.getValue())) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        newGatewayConfigDO.setVersion(oldGatewayConfigDO.getVersion() + 1);
        // Bug fix: the original persisted oldGatewayConfigDO, silently discarding the
        // caller's new value AND the bumped version. Persist the updated record.
        if (gatewayConfigDao.updateById(newGatewayConfigDO) > 0) {
            return Result.buildSuc();
        }
        return Result.buildFrom(ResultStatus.MYSQL_ERROR);
    } catch (Exception e) {
        // Log at error level: a swallowed update failure at debug level is invisible in prod.
        LOGGER.error("class=GatewayConfigServiceImpl||method=updateById||data={}||errMsg={}||msg=update failed", newGatewayConfigDO, e.getMessage());
    }
    return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
@Override
public GatewayConfigDO getById(Long id) {
    // A null id can never match a row; short-circuit before touching the DAO.
    if (ValidateUtils.isNull(id)) {
        return null;
    }
    GatewayConfigDO configDO = null;
    try {
        configDO = gatewayConfigDao.getById(id);
    } catch (Exception e) {
        LOGGER.debug("class=GatewayConfigServiceImpl||method=getById||id={}||errMsg={}||msg=get failed", id, e.getMessage());
    }
    return configDO;
}
}

View File

@@ -13,6 +13,7 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.service.gateway.AuthorityService;
@@ -139,6 +140,9 @@ public class AdminServiceImpl implements AdminService {
// 3. 数据库中删除topic
topicManagerService.deleteByTopicName(clusterDO.getId(), topicName);
// 4. 数据库中删除authority
authorityService.deleteAuthorityByTopic(clusterDO.getId(), topicName);
return rs;
}
@@ -191,15 +195,55 @@ public class AdminServiceImpl implements AdminService {
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator) {
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterDO.getId(), brokerId);
if (null == brokerMetadata) {
if (ValidateUtils.isNull(brokerMetadata)) {
return ResultStatus.PARAM_ILLEGAL;
}
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
if (ValidateUtils.isEmptyMap(partitionMap)) {
return ResultStatus.SUCCESS;
}
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
/**
 * Topic-scope preferred replica election: elect the preferred leader for every
 * partition of the given topic.
 * @param clusterDO cluster
 * @param topicName topic name
 * @param operator operator name
 * @return TOPIC_NOT_EXIST if the topic is unknown, otherwise the result of the
 *         partition-map overload
 */
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, String operator) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (ValidateUtils.isNull(topicMetadata)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
// Collect every partition id of the topic from the in-memory metadata cache.
Map<String, List<Integer>> partitionMap = new HashMap<>();
partitionMap.put(topicName, new ArrayList<>(topicMetadata.getPartitionMap().getPartitions().keySet()));
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, Integer partitionId, String operator) {
    // Partition-scope election: validate topic and partition, then delegate to the
    // partition-map overload with a single-entry map.
    TopicMetadata metadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
    if (ValidateUtils.isNull(metadata)) {
        return ResultStatus.TOPIC_NOT_EXIST;
    }
    boolean partitionKnown = metadata.getPartitionMap().getPartitions().containsKey(partitionId);
    if (!partitionKnown) {
        return ResultStatus.PARTITION_NOT_EXIST;
    }
    Map<String, List<Integer>> electionTarget = new HashMap<>();
    electionTarget.put(topicName, Arrays.asList(partitionId));
    return preferredReplicaElection(clusterDO, electionTarget, operator);
}
private ResultStatus preferredReplicaElection(ClusterDO clusterDO, Map<String, List<Integer>> partitionMap, String operator) {
if (ValidateUtils.isEmptyMap(partitionMap)) {
return ResultStatus.SUCCESS;
}
ZkUtils zkUtils = null;
try {
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
if (partitionMap == null || partitionMap.isEmpty()) {
return ResultStatus.SUCCESS;
}
String preferredReplicaElectString = convert2preferredReplicaElectString(partitionMap);
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(),

View File

@@ -1,11 +1,16 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.DBStatusEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.ClusterNameDTO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.*;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.dao.ClusterDao;
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
@@ -14,6 +19,7 @@ import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
import com.xiaojukeji.kafka.manager.service.service.RegionService;
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
@@ -57,6 +63,9 @@ public class ClusterServiceImpl implements ClusterService {
@Autowired
private ConfigUtils configUtils;
@Autowired
private ZookeeperService zookeeperService;
@Override
public ResultStatus addNew(ClusterDO clusterDO, String operator) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isNull(operator)) {
@@ -194,6 +203,7 @@ public class ClusterServiceImpl implements ClusterService {
zk.close();
}
} catch (Throwable t) {
return false;
}
}
return true;
@@ -262,21 +272,6 @@ public class ClusterServiceImpl implements ClusterService {
return ResultStatus.SUCCESS;
}
@Override
public ClusterDO selectSuitableCluster(Long clusterId, String dataCenter) {
if (!ValidateUtils.isNullOrLessThanZero(clusterId)) {
return getById(clusterId);
}
if (ValidateUtils.isBlank(dataCenter)) {
return null;
}
List<ClusterDO> clusterDOList = this.listAll();
if (ValidateUtils.isEmptyList(clusterDOList)) {
return null;
}
return clusterDOList.get(0);
}
private ClusterDetailDTO getClusterDetailDTO(ClusterDO clusterDO, Boolean needDetail) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
@@ -300,4 +295,31 @@ public class ClusterServiceImpl implements ClusterService {
dto.setControllerId(PhysicalClusterMetadataManager.getControllerId(clusterDO.getId()));
return dto;
}
@Override
public Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId) {
    // Candidate broker ids come from ZK; enrich each id with cached broker metadata.
    Result<List<Integer>> brokerIdsResult = zookeeperService.getControllerPreferredCandidates(clusterId);
    if (brokerIdsResult.failed()) {
        return new Result<>(brokerIdsResult.getCode(), brokerIdsResult.getMessage());
    }
    List<Integer> brokerIds = brokerIdsResult.getData();
    if (ValidateUtils.isEmptyList(brokerIds)) {
        return Result.buildSuc(new ArrayList<>());
    }
    List<ControllerPreferredCandidate> candidates = new ArrayList<>();
    for (Integer brokerId : brokerIds) {
        ControllerPreferredCandidate candidate = new ControllerPreferredCandidate();
        candidate.setBrokerId(brokerId);
        BrokerMetadata metadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
        if (!ValidateUtils.isNull(metadata)) {
            // Broker is alive: expose its host and start time as well.
            candidate.setHost(metadata.getHost());
            candidate.setStartTime(metadata.getTimestamp());
            candidate.setStatus(DBStatusEnum.ALIVE.getStatus());
        } else {
            candidate.setStatus(DBStatusEnum.DEAD.getStatus());
        }
        candidates.add(candidate);
    }
    return Result.buildSuc(candidates);
}
}

View File

@@ -2,13 +2,14 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetPosEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.SinkMonitorSystemEnum;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -23,6 +24,7 @@ import kafka.admin.AdminClient;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.types.SchemaException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -44,70 +46,116 @@ public class ConsumerServiceImpl implements ConsumerService {
private TopicService topicService;
@Override
public List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId) {
List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
public List<ConsumerGroup> getConsumerGroupList(Long clusterId) {
List<ConsumerGroup> consumerGroupList = new ArrayList<>();
for (OffsetLocationEnum location: OffsetLocationEnum.values()) {
Map<String, List<String>> consumerGroupAppIdMap = null;
Set<String> consumerGroupSet = null;
if (OffsetLocationEnum.ZOOKEEPER.equals(location)) {
// 获取ZK中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInZk(clusterId);
consumerGroupSet = ConsumerMetadataCache.getGroupInZkMap(clusterId);
} else if (OffsetLocationEnum.BROKER.equals(location)) {
// 获取Broker中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInBK(clusterId);
consumerGroupSet = ConsumerMetadataCache.getGroupInBrokerMap(clusterId);
}
if (consumerGroupSet == null || consumerGroupAppIdMap == null) {
if (ValidateUtils.isEmptySet(consumerGroupSet)) {
continue;
}
for (String consumerGroup : consumerGroupSet) {
consumerGroupDTOList.add(new ConsumerGroupDTO(
clusterId,
consumerGroup,
consumerGroupAppIdMap.getOrDefault(consumerGroup, new ArrayList<>()),
location)
); }
consumerGroupList.add(new ConsumerGroup(clusterId, consumerGroup, location));
}
}
return consumerGroupDTOList;
return consumerGroupList;
}
@Override
public List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName) {
List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
public List<ConsumerGroup> getConsumerGroupList(Long clusterId, String topicName) {
List<ConsumerGroup> consumerGroupList = new ArrayList<>();
for (OffsetLocationEnum location: OffsetLocationEnum.values()) {
Map<String, List<String>> consumerGroupAppIdMap = null;
Set<String> consumerGroupSet = null;
if (OffsetLocationEnum.ZOOKEEPER.equals(location)) {
// 获取ZK中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInZk(clusterId);
consumerGroupSet = ConsumerMetadataCache.getTopicConsumerGroupInZk(clusterId, topicName);
} else if (OffsetLocationEnum.BROKER.equals(location)) {
// 获取Broker中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInBK(clusterId);
consumerGroupSet = ConsumerMetadataCache.getTopicConsumerGroupInBroker(clusterId, topicName);
}
if (consumerGroupSet == null || consumerGroupAppIdMap == null) {
if (ValidateUtils.isEmptySet(consumerGroupSet)) {
continue;
}
for (String consumerGroup : consumerGroupSet) {
consumerGroupDTOList.add(new ConsumerGroupDTO(
clusterId,
consumerGroup,
consumerGroupAppIdMap.getOrDefault(consumerGroup, new ArrayList<>()),
location
)
);
consumerGroupList.add(new ConsumerGroup(clusterId, consumerGroup, location));
}
}
return consumerGroupDTOList;
return consumerGroupList;
}
@Override
public List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumeGroupDTO) {
/**
 * Builds a summary for every consumer group that consumes the given topic.
 * ZK-stored groups carry only identity fields; broker-stored groups are
 * additionally described via the broker (state + appId list).
 */
public List<ConsumerGroupSummary> getConsumerGroupSummaries(Long clusterId, String topicName) {
    List<ConsumerGroup> groupList = this.getConsumerGroupList(clusterId, topicName);
    if (ValidateUtils.isEmptyList(groupList)) {
        return Collections.emptyList();
    }
    List<ConsumerGroupSummary> summaries = new ArrayList<>();
    for (ConsumerGroup group : groupList) {
        ConsumerGroupSummary summary;
        if (!OffsetLocationEnum.ZOOKEEPER.equals(group.getOffsetStoreLocation())) {
            // Broker-stored offsets: ask the broker for group state and appIds.
            summary = getConsumerGroupSummary(clusterId, topicName, group.getConsumerGroup());
        } else {
            // ZK-stored offsets: no broker-side description available; copy identity only.
            summary = new ConsumerGroupSummary();
            summary.setClusterId(group.getClusterId());
            summary.setConsumerGroup(group.getConsumerGroup());
            summary.setOffsetStoreLocation(group.getOffsetStoreLocation());
        }
        summaries.add(summary);
    }
    return summaries;
}
/**
 * Describes a broker-stored consumer group via the Kafka AdminClient and
 * derives the appId list from consumer clientIds assigned to the given topic.
 * Always returns a non-null summary; on any failure the summary keeps its
 * defaults (empty state, empty appId list).
 */
private ConsumerGroupSummary getConsumerGroupSummary(Long clusterId, String topicName, String consumerGroup) {
// Pre-populate defaults so every exit path returns a fully-initialized object.
ConsumerGroupSummary summary = new ConsumerGroupSummary();
summary.setClusterId(clusterId);
summary.setConsumerGroup(consumerGroup);
summary.setOffsetStoreLocation(OffsetLocationEnum.BROKER);
summary.setAppIdList(new ArrayList<>());
summary.setState("");
try {
AdminClient adminClient = KafkaClientPool.getAdminClient(clusterId);
AdminClient.ConsumerGroupSummary consumerGroupSummary = adminClient.describeConsumerGroup(consumerGroup);
if (ValidateUtils.isNull(consumerGroupSummary)) {
// Group unknown to the broker — return the defaults.
return summary;
}
summary.setState(consumerGroupSummary.state());
// Scala->Java bridge: consumers() yields an Option-like collection of member lists.
java.util.Iterator<scala.collection.immutable.List<AdminClient.ConsumerSummary>> it = JavaConversions.asJavaIterator(consumerGroupSummary.consumers().iterator());
while (it.hasNext()) {
List<AdminClient.ConsumerSummary> consumerSummaryList = JavaConversions.asJavaList(it.next());
for (AdminClient.ConsumerSummary consumerSummary: consumerSummaryList) {
List<TopicPartition> topicPartitionList = JavaConversions.asJavaList(consumerSummary.assignment());
if (ValidateUtils.isEmptyList(topicPartitionList)) {
continue;
}
// Only members assigned to this topic contribute; the appId is presumably
// the clientId prefix before the first '.' — TODO confirm naming convention.
if (topicPartitionList.stream().anyMatch(elem -> elem.topic().equals(topicName)) && consumerSummary.clientId().contains(".")) {
String [] splitArray = consumerSummary.clientId().split("\\.");
summary.getAppIdList().add(splitArray[0]);
}
}
}
} catch (SchemaException e) {
// Narrow catch first: protocol/schema mismatch from the broker response.
logger.error("class=ConsumerServiceImpl||method=getConsumerGroupSummary||clusterId={}||topicName={}||consumerGroup={}||errMsg={}||schema exception",
clusterId, topicName, consumerGroup, e.getMessage());
} catch (Exception e) {
// Best-effort: log and fall through with whatever was collected so far.
logger.error("class=ConsumerServiceImpl||method=getConsumerGroupSummary||clusterId={}||topicName={}||consumerGroup={}||errMsg={}||throws exception",
clusterId, topicName, consumerGroup, e.getMessage());
}
// De-duplicate appIds (multiple members may share a clientId prefix).
summary.setAppIdList(new ArrayList<>(new HashSet<>(summary.getAppIdList())));
return summary;
}
@Override
public List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (topicMetadata == null) {
logger.warn("class=ConsumerServiceImpl||method=getConsumeDetail||clusterId={}||topicName={}||msg=topicMetadata is null!",
@@ -116,10 +164,10 @@ public class ConsumerServiceImpl implements ConsumerService {
}
List<ConsumeDetailDTO> consumerGroupDetailDTOList = null;
if (OffsetLocationEnum.ZOOKEEPER.equals(consumeGroupDTO.getOffsetStoreLocation())) {
consumerGroupDetailDTOList = getConsumerPartitionStateInZK(clusterDO, topicMetadata, consumeGroupDTO);
} else if (OffsetLocationEnum.BROKER.equals(consumeGroupDTO.getOffsetStoreLocation())){
consumerGroupDetailDTOList = getConsumerPartitionStateInBroker(clusterDO, topicMetadata, consumeGroupDTO);
if (OffsetLocationEnum.ZOOKEEPER.equals(consumerGroup.getOffsetStoreLocation())) {
consumerGroupDetailDTOList = getConsumerPartitionStateInZK(clusterDO, topicMetadata, consumerGroup);
} else if (OffsetLocationEnum.BROKER.equals(consumerGroup.getOffsetStoreLocation())){
consumerGroupDetailDTOList = getConsumerPartitionStateInBroker(clusterDO, topicMetadata, consumerGroup);
}
if (consumerGroupDetailDTOList == null) {
logger.info("class=ConsumerServiceImpl||method=getConsumeDetail||msg=consumerGroupDetailDTOList is null!");
@@ -147,7 +195,7 @@ public class ConsumerServiceImpl implements ConsumerService {
}
@Override
public List<Result> resetConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO, List<PartitionOffsetDTO> partitionOffsetDTOList) {
public List<Result> resetConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup, List<PartitionOffsetDTO> partitionOffsetDTOList) {
Map<TopicPartition, Long> offsetMap = partitionOffsetDTOList.stream().collect(Collectors.toMap(elem -> {return new TopicPartition(topicName, elem.getPartitionId());}, PartitionOffsetDTO::getOffset));
List<Result> resultList = new ArrayList<>();
@@ -155,12 +203,12 @@ public class ConsumerServiceImpl implements ConsumerService {
KafkaConsumer<String, String> kafkaConsumer = null;
try {
Properties properties = KafkaClientPool.createProperties(clusterDO, false);
properties.setProperty("group.id", consumerGroupDTO.getConsumerGroup());
properties.setProperty("group.id", consumerGroup.getConsumerGroup());
kafkaConsumer = new KafkaConsumer<>(properties);
checkAndCorrectPartitionOffset(kafkaConsumer, offsetMap);
return resetConsumerOffset(clusterDO, kafkaConsumer, consumerGroupDTO, offsetMap);
return resetConsumerOffset(clusterDO, kafkaConsumer, consumerGroup, offsetMap);
} catch (Exception e) {
logger.error("create kafka consumer failed, clusterId:{} topicName:{} consumerGroup:{} partition:{}.", clusterDO.getId(), topicName, consumerGroupDTO, partitionOffsetDTOList, e);
logger.error("create kafka consumer failed, clusterId:{} topicName:{} consumerGroup:{} partition:{}.", clusterDO.getId(), topicName, consumerGroup, partitionOffsetDTOList, e);
resultList.add(new Result(
ResultStatus.OPERATION_FAILED.getCode(),
"reset failed, create KafkaConsumer or check offset failed"
@@ -173,20 +221,20 @@ public class ConsumerServiceImpl implements ConsumerService {
return resultList;
}
private List<Result> resetConsumerOffset(ClusterDO cluster, KafkaConsumer<String, String> kafkaConsumer, ConsumerGroupDTO consumerGroupDTO, Map<TopicPartition, Long> offsetMap) {
private List<Result> resetConsumerOffset(ClusterDO cluster, KafkaConsumer<String, String> kafkaConsumer, ConsumerGroup consumerGroup, Map<TopicPartition, Long> offsetMap) {
List<Result> resultList = new ArrayList<>();
for(Map.Entry<TopicPartition, Long> entry: offsetMap.entrySet()){
TopicPartition tp = entry.getKey();
Long offset = entry.getValue();
try {
if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetLocationEnum.ZOOKEEPER)) {
resetConsumerOffsetInZK(cluster, consumerGroupDTO.getConsumerGroup(), tp, offset);
} else if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetLocationEnum.BROKER)) {
if (consumerGroup.getOffsetStoreLocation().equals(OffsetLocationEnum.ZOOKEEPER)) {
resetConsumerOffsetInZK(cluster, consumerGroup.getConsumerGroup(), tp, offset);
} else if (consumerGroup.getOffsetStoreLocation().equals(OffsetLocationEnum.BROKER)) {
resetConsumerOffsetInBroker(kafkaConsumer, tp, offset);
}
} catch (Exception e) {
logger.error("reset failed, clusterId:{} consumerGroup:{} topic-partition:{}.", cluster.getId(), consumerGroupDTO, tp, e);
logger.error("reset failed, clusterId:{} consumerGroup:{} topic-partition:{}.", cluster.getId(), consumerGroup, tp, e);
resultList.add(new Result(
ResultStatus.OPERATION_FAILED.getCode(),
"reset failed..."));
@@ -232,14 +280,14 @@ public class ConsumerServiceImpl implements ConsumerService {
@Override
public Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isBlank(topicName) || ValidateUtils.isNull(consumerGroupDTO)) {
ConsumerGroup consumerGroup) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isBlank(topicName) || ValidateUtils.isNull(consumerGroup)) {
return null;
}
if (OffsetLocationEnum.BROKER.equals(consumerGroupDTO.getOffsetStoreLocation())) {
return getConsumerOffsetFromBK(clusterDO, topicName, consumerGroupDTO.getConsumerGroup());
} else if (OffsetLocationEnum.ZOOKEEPER.equals(consumerGroupDTO.getOffsetStoreLocation())) {
return getConsumerOffsetFromZK(clusterDO.getId(), topicName, consumerGroupDTO.getConsumerGroup());
if (OffsetLocationEnum.BROKER.equals(consumerGroup.getOffsetStoreLocation())) {
return getConsumerOffsetFromBK(clusterDO, topicName, consumerGroup.getConsumerGroup());
} else if (OffsetLocationEnum.ZOOKEEPER.equals(consumerGroup.getOffsetStoreLocation())) {
return getConsumerOffsetFromZK(clusterDO.getId(), topicName, consumerGroup.getConsumerGroup());
}
return null;
}
@@ -306,9 +354,9 @@ public class ConsumerServiceImpl implements ConsumerService {
return consumerIdMap;
}
private List<ConsumeDetailDTO> getConsumerPartitionStateInBroker(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroupDTO consumerGroupDTO) {
Map<Integer, String> consumerIdMap = getConsumeIdMap(clusterDO.getId(), topicMetadata.getTopic(), consumerGroupDTO.getConsumerGroup());
Map<Integer, String> consumeOffsetMap = getOffsetByGroupAndTopicFromBroker(clusterDO, consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic());
private List<ConsumeDetailDTO> getConsumerPartitionStateInBroker(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroup consumerGroup) {
Map<Integer, String> consumerIdMap = getConsumeIdMap(clusterDO.getId(), topicMetadata.getTopic(), consumerGroup.getConsumerGroup());
Map<Integer, String> consumeOffsetMap = getOffsetByGroupAndTopicFromBroker(clusterDO, consumerGroup.getConsumerGroup(), topicMetadata.getTopic());
List<ConsumeDetailDTO> consumeDetailDTOList = new ArrayList<>();
for (int partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
@@ -318,7 +366,7 @@ public class ConsumerServiceImpl implements ConsumerService {
try {
consumeDetailDTO.setConsumeOffset(StringUtils.isEmpty(consumeOffsetStr)? null: Long.valueOf(consumeOffsetStr));
} catch (Exception e) {
logger.error("illegal consumer offset, clusterId:{} topicName:{} consumerGroup:{} offset:{}.", clusterDO.getId(), topicMetadata.getTopic(), consumerGroupDTO.getConsumerGroup(), consumeOffsetStr, e);
logger.error("illegal consumer offset, clusterId:{} topicName:{} consumerGroup:{} offset:{}.", clusterDO.getId(), topicMetadata.getTopic(), consumerGroup.getConsumerGroup(), consumeOffsetStr, e);
}
consumeDetailDTO.setConsumerId(consumerIdMap.get(partitionId));
consumeDetailDTOList.add(consumeDetailDTO);
@@ -326,21 +374,19 @@ public class ConsumerServiceImpl implements ConsumerService {
return consumeDetailDTOList;
}
private List<ConsumeDetailDTO> getConsumerPartitionStateInZK(ClusterDO clusterDO,
TopicMetadata topicMetadata,
ConsumerGroupDTO consumerGroupDTO) {
private List<ConsumeDetailDTO> getConsumerPartitionStateInZK(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroup consumerGroup) {
ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterDO.getId());
List<ConsumeDetailDTO> consumeDetailDTOList = new ArrayList<>();
for (Integer partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
String consumeGroupPath = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumeGroupPath = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroup.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumeOffset = null;
try {
consumeOffset = zkConfig.get(consumeGroupPath);
} catch (ConfigException e) {
logger.error("get consumeOffset error for zk path:{}", consumeGroupPath, e);
}
String consumeIdZkPath = ZkPathUtil.getConsumerGroupOwnersTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumeIdZkPath = ZkPathUtil.getConsumerGroupOwnersTopicPartitionNode(consumerGroup.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumerId = null;
try {
consumerId = zkConfig.get(consumeIdZkPath);
@@ -394,7 +440,7 @@ public class ConsumerServiceImpl implements ConsumerService {
@Override
public boolean checkConsumerGroupExist(OffsetLocationEnum offsetLocation, Long clusterId, String topicName, String consumerGroup) {
List<ConsumerGroupDTO> consumerGroupList = getConsumerGroupList(clusterId, topicName).stream()
List<ConsumerGroup> consumerGroupList = getConsumerGroupList(clusterId, topicName).stream()
.filter(group -> offsetLocation.location.equals(group.getOffsetStoreLocation().location) && consumerGroup.equals(group.getConsumerGroup()))
.collect(Collectors.toList());
return !ValidateUtils.isEmptyList(consumerGroupList);

Some files were not shown because too many files have changed in this diff Show More