Compare commits

..

44 Commits

Author SHA1 Message Date
EricZeng
0ef64fa4bd Merge pull request #126 from ZHAOYINRUI/patch-8
Create alarm_rules.md
2021-01-25 11:09:21 +08:00
ZHAOYINRUI
84dbc17c22 Update alarm_rules.md 2021-01-25 11:04:30 +08:00
EricZeng
16e16e356d Merge pull request #130 from xuehaipeng/patch-1
Update faq.md
2021-01-25 10:35:12 +08:00
xuehaipeng
978ee885c4 Update faq.md 2021-01-24 20:06:29 +08:00
EricZeng
0c2af89a1c Merge pull request #125 from ZHAOYINRUI/patch-7
create kafka_metrics_desc.md
2021-01-23 11:03:14 +08:00
EricZeng
14c2dc9624 update kafka_metrics.md 2021-01-23 11:01:44 +08:00
EricZeng
4f35d710a6 Update and rename metric.md to kafka_metrics_desc.md 2021-01-23 10:58:11 +08:00
EricZeng
fdb5e018e5 Merge pull request #122 from ZHAOYINRUI/patch-4
Update README.md
2021-01-23 10:51:26 +08:00
EricZeng
6001fde25c Update dynamic_config_manager.md 2021-01-23 10:21:47 +08:00
EricZeng
ae63c0adaf Merge pull request #128 from didi/dev
add sync topic to db doc
2021-01-23 10:20:27 +08:00
zengqiao
ad1539c8f6 add sync topic to db doc 2021-01-23 10:17:59 +08:00
EricZeng
634a0c8cd0 Update faq.md 2021-01-22 20:42:13 +08:00
ZHAOYINRUI
773f9a0c63 Create alarm_rules.md 2021-01-22 18:16:51 +08:00
ZHAOYINRUI
e4e320e9e3 Create metric.md 2021-01-22 18:06:35 +08:00
ZHAOYINRUI
3b4b400e6b Update README.md 2021-01-22 15:56:53 +08:00
mike.zhangliang
f3a5e3f5ed Update README.md 2021-01-18 19:06:43 +08:00
mike.zhangliang
e685e621f3 Update README.md 2021-01-18 19:05:44 +08:00
EricZeng
2cd2be9b67 Merge pull request #112 from didi/dev
监控告警系统对接说明文档
2021-01-17 18:21:16 +08:00
zengqiao
e73d9e8a03 add monitor_system_integrate_with_self file 2021-01-17 18:18:07 +08:00
zengqiao
476f74a604 rename file 2021-01-17 16:49:02 +08:00
EricZeng
ab0d1d99e6 Merge pull request #111 from didi/dev
Dev
2021-01-17 16:11:08 +08:00
zengqiao
d5680ffd5d 增加Topic同步任务&Bug修复 2021-01-16 16:26:38 +08:00
EricZeng
3c091a88d4 Merge pull request #110 from didi/master
合并master分支上的改动
2021-01-16 13:37:31 +08:00
EricZeng
49b70b33de Merge pull request #108 from didi/dev
增加application.yml文件说明 & 修改版本
2021-01-16 13:34:07 +08:00
zengqiao
c5ff2716fb 优化build.sh & yaml 2021-01-16 12:39:56 +08:00
ZQKC
400fdf0896 修复图片地址错误问题
修复图片地址错误问题
2021-01-16 12:04:20 +08:00
ZQKC
cbb8c7323c Merge pull request #109 from ZHAOYINRUI/master
架构图更新、钉钉群ID更新
2021-01-16 09:33:19 +08:00
ZHAOYINRUI
60e79f8f77 Update README.md 2021-01-16 00:25:06 +08:00
ZHAOYINRUI
0e829d739a Add files via upload 2021-01-16 00:22:31 +08:00
ZQKC
62abb274e0 增加application.yml文件说明
增加application.yml文件说明
2021-01-15 19:14:48 +08:00
ZQKC
e4028785de Update README.md
change km address
2021-01-09 15:30:30 +08:00
mrazkong
2bb44bcb76 Update Intergration_n9e_monitor.md 2021-01-07 17:09:15 +08:00
mike.zhangliang
684599f81b Update README.md 2021-01-07 15:44:17 +08:00
mike.zhangliang
b56d28f5df Update README.md 2021-01-07 15:43:07 +08:00
ZHAOYINRUI
02b9ac04c8 Update user_guide_cn.md 2020-12-30 22:44:23 +08:00
ZQKC
abb652ebd5 Merge pull request #104 from didi/dev
v2.1版本合并
2020-12-19 01:14:26 +08:00
ZQKC
ff78a9cc35 Merge pull request #101 from didi/dev
use mysql 8
2020-12-11 11:49:06 +08:00
ZQKC
aea63cad52 Merge pull request #94 from didi/dev
增加FAQ
2020-11-22 21:49:48 +08:00
ZQKC
dd6069e41a Merge pull request #93 from didi/dev
夜莺Mon集成配置说明
2020-11-22 20:09:34 +08:00
ZQKC
4d9a327b1f Merge pull request #92 from didi/dev
FIX N9e Mon
2020-11-22 18:15:49 +08:00
ZQKC
76c2477387 Merge pull request #91 from didi/dev
修复上报夜莺功能
2020-11-22 17:00:39 +08:00
ZQKC
edfd84a8e3 Merge pull request #88 from didi/dev
增加build.sh
2020-11-15 17:02:26 +08:00
ZQKC
abbe47f6b9 Merge pull request #87 from didi/dev
初始化SQL优化&KCM修复&连接信息修复
2020-11-15 16:55:42 +08:00
ZQKC
f70cfabede Merge pull request #84 from didi/dev
fix 前端资源加载问题
2020-11-14 16:56:16 +08:00
105 changed files with 2680 additions and 475 deletions

View File

@@ -9,6 +9,8 @@
## 主要功能特性
### 快速体验
- 体验地址 http://117.51.146.109:8080 账号密码 admin/admin
### 集群监控维度
@@ -32,7 +34,7 @@
## kafka-manager架构图
![kafka-manager-arch](./docs/assets/images/common/arch.png)
![kafka-manager-arch](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgDHNDLj2ChKxctSuf72)
## 相关文档
@@ -45,13 +47,17 @@
## 钉钉交流群
![dingding_group](./docs/assets/images/common/dingding_group.jpg)
钉钉群ID32821440
## OCE认证
OCE是一个认证机制和交流平台为Logi-KafkaManager生产用户量身打造我们会为OCE企业提供更好的技术支持比如专属的技术沙龙、企业一对一的交流机会、专属的答疑群等如果贵司Logi-KafkaManager上了生产[快来加入吧](http://obsuite.didiyun.com/open/openAuth)
## 项目成员
### 内部核心人员
`iceyuhui``liuyaguang``limengmonty``zhangliangmike``nullhuangyiming``zengqiao``eilenexuzhe``huangjiaweihjw`
`iceyuhui``liuyaguang``limengmonty``zhangliangmike``nullhuangyiming``zengqiao``eilenexuzhe``huangjiaweihjw``zhaoyinrui``marzkonglingxu``joysunchao`
### 外部贡献者

View File

@@ -3,72 +3,52 @@ workspace=$(cd $(dirname $0) && pwd -P)
cd $workspace
## constant
km_version=2.1.0
app_name=kafka-manager-$km_version
OUTPUT_DIR=./output
KM_VERSION=2.1.0
APP_NAME=kafka-manager-$KM_VERSION
gitversion=.gitversion
control=./control.sh
create_mysql_table=./docs/install_guide/create_mysql_table.sql
app_config_file=./kafka-manager-web/src/main/resources/application.yml
MYSQL_TABLE_SQL_FILE=./docs/install_guide/create_mysql_table.sql
CONFIG_FILE=./kafka-manager-web/src/main/resources/application.yml
## function
function build() {
# 进行编译
# # cmd 设置使用的JDK, 按需选择, 默认已安装了JDK 8
# JVERSION=`java -version 2>&1 | awk 'NR==1{gsub(/"/,"");print $3}'`
# major=`echo $JVERSION | awk -F. '{print $1}'`
# mijor=`echo $JVERSION | awk -F. '{print $2}'`
# if [ $major -le 1 ] && [ $mijor -lt 8 ]; then
# export JAVA_HOME=/usr/local/jdk1.8.0_65 #(使用jdk8请设置)
# export PATH=$JAVA_HOME/bin:$PATH
# fi
# 编译命令
mvn -U clean package -Dmaven.test.skip=true
mvn -U clean package -Dmaven.test.skip=true
local sc=$?
if [ $sc -ne 0 ];then
## 编译失败, 退出码为 非0
echo "$app_name build error"
echo "$APP_NAME build error"
exit $sc
else
echo -n "$app_name build ok, vsn="`gitversion`
echo "$APP_NAME build ok"
fi
}
function make_output() {
# 新建output目录
rm -rf $app_name &>/dev/null
mkdir -p $app_name &>/dev/null
# 新建output目录
rm -rf ${OUTPUT_DIR} &>/dev/null
mkdir -p ${OUTPUT_DIR}/${APP_NAME} &>/dev/null
# 填充output目录, output内的内容 即为 线上部署内容
(
# cp -rf $control $output_dir && # 拷贝 control.sh 脚本 至output目录
cp -rf $create_mysql_table $app_name && # 拷贝 sql 初始化脚本 至output目录
cp -rf $app_config_file $app_name && # 拷贝 application.yml 至output目录
# 填充output目录, output内的内容
(
cp -rf ${MYSQL_TABLE_SQL_FILE} ${OUTPUT_DIR}/${APP_NAME} && # 拷贝 sql 初始化脚本 至output目录
cp -rf ${CONFIG_FILE} ${OUTPUT_DIR}/${APP_NAME} && # 拷贝 application.yml 至output目录
# 拷贝程序包到output路径
cp kafka-manager-web/target/kafka-manager-web-$km_version-SNAPSHOT.jar ${app_name}/${app_name}-SNAPSHOT.jar
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
# 拷贝程序包到output路径
cp kafka-manager-web/target/kafka-manager-web-${KM_VERSION}-SNAPSHOT.jar ${OUTPUT_DIR}/${APP_NAME}/${APP_NAME}-SNAPSHOT.jar
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0
}
function make_package() {
# 压缩output目录
(
tar cvzf ${app_name}.tar.gz ${app_name}
echo -e "make package ok."
cd ${OUTPUT_DIR} && tar cvzf ${APP_NAME}.tar.gz ${APP_NAME}
echo -e "make package ok."
) || { echo -e "make package error"; exit 2; } # 压缩output目录失败后, 退出码为 非0
}
## internals
function gitversion() {
git log -1 --pretty=%h > $gitversion
local gv=`cat $gitversion`
echo "$gv"
}
##########################################
## main
## 其中,
@@ -88,4 +68,4 @@ make_package
# 编译成功
echo -e "build done"
exit 0
exit 0

Binary file not shown.

After

Width:  |  Height:  |  Size: 150 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 270 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 589 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 652 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 511 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 672 KiB

View File

@@ -0,0 +1,65 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 动态配置管理
## 1、Topic定时同步任务
### 1.1、配置的用途
`Logi-KafkaManager`在设计上,所有的资源都是挂在应用(app)下面。 如果接入的Kafka集群已经存在Topic了那么会导致这些Topic不属于任何的应用从而导致很多管理上的不便。
因此需要有一个方式将这些无主的Topic挂到某个应用下面。
这里提供了一个配置会定时自动将集群无主的Topic挂到某个应用下面。
### 1.2、相关实现
就是一个定时任务,该任务会定期做同步的工作。具体代码的位置在`com.xiaojukeji.kafka.manager.task.dispatch.op`包下面的`SyncTopic2DB`类。
### 1.3、配置说明
**步骤一:开启该功能**
在application.yml文件中增加如下配置已经有该配置的话直接把false修改为true即可
```yml
# 任务相关的开关
task:
op:
sync-topic-enabled: true # 无主的Topic定期同步到DB中
```
**步骤二:配置管理中指定挂在那个应用下面**
配置的位置:
![sync_topic_to_db](./assets/dynamic_config_manager/sync_topic_to_db.jpg)
配置键:`SYNC_TOPIC_2_DB_CONFIG_KEY`
配置值(JSON数组)
- clusterId需要进行定时同步的集群ID
- defaultAppId该集群无主的Topic将挂在哪个应用下面
- addAuthority是否需要加上权限, 默认是false。因为考虑到这个挂载只是临时的我们不希望用户使用这个App同时后续可能移交给真正的所属的应用因此默认是不加上权限。
**注意这里的集群ID或者是应用ID不存在的话会导致配置不生效。该任务对已经在DB中的Topic不会进行修改**
```json
[
{
"clusterId": 1234567,
"defaultAppId": "ANONYMOUS",
"addAuthority": false
},
{
"clusterId": 7654321,
"defaultAppId": "ANONYMOUS",
"addAuthority": false
}
]
```

View File

@@ -7,7 +7,7 @@
---
# 夜莺监控集成
# 监控系统集成——夜莺
- `Kafka-Manager`通过将 监控的数据 以及 监控的规则 都提交给夜莺,然后依赖夜莺的监控系统从而实现监控告警功能。
@@ -22,10 +22,13 @@ monitor:
n9e:
nid: 2
user-token: 123456
# 夜莺 mon监控服务 地址
mon:
base-url: http://127.0.0.1:8032
sink:
base-url: http://127.0.0.1:8006
# 夜莺 transfer上传服务 地址
sink:
base-url: http://127.0.0.1:8008
# 夜莺 rdb资源服务 地址
rdb:
base-url: http://127.0.0.1:80

View File

@@ -0,0 +1,54 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 监控系统集成
- 监控系统默认与 [夜莺](https://github.com/didi/nightingale) 进行集成;
- 对接自有的监控系统需要进行简单的二次开发,即实现部分监控告警模块的相关接口即可;
- 集成会有两块内容,一个是指标数据上报的集成,还有一个是监控告警规则的集成;
## 1、指标数据上报集成
仅完成这一步的集成之后,即可将监控数据上报到监控系统中,此时已能够在自己的监控系统进行监控告警规则的配置了。
**步骤一:实现指标上报的接口**
- 按照自己内部监控系统的数据格式要求,将数据进行组装成符合自己内部监控系统要求的数据进行上报,具体的可以参考夜莺集成的实现代码。
- 至于会上报哪些指标,可以查看有哪些地方调用了该接口。
![sink_metrics](./assets/monitor_system_integrate_with_self/sink_metrics.jpg)
**步骤二:相关配置修改**
![change_config](./assets/monitor_system_integrate_with_self/change_config.jpg)
**步骤三:开启上报任务**
![open_sink_schedule](./assets/monitor_system_integrate_with_self/open_sink_schedule.jpg)
## 2、监控告警规则集成
完成**1、指标数据上报集成**之后,即可在自己的监控系统进行监控告警规则的配置了。完成该步骤的集成之后,可以在`Logi-KafkaManager`中进行监控告警规则的增删改查等等。
大体上和**1、指标数据上报集成**一致,
**步骤一:实现相关接口**
![integrate_ms](./assets/monitor_system_integrate_with_self/integrate_ms.jpg)
实现完成步骤一之后,接下来的步骤和**1、指标数据上报集成**中的步骤二、步骤三一致,都需要进行相关配置的修改即可。
## 3、总结
简单介绍了一下监控告警的集成,嫌麻烦的同学可以仅做 **1、指标数据上报集成** 这一节的内容即可满足一定场景下的需求。
**集成过程中有任何觉得文档没有说清楚的地方或者建议欢迎入群交流也欢迎贡献代码觉得好也辛苦给个star。**

View File

@@ -0,0 +1,104 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 配置说明
```yaml
server:
port: 8080 # 服务端口
tomcat:
accept-count: 1000
max-connections: 10000
max-threads: 800
min-spare-threads: 100
spring:
application:
name: kafkamanager
datasource:
kafka-manager: # 数据库连接配置
jdbc-url: jdbc:mysql://127.0.0.1:3306/kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8 #数据库的地址
username: admin # 用户名
password: admin # 密码
driver-class-name: com.mysql.jdbc.Driver
main:
allow-bean-definition-overriding: true
profiles:
active: dev # 启用的配置
servlet:
multipart:
max-file-size: 100MB
max-request-size: 100MB
logging:
config: classpath:logback-spring.xml
custom:
idc: cn # 部署的数据中心, 忽略该配置, 后续会进行删除
jmx:
max-conn: 10 # 和单台 broker 的最大JMX连接数
store-metrics-task:
community:
broker-metrics-enabled: true # 社区部分broker metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB
topic-metrics-enabled: true # 社区部分topic的metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB
didi:
app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
topic-throttled-metrics: false # 滴滴埋入的指标, 社区AK不存在该指标因此默认关闭
save-days: 7 #指标在DB中保持的天数-1表示永久保存7表示保存近7天的数据
# 任务相关的开关
task:
op:
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化
ldap:
kcm: # 集群升级部署相关的功能需要配合夜莺及S3进行使用这块我们后续专门补充一个文档细化一下牵扯到kcm_script.sh脚本的修改
enabled: false # 默认关闭
storage:
base-url: http://127.0.0.1 # 存储地址
n9e:
base-url: http://127.0.0.1:8004 # 夜莺任务中心的地址
user-token: 12345678 # 夜莺用户的token
timeout: 300 # 集群任务的超时时间,单位秒
account: root # 集群任务使用的账号
script-file: kcm_script.sh # 集群任务的脚本
monitor: # 监控告警相关的功能,需要配合夜莺进行使用
enabled: false # 默认关闭true就是开启
n9e:
nid: 2
user-token: 1234567890
mon:
# 夜莺 mon监控服务 地址
base-url: http://127.0.0.1:8032
sink:
# 夜莺 transfer上传服务 地址
base-url: http://127.0.0.1:8006
rdb:
# 夜莺 rdb资源服务 地址
base-url: http://127.0.0.1:80
# enabled: 表示是否开启监控告警的功能, true: 开启, false: 不开启
# n9e.nid: 夜莺的节点ID
# n9e.user-token: 用户的密钥,在夜莺的个人设置中
# n9e.mon.base-url: 监控地址
# n9e.sink.base-url: 数据上报地址
# n9e.rdb.base-url: 用户资源中心地址
notify: # 通知的功能
kafka: # 默认通知发送到kafka的指定Topic中
cluster-id: 95 # Topic的集群ID
topic-name: didi-kafka-notify # Topic名称
order: # 部署的KM的地址
detail-url: http://127.0.0.1
```

View File

@@ -0,0 +1,25 @@
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
## 报警策略-报警函数介绍
| 类别 | 函数 | 含义 |函数文案 |备注 |
| --- | --- | --- | --- | --- |
| 发生次数 | all, n | 最近$n个周期内全发生 | 连续发生(all) | |
| 发生次数 | happen, n, m | 最近$n个周期内发生m次 | 出现(happen) | null点也计算在n内 |
| 数学统计 | sum, n | 最近$n个周期取值 的 和 | 求和(sum) | sum_over_time |
| 数学统计 | avg, n | 最近$n个周期取值 的 平均值 | 平均值(avg) | avg_over_time |
| 数学统计 | min, n | 最近$n个周期取值 的 最小值 | 最小值(min) | min_over_time |
| 数学统计 | max, n | 最近$n个周期取值 的 最大值 | 最大值(max) | max_over_time |
| 变化率 | pdiff, n | 最近$n个点的变化率, 有一个满足 则触发 | 突增突降率(pdiff) | 假设, 最近3个周期的值分别为 v, v2, v3v为最新值那么计算公式为 any( (v-v2)/v2, (v-v3)/v3 )**区分正负** |
| 变化量 | diff, n | 最近$n个点的变化量, 有一个满足 则触发 | 突增突降值(diff) | 假设, 最近3个周期的值分别为 v, v2, v3v为最新值那么计算公式为 any( (v-v2), (v-v3) )**区分正负** |
| 变化量 | ndiff | 最近n个周期发生m次 v(t) - v(t-1) $OP threshold其中 v(t) 为最新值 | 连续变化(区分正负) - ndiff | |
| 数据中断 | nodata, t | 最近 $t 秒内 无数据上报 | 数据上报中断(nodata) | |
| 同环比 | c_avg_rate_abs, n | 最近$n个周期的取值相比 1天或7天前取值 的变化率 的绝对值 | 同比变化率(c_avg_rate_abs) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为abs((avg(v1,v2,v3) / avg(v1',v2') -1)* 100%) |
| 同环比 | c_avg_rate, n | 最近$n个周期的取值相比 1天或7天前取值 的变化率(**区分正负**) | 同比变化率(c_avg_rate) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为(avg(v1,v2,v3) / avg(v1',v2') -1)* 100% |

View File

@@ -29,7 +29,7 @@
主要用途是进行大集群的管理 & 集群细节的屏蔽。
- 逻辑集群通过逻辑集群概念将集群Broker按业务进行归类方便管理
- Region通过引入Region同时Topic按Region度创建减少Broker间的连接
- Region通过引入Region同时Topic按Region度创建减少Broker间的连接
---
@@ -53,13 +53,13 @@
- 3、数据库时区问题。
检查MySQL的topic表查看是否有数据如果有数据那么再检查设置的时区是否正确。
检查MySQL的topic_metrics、broker_metrics表,查看是否有数据,如果有数据,那么再检查设置的时区是否正确。
---
### 5、如何对接夜莺的监控告警功能
- 参看 [kafka-manager 对接夜莺监控](../dev_guide/Intergration_n9e_monitor.md) 说明。
- 参看 [kafka-manager 对接夜莺监控](../dev_guide/monitor_system_integrate_with_n9e.md) 说明。
---

View File

@@ -0,0 +1,72 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# Topic 指标说明
## 1. 实时流量指标说明
| 指标名称| 单位| 指标含义|
|-- |---- |---|
| messagesIn| 条/s | 每秒发送到kafka的消息条数 |
| byteIn| B/s | 每秒发送到kafka的字节数 |
| byteOut| B/s | 每秒流出kafka的字节数所有消费组消费的流量如果是Kafka版本较低这个还包括副本同步的流量 |
| byteRejected| B/s | 每秒被拒绝的字节数 |
| failedFetchRequest| qps | 每秒拉取失败的请求数 |
| failedProduceRequest| qps | 每秒发送失败的请求数 |
| totalProduceRequest| qps | 每秒总共发送的请求数与messagesIn的区别是一个是发送请求里面可能会有多条消息 |
| totalFetchRequest| qps | 每秒总共拉取消息的请求数 |
 
## 2. 历史流量指标说明
| 指标名称| 单位| 指标含义|
|-- |---- |---|
| messagesIn| 条/s | 近一分钟每秒发送到kafka的消息条数 |
| byteIn| B/s | 近一分钟每秒发送到kafka的字节数 |
| byteOut| B/s | 近一分钟每秒流出kafka的字节数所有消费组消费的流量如果是Kafka版本较低副本同步的流量 |
| byteRejected| B/s | 近一分钟每秒被拒绝的字节数 |
| totalProduceRequest| qps | 近一分钟每秒总共发送的请求数与messagesIn的区别是一个是发送请求里面可能会有多条消息 |
 
## 3. 实时耗时指标说明
**基于滴滴加强版Kafka引擎的特性可以获取Broker的实时耗时信息和历史耗时信息**
| 指标名称| 单位 | 指标含义 | 耗时高原因 | 解决方案|
|-- |-- |-- |-- |--|
| RequestQueueTimeMs| ms | 请求队列排队时间 | 请求多,服务端处理不过来 | 联系运维人员处理 |
| LocalTimeMs| ms | Broker本地处理时间 | 服务端读写数据慢,可能是读写锁竞争 | 联系运维人员处理 |
| RemoteTimeMs| ms | 请求等待远程完成时间对于发送请求如果ack=-1该时间表示副本同步时间对于消费请求如果当前没有数据该时间为等待新数据时间如果请求的版本与topic存储的版本不同需要做版本转换也会拉高该时间 | 对于生产ack=-1必然会导致该指标耗时高对于消费如果topic数据写入很慢该指标高也正常。如果需要版本转换该指标耗时也会高 | 对于生产可以考虑修改ack=1消费端问题可以联系运维人员具体分析 |
| ThrottleTimeMs| ms | 请求限流时间 | 生产/消费被限流 | 申请提升限流值 |
| ResponseQueueTimeMs| ms | 响应队列排队时间 | 响应多,服务端处理不过来 | 联系运维人员处理 |
| ResponseSendTimeMs| ms | 响应返回客户端时间 | 1下游消费能力差导致向consumer发送数据时写网络缓冲区过慢2消费lag过大一直从磁盘读取数据 | 1:提升客户端消费性能2: 联系运维人员确认是否读取磁盘问题 |
| TotalTimeMs| ms | 接收到请求到完成总时间,理论上该时间等于上述六项时间之和,但由于各时间都是单独统计,总时间只是约等于上述六部分时间之和 | 上面六项有些耗时高 | 具体针对高的指标解决 |
**备注由于kafka消费端实现方式消费端一次会发送多个Fetch请求在接收到一个Response之后就会开始处理数据使Broker端返回其他Response等待因此ResponseSendTimeMs并不完全是服务端发送时间有时会包含一部分消费端处理数据时间**
## 4. 历史耗时指标说明
**基于滴滴加强版Kafka引擎的特性可以获取Broker的实时耗时信息和历史耗时信息**
| 指标名称| 单位| 指标含义|
|-- | ---- |---|
| produceRequestTime99thPercentile|ms|Topic近一分钟发送99分位耗时|
| fetchRequestTime99thPercentile|ms|Topic近一分钟拉取99分位耗时|
| produceRequestTime95thPercentile|ms|Topic近一分钟发送95分位耗时|
| fetchRequestTime95thPercentile|ms|Topic近一分钟拉取95分位耗时|
| produceRequestTime75thPercentile|ms|Topic近一分钟发送75分位耗时|
| fetchRequestTime75thPercentile|ms|Topic近一分钟拉取75分位耗时|
| produceRequestTime50thPercentile|ms|Topic近一分钟发送50分位耗时|
| fetchRequestTime50thPercentile|ms|Topic近一分钟拉取50分位耗时|

View File

@@ -622,6 +622,9 @@ Lag表示该消费客户端是否有堆积等于 partition offset-consume
<font size=2>步骤3</font>填写完成后,点击提交即可提交申请。
备注说明集群创建后还需在此基础上创建region、逻辑集群。具体操作可参照 [集群接入手册](https://github.com/didi/Logi-KafkaManager/blob/master/docs/user_guide/add_cluster/add_cluster.md)
![applycluster](./assets/applycluster.png)
#### 申请集群下线 ####

View File

@@ -6,8 +6,6 @@ package com.xiaojukeji.kafka.manager.common.bizenum;
*/
public enum IDCEnum {
CN("cn", "国内"),
US("us", "美东"),
RU("ru", "俄罗斯"),
;
private String idc;

View File

@@ -21,6 +21,8 @@ public enum ModuleEnum {
PARTITION(5, "分区"),
GATEWAY_CONFIG(6, "Gateway配置"),
UNKNOWN(-1, "未知")
;
ModuleEnum(int code, String message) {

View File

@@ -10,6 +10,7 @@ public enum RebalanceDimensionEnum {
REGION(1, "Region维度"),
BROKER(2, "Broker维度"),
TOPIC(3, "Topic维度"),
PARTITION(4, "Partition维度"),
;
private Integer code;

View File

@@ -45,4 +45,13 @@ public enum GatewayConfigKeyEnum {
", configName='" + configName + '\'' +
'}';
}
public static GatewayConfigKeyEnum getByConfigType(String configType) {
for (GatewayConfigKeyEnum configKeyEnum: GatewayConfigKeyEnum.values()) {
if (configKeyEnum.getConfigType().equals(configType)) {
return configKeyEnum;
}
}
return null;
}
}

View File

@@ -7,6 +7,8 @@ package com.xiaojukeji.kafka.manager.common.constant;
public class KafkaConstant {
public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets";
public static final String TRANSACTION_TOPIC_NAME = "__transaction_state";
public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com";
public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1";

View File

@@ -12,11 +12,6 @@ public class TopicCreationConstant {
*/
public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG";
/**
* 治理平台创建Topic配置KEY
*/
public static final String CHORUS_CREATE_TOPIC_CONFIG_KEY_NAME = "CHORUS_CREATE_TOPIC_CONFIG";
/**
* 内部创建Topic配置KEY
*/
@@ -30,6 +25,8 @@ public class TopicCreationConstant {
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L;
public static Properties createNewProperties(Long retentionTime) {
Properties properties = new Properties();
properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime));

View File

@@ -3,7 +3,6 @@ package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
@@ -16,17 +15,12 @@ public class ConsumerMetadata {
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
private Map<String, List<String>> consumerGroupAppMap = new ConcurrentHashMap<>();
public ConsumerMetadata(Set<String> consumerGroupSet,
Map<String, Set<String>> topicNameConsumerGroupMap,
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap,
Map<String, List<String>> consumerGroupAppMap) {
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
this.consumerGroupSet = consumerGroupSet;
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
this.consumerGroupAppMap = consumerGroupAppMap;
}
public Set<String> getConsumerGroupSet() {
@@ -40,8 +34,4 @@ public class ConsumerMetadata {
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
return consumerGroupSummaryMap;
}
public Map<String, List<String>> getConsumerGroupAppMap() {
return consumerGroupAppMap;
}
}

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import java.io.Serializable;
@@ -118,4 +119,9 @@ public class Result<T> implements Serializable {
result.setData(data);
return result;
}
public boolean failed() {
return !Constant.SUCCESS.equals(code);
}
}

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
public class ControllerPreferredCandidate {
private Integer brokerId;
private String host;
private Long startTime;
private Integer status;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
@Override
public String toString() {
return "ControllerPreferredBroker{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", startTime=" + startTime +
", status=" + status +
'}';
}
}

View File

@@ -2,30 +2,18 @@ package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
import java.util.Objects;
/**
* 消费组信息
* @author zengqiao
* @date 19/4/18
*/
public class ConsumerGroupDTO {
public class ConsumerGroup {
private Long clusterId;
private String consumerGroup;
private List<String> appIdList;
private OffsetLocationEnum offsetStoreLocation;
public ConsumerGroupDTO(Long clusterId,
String consumerGroup,
List<String> appIdList,
OffsetLocationEnum offsetStoreLocation) {
public ConsumerGroup(Long clusterId, String consumerGroup, OffsetLocationEnum offsetStoreLocation) {
this.clusterId = clusterId;
this.consumerGroup = consumerGroup;
this.appIdList = appIdList;
this.offsetStoreLocation = offsetStoreLocation;
}
@@ -45,14 +33,6 @@ public class ConsumerGroupDTO {
this.consumerGroup = consumerGroup;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
@@ -63,10 +43,9 @@ public class ConsumerGroupDTO {
@Override
public String toString() {
return "ConsumerGroupDTO{" +
return "ConsumerGroup{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", appIdList=" + appIdList +
", offsetStoreLocation=" + offsetStoreLocation +
'}';
}
@@ -79,7 +58,7 @@ public class ConsumerGroupDTO {
if (o == null || getClass() != o.getClass()) {
return false;
}
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
ConsumerGroup that = (ConsumerGroup) o;
return clusterId.equals(that.clusterId)
&& consumerGroup.equals(that.consumerGroup)
&& offsetStoreLocation == that.offsetStoreLocation;

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
public class ConsumerGroupSummary {
private Long clusterId;
private String consumerGroup;
private OffsetLocationEnum offsetStoreLocation;
private List<String> appIdList;
private String state;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) {
this.offsetStoreLocation = offsetStoreLocation;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
@Override
public String toString() {
return "ConsumerGroupSummary{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", offsetStoreLocation=" + offsetStoreLocation +
", appIdList=" + appIdList +
", state='" + state + '\'' +
'}';
}
}

View File

@@ -25,7 +25,10 @@ public class RebalanceDTO {
@ApiModelProperty(value = "TopicName")
private String topicName;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度]")
@ApiModelProperty(value = "分区ID")
private Integer partitionId;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度, 4:Partition纬度]")
private Integer dimension;
public Long getClusterId() {
@@ -60,6 +63,14 @@ public class RebalanceDTO {
this.topicName = topicName;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Integer getDimension() {
return dimension;
}
@@ -68,22 +79,12 @@ public class RebalanceDTO {
this.dimension = dimension;
}
@Override
public String toString() {
return "RebalanceDTO{" +
"clusterId=" + clusterId +
", regionId=" + regionId +
", brokerId=" + brokerId +
", topicName='" + topicName + '\'' +
", dimension=" + dimension +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId)
|| RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId)
|| RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName) ) {
|| (RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId))
|| (RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId))
|| (RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName))
|| (RebalanceDimensionEnum.PARTITION.getCode().equals(dimension) && (ValidateUtils.isNull(topicName) || ValidateUtils.isNull(partitionId))) ) {
return false;
}
return true;

View File

@@ -0,0 +1,67 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.List;
/**
* @author zengqiao
* @date 21/01/14
*/
@ApiModel(value = "Topic消费组概要信息")
public class ConsumerGroupSummaryVO {
@ApiModelProperty(value = "消费组名称")
private String consumerGroup;
@ApiModelProperty(value = "使用的AppID")
private String appIds;
@ApiModelProperty(value = "offset存储位置")
private String location;
@ApiModelProperty(value = "消费组状态")
private String state;
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public String getAppIds() {
return appIds;
}
public void setAppIds(String appIds) {
this.appIds = appIds;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
@Override
public String toString() {
return "ConsumerGroupSummaryVO{" +
"consumerGroup='" + consumerGroup + '\'' +
", appIds=" + appIds +
", location='" + location + '\'' +
", state='" + state + '\'' +
'}';
}
}

View File

@@ -0,0 +1,103 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.rd;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Date;
/**
* @author zengqiao
* @date 20/3/19
*/
@ApiModel(value = "GatewayConfigVO", description = "Gateway配置信息")
public class GatewayConfigVO {
@ApiModelProperty(value="ID")
private Long id;
@ApiModelProperty(value="配置类型")
private String type;
@ApiModelProperty(value="配置名称")
private String name;
@ApiModelProperty(value="配置值")
private String value;
@ApiModelProperty(value="版本")
private Long version;
@ApiModelProperty(value="创建时间")
private Date createTime;
@ApiModelProperty(value="修改时间")
private Date modifyTime;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public Long getVersion() {
return version;
}
public void setVersion(Long version) {
this.version = version;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public Date getModifyTime() {
return modifyTime;
}
public void setModifyTime(Date modifyTime) {
this.modifyTime = modifyTime;
}
@Override
public String toString() {
return "GatewayConfigVO{" +
"id=" + id +
", type='" + type + '\'' +
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';
}
}

View File

@@ -0,0 +1,61 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
@ApiModel(description = "Broker基本信息")
public class ControllerPreferredCandidateVO {
@ApiModelProperty(value = "brokerId")
private Integer brokerId;
@ApiModelProperty(value = "主机名")
private String host;
@ApiModelProperty(value = "启动时间")
private Long startTime;
@ApiModelProperty(value = "broker状态[0:在线, -1:不在线]")
private Integer status;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
@Override
public String toString() {
return "ControllerPreferredBrokerVO{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", startTime=" + startTime +
", status=" + status +
'}';
}
}

View File

@@ -9,6 +9,7 @@ import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.TopicConnectionDO
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
@@ -52,7 +53,7 @@ public class JsonUtils {
return JSON.toJSONString(obj);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject) {
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
JSONObject topicObject = jsonObject.getJSONObject(clientType);
@@ -73,6 +74,7 @@ public class JsonUtils {
connectionDO.setClusterId(clusterId);
connectionDO.setTopicName(topicName);
connectionDO.setType(clientType);
connectionDO.setCreateTime(new Date(postTime));
connectionDOList.add(connectionDO);
}
}

View File

@@ -1,7 +1,5 @@
package com.xiaojukeji.kafka.manager.common.utils;
import com.xiaojukeji.kafka.manager.common.bizenum.IDCEnum;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import org.apache.commons.lang.StringUtils;
import java.util.List;
@@ -83,23 +81,4 @@ public class ValidateUtils {
public static boolean isNullOrLessThanZero(Double value) {
return value == null || value < 0;
}
/**
 * Checks whether a topic name is legal for the given IDC (data center).
 * A name is legal when: idc and topicName are non-null, the name is shorter
 * than TOPIC_NAME_MAX_LENGTH, and the IDC-specific prefix rule holds
 * (CN: any name; US/RU: name must start with the corresponding prefix).
 * NOTE(review): the length check uses >=, so a name of exactly
 * TOPIC_NAME_MAX_LENGTH characters is rejected — confirm this is intended.
 */
public static boolean topicNameLegal(String idc, String topicName) {
if (ValidateUtils.isNull(idc) || ValidateUtils.isNull(topicName)) {
return false;
}
// Check the topic name length.
if (topicName.length() >= TopicCreationConstant.TOPIC_NAME_MAX_LENGTH) {
return false;
}
// Check the IDC-specific name prefix.
if (IDCEnum.CN.getIdc().equals(idc) ||
(IDCEnum.US.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_US)) ||
(IDCEnum.RU.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_RU))) {
return true;
}
return false;
}
}

View File

@@ -18,6 +18,8 @@ public class ZkPathUtil {
public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
/**
* config
*/
@@ -27,11 +29,11 @@ public class ZkPathUtil {
public static final String CONFIG_CLIENTS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "clients";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
public static final String D_CONTROLLER_CANDIDATES = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension/candidates";
public static String getBrokerIdNodePath(Integer brokerId) {
return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);

View File

@@ -92,20 +92,4 @@ public class ConsumerMetadataCache {
}
return consumerMetadata.getTopicNameConsumerGroupMap().getOrDefault(topicName, new HashSet<>());
}
/**
 * Returns the consumerGroup -> appId list mapping cached from ZooKeeper
 * for the given cluster; empty map if the cluster has no cached metadata.
 */
public static Map<String, List<String>> getConsumerGroupAppIdListInZk(Long clusterId) {
ConsumerMetadata consumerMetadata = CG_METADATA_IN_ZK_MAP.get(clusterId);
if(consumerMetadata == null){
return new HashMap<>(0);
}
return consumerMetadata.getConsumerGroupAppMap();
}
/**
 * Returns the consumerGroup -> appId list mapping cached from the brokers
 * for the given cluster; empty map if the cluster has no cached metadata.
 */
public static Map<String, List<String>> getConsumerGroupAppIdListInBK(Long clusterId) {
ConsumerMetadata consumerMetadata = CG_METADATA_IN_BK_MAP.get(clusterId);
if(consumerMetadata == null){
return new HashMap<>(0);
}
return consumerMetadata.getConsumerGroupAppMap();
}
}

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.service.cache;
import com.google.common.collect.Sets;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
@@ -15,6 +16,7 @@ import org.springframework.stereotype.Service;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
/**
* 逻辑集群元信息
@@ -144,9 +146,16 @@ public class LogicalClusterMetadataManager {
@Scheduled(cron="0/30 * * * * ?")
public void flush() {
List<LogicalClusterDO> logicalClusterDOList = logicalClusterService.listAll();
if (ValidateUtils.isEmptyList(logicalClusterDOList)) {
return;
if (ValidateUtils.isNull(logicalClusterDOList)) {
logicalClusterDOList = Collections.EMPTY_LIST;
}
Set<Long> inDbLogicalClusterIds = logicalClusterDOList.stream()
.map(LogicalClusterDO::getId)
.collect(Collectors.toSet());
// inCache 和 inDb 取差集,差集结果为已删除的、新增的.
Sets.SetView<Long> diffLogicalClusterIds = Sets.difference(LOGICAL_CLUSTER_MAP.keySet(), inDbLogicalClusterIds);
diffLogicalClusterIds.forEach(logicalClusterId -> delLogicalClusterInCache(logicalClusterId));
Map<Long, RegionDO> regionMap = new HashMap<>();
List<RegionDO> regionDOList = regionService.listAll();
@@ -197,4 +206,11 @@ public class LogicalClusterMetadataManager {
}
TOPIC_LOGICAL_MAP.put(logicalClusterDO.getClusterId(), subMap);
}
// Evicts all cached entries for a logical cluster that no longer exists in the DB.
// NOTE(review): TOPIC_LOGICAL_MAP is populated keyed by the PHYSICAL cluster id
// (see updateTopicLogicalMap: TOPIC_LOGICAL_MAP.put(logicalClusterDO.getClusterId(), ...)),
// so removing it here by logicalClusterId may be a no-op — confirm the intended key.
private void delLogicalClusterInCache(Long logicalClusterId) {
LOGICAL_CLUSTER_ID_TOPIC_NAME_MAP.remove(logicalClusterId);
LOGICAL_CLUSTER_ID_BROKER_ID_MAP.remove(logicalClusterId);
LOGICAL_CLUSTER_MAP.remove(logicalClusterId);
TOPIC_LOGICAL_MAP.remove(logicalClusterId);
}
}

View File

@@ -13,6 +13,8 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import com.xiaojukeji.kafka.manager.service.zookeeper.*;
@@ -48,6 +50,12 @@ public class PhysicalClusterMetadataManager {
@Autowired
private ConfigUtils configUtils;
@Autowired
private TopicDao topicDao;
@Autowired
private AuthorityDao authorityDao;
private final static Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();
private final static Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
@@ -116,7 +124,7 @@ public class PhysicalClusterMetadataManager {
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);
//增加Topic监控
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig);
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, topicDao, authorityDao);
topicListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener);

View File

@@ -9,6 +9,19 @@ import java.util.List;
import java.util.Properties;
public interface AdminService {
/**
* 创建Topic
* @param clusterDO 集群DO
* @param topicDO TopicDO
* @param partitionNum 分区数
* @param replicaNum 副本数
* @param regionId RegionID
* @param brokerIdList BrokerId
* @param properties Topic属性
* @param applicant 申请人
* @param operator 操作人
* @return 操作状态
*/
ResultStatus createTopic(ClusterDO clusterDO,
TopicDO topicDO,
Integer partitionNum,
@@ -19,19 +32,86 @@ public interface AdminService {
String applicant,
String operator);
/**
* 删除Topic
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param operator 操作人
* @return 操作状态
*/
ResultStatus deleteTopic(ClusterDO clusterDO,
String topicName,
String operator);
/**
* 优先副本选举状态
* @param clusterDO 集群DO
* @return 任务状态
*/
TaskStatusEnum preferredReplicaElectionStatus(ClusterDO clusterDO);
/**
* 集群纬度优先副本选举
* @param clusterDO 集群DO
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String operator);
/**
* Broker纬度优先副本选举
* @param clusterDO 集群DO
* @param brokerId BrokerID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator);
/**
* Topic纬度优先副本选举
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, String operator);
/**
* 分区纬度优先副本选举
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param partitionId 分区ID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, Integer partitionId, String operator);
/**
* Topic扩分区
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param partitionNum 新增的分区数
* @param regionId RegionID
* @param brokerIdList BrokerID列表
* @param operator 操作人
* @return 任务状态
*/
ResultStatus expandPartitions(ClusterDO clusterDO, String topicName, Integer partitionNum, Long regionId, List<Integer> brokerIdList, String operator);
/**
* 获取Topic配置
* @param clusterDO 集群DO
* @param topicName Topic名称
* @return 任务状态
*/
Properties getTopicConfig(ClusterDO clusterDO, String topicName);
/**
* 修改Topic配置
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param properties 新的属性
* @param operator 操作人
* @return 任务状态
*/
ResultStatus modifyTopicConfig(ClusterDO clusterDO, String topicName, Properties properties, String operator);
}

View File

@@ -1,7 +1,9 @@
package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.ClusterNameDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
@@ -43,5 +45,10 @@ public interface ClusterService {
ResultStatus deleteById(Long clusterId);
ClusterDO selectSuitableCluster(Long clusterId, String dataCenter);
/**
* 获取优先被选举为controller的broker
* @param clusterId 集群ID
* @return void
*/
Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId);
}

View File

@@ -2,14 +2,14 @@ package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* consumer相关的服务接口
@@ -20,33 +20,36 @@ public interface ConsumerService {
/**
* 获取消费组列表
*/
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId);
List<ConsumerGroup> getConsumerGroupList(Long clusterId);
/**
* 查询消费Topic的消费组
*/
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName);
List<ConsumerGroup> getConsumerGroupList(Long clusterId, String topicName);
/**
* 获取消费Topic的消费组概要信息
*/
List<ConsumerGroupSummary> getConsumerGroupSummaries(Long clusterId, String topicName);
/**
* 查询消费详情
*/
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO);
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);
/**
* 获取消费组消费的Topic列表
*/
List<String> getConsumerGroupConsumedTopicList(Long clusterId, String consumerGroup, String location);
Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO);
Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);
/**
* 重置offset
*/
List<Result> resetConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO,
ConsumerGroup consumerGroup,
List<PartitionOffsetDTO> partitionOffsetDTOList);
Map<Long, Integer> getConsumerGroupNumMap(List<ClusterDO> clusterDOList);

View File

@@ -66,6 +66,19 @@ public interface TopicManagerService {
*/
ResultStatus modifyTopic(Long clusterId, String topicName, String description, String operator);
/**
* 修改Topic
* @param clusterId 集群ID
* @param topicName Topic名称
* @param appId 所属应用
* @param description 备注
* @param operator 操作人
* @author zengqiao
* @date 20/5/12
* @return ResultStatus
*/
ResultStatus modifyTopicByOp(Long clusterId, String topicName, String appId, String description, String operator);
/**
* 通过Topic名称删除
* @param clusterId 集群id

View File

@@ -3,11 +3,27 @@ package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi.TopicJmxSwitch;
import java.util.List;
/**
* ZK相关的接口
* @author tukun
* @date 2015/11/11.
*/
public interface ZookeeperService {
/**
* 开启JMX
* @param clusterId 集群ID
* @param topicName Topic名称
* @param jmxSwitch JMX开关
* @return 操作结果
*/
Result openTopicJmx(Long clusterId, String topicName, TopicJmxSwitch jmxSwitch);
/**
* 获取优先被选举为controller的broker
* @param clusterId 集群ID
* @return 操作结果
*/
Result<List<Integer>> getControllerPreferredCandidates(Long clusterId);
}

View File

@@ -60,4 +60,6 @@ public interface AuthorityService {
int addAuthorityAndQuota(AuthorityDO authorityDO, TopicQuota quota);
Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority();
int deleteAuthorityByTopic(Long clusterId, String topicName);
}

View File

@@ -1,18 +1,86 @@
package com.xiaojukeji.kafka.manager.service.service.gateway;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.*;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import java.util.List;
public interface GatewayConfigService {
/**
 * Get the Kafka bootstrap-server address configuration.
 * @param requestVersion config version currently held by the caller
 * @return the bootstrap-server configuration
 */
KafkaBootstrapServerConfig getKafkaBootstrapServersConfig(Long requestVersion);
/**
 * Get the service-discovery request-queue configuration.
 * @param requestVersion config version currently held by the caller
 * @return the request-queue configuration
 */
RequestQueueConfig getRequestQueueConfig(Long requestVersion);
/**
 * Get the service-discovery per-App request-rate configuration.
 * @param requestVersion config version currently held by the caller
 * @return the App rate configuration
 */
AppRateConfig getAppRateConfig(Long requestVersion);
/**
 * Get the service-discovery per-IP request-rate configuration.
 * @param requestVersion config version currently held by the caller
 * @return the IP rate configuration
 */
IpRateConfig getIpRateConfig(Long requestVersion);
/**
 * Get the specific per-IP / per-App rate-limit configuration.
 * @param requestVersion config version currently held by the caller
 * @return the specific rate configuration
 */
SpRateConfig getSpRateConfig(Long requestVersion);
/**
 * Look up a single config entry by type and name.
 * @param configType config type
 * @param configName config name
 * @return the matching config entry
 */
GatewayConfigDO getByTypeAndName(String configType, String configName);
/**
 * List all gateway config entries.
 * @return all config entries
 */
List<GatewayConfigDO> list();
/**
 * Create a new config entry.
 * @param gatewayConfigDO the config to insert
 * @return operation result
 */
Result insert(GatewayConfigDO gatewayConfigDO);
/**
 * Delete a config entry by id.
 * @param id config id
 * @return operation result
 */
Result deleteById(Long id);
/**
 * Update an existing config entry.
 * @param gatewayConfigDO the new config values
 * @return operation result
 */
Result updateById(GatewayConfigDO gatewayConfigDO);
/**
 * Look up a single config entry by id.
 * @param id config id
 * @return the matching config entry
 */
GatewayConfigDO getById(Long id);
}

View File

@@ -196,8 +196,7 @@ public class AppServiceImpl implements AppService {
}
@Override
public List<AppTopicDTO> getAppTopicDTOList(String appId,
Boolean mine) {
public List<AppTopicDTO> getAppTopicDTOList(String appId, Boolean mine) {
// 查询AppID
AppDO appDO = appDao.getByAppId(appId);
if (ValidateUtils.isNull(appDO)) {
@@ -223,13 +222,17 @@ public class AppServiceImpl implements AppService {
TopicDO topicDO = topicMap
.getOrDefault(authorityDO.getClusterId(), new HashMap<>())
.get(authorityDO.getTopicName());
if (ValidateUtils.isNull(topicDO)) {
continue;
}
if (Boolean.TRUE.equals(mine)
&& (ValidateUtils.isNull(topicDO) || !topicDO.getAppId().equals(appId))) {
&& !topicDO.getAppId().equals(appId)) {
continue;
}
if (Boolean.FALSE.equals(mine)
&& !ValidateUtils.isNull(topicDO)
&& topicDO.getAppId().equals(appId)) {
continue;
}

View File

@@ -192,4 +192,10 @@ public class AuthorityServiceImpl implements AuthorityService {
public Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority() {
return authorityDao.getAllAuthority();
}
// Deletes all authority records of the given topic; returns the number of rows removed.
@Override
public int deleteAuthorityByTopic(Long clusterId, String topicName) {
return authorityDao.deleteAuthorityByTopic(clusterId, topicName);
}
}

View File

@@ -2,6 +2,8 @@ package com.xiaojukeji.kafka.manager.service.service.gateway.impl;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.bizenum.gateway.GatewayConfigKeyEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.*;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -13,6 +15,7 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -21,7 +24,7 @@ import java.util.Map;
* @author zengqiao
* @date 20/7/28
*/
@Service("gatewayConfigService")
@Service
public class GatewayConfigServiceImpl implements GatewayConfigService {
private final Logger LOGGER = LoggerFactory.getLogger(GatewayConfigServiceImpl.class);
@@ -52,7 +55,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
? new KafkaBootstrapServerConfig(maxVersion, clusterIdBootstrapServersMap)
: new KafkaBootstrapServerConfig(requestVersion, new HashMap<>(0));
} catch (Exception e) {
LOGGER.error("get kafka bootstrap servers config failed, data:{}.", JSON.toJSONString(doList), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getKafkaBootstrapServersConfig||data={}||errMsg={}||msg=get kafka bootstrap servers config failed",
JSON.toJSONString(doList), e.getMessage());
}
return null;
}
@@ -71,7 +75,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
return new RequestQueueConfig(configDO.getVersion(), Long.valueOf(configDO.getValue()));
} catch (Exception e) {
LOGGER.error("get request queue config failed, data:{}.", JSON.toJSONString(configDO), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getRequestQueueConfig||data={}||errMsg={}||msg=get request queue config failed",
JSON.toJSONString(configDO), e.getMessage());
}
return null;
}
@@ -90,7 +95,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
return new AppRateConfig(configDO.getVersion(), Long.valueOf(configDO.getValue()));
} catch (Exception e) {
LOGGER.error("get app rate config failed, data:{}.", JSON.toJSONString(configDO), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getAppRateConfig||data={}||errMsg={}||msg=get app rate config failed",
JSON.toJSONString(configDO), e.getMessage());
}
return null;
}
@@ -153,4 +159,94 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
}
return null;
}
/**
 * Lists all gateway config entries.
 * @return all entries, or an empty list when the DB query fails
 */
@Override
public List<GatewayConfigDO> list() {
    try {
        return gatewayConfigDao.list();
    } catch (Exception e) {
        // Log at ERROR: a failed DB read was previously logged at DEBUG and silently swallowed.
        LOGGER.error("class=GatewayConfigServiceImpl||method=list||errMsg={}||msg=list failed", e.getMessage());
    }
    return new ArrayList<>();
}
/**
 * Inserts a new gateway config entry, assigning it a version one greater than
 * the current maximum version of the same config type.
 * @param gatewayConfigDO the config to insert
 * @return success, PARAM_ILLEGAL on invalid input, or MYSQL_ERROR on DB failure
 */
@Override
public Result insert(GatewayConfigDO gatewayConfigDO) {
    try {
        GatewayConfigKeyEnum configKeyEnum = GatewayConfigKeyEnum.getByConfigType(gatewayConfigDO.getType());
        // Reject if ANY of the fields is invalid. The original used &&, which only
        // rejected when all three were invalid at once, letting bad configs through.
        if (ValidateUtils.isNull(configKeyEnum)
                || ValidateUtils.isBlank(gatewayConfigDO.getName())
                || ValidateUtils.isBlank(gatewayConfigDO.getValue())) {
            // 参数错误
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        // New version = max(existing versions) + 1, starting at 1 when none exist.
        // Using >= fixes the edge case where the current max version equals the
        // initial value 1 (the original '>' comparison never fired then).
        List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(gatewayConfigDO.getType());
        Long version = 1L;
        for (GatewayConfigDO elem: gatewayConfigDOList) {
            if (elem.getVersion() >= version) {
                version = elem.getVersion() + 1L;
            }
        }
        gatewayConfigDO.setVersion(version);
        if (gatewayConfigDao.insert(gatewayConfigDO) > 0) {
            return Result.buildSuc();
        }
        return Result.buildFrom(ResultStatus.MYSQL_ERROR);
    } catch (Exception e) {
        // Log at ERROR so insert failures are visible in production logs.
        LOGGER.error("class=GatewayConfigServiceImpl||method=insert||data={}||errMsg={}||msg=insert failed", gatewayConfigDO, e.getMessage());
    }
    return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
/**
 * Deletes a gateway config entry by id.
 * @param id config id
 * @return success, PARAM_ILLEGAL for a null id, RESOURCE_NOT_EXIST when no row
 *         matched, or MYSQL_ERROR on DB failure
 */
@Override
public Result deleteById(Long id) {
    // Guard against null ids before hitting the DAO, consistent with getById.
    if (ValidateUtils.isNull(id)) {
        return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
    }
    try {
        if (gatewayConfigDao.deleteById(id) > 0) {
            return Result.buildSuc();
        }
        return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
    } catch (Exception e) {
        // Log at ERROR so delete failures are visible in production logs.
        LOGGER.error("class=GatewayConfigServiceImpl||method=deleteById||id={}||errMsg={}||msg=delete failed", id, e.getMessage());
    }
    return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
/**
 * Updates an existing gateway config entry's value, bumping its version by one.
 * Name and type are immutable and must match the stored entry.
 * @param newGatewayConfigDO the new config values (id, name, type, value)
 * @return success, RESOURCE_NOT_EXIST when the id is unknown, PARAM_ILLEGAL
 *         when name/type differ or value is blank, or MYSQL_ERROR on DB failure
 */
@Override
public Result updateById(GatewayConfigDO newGatewayConfigDO) {
    try {
        GatewayConfigDO oldGatewayConfigDO = this.getById(newGatewayConfigDO.getId());
        if (ValidateUtils.isNull(oldGatewayConfigDO)) {
            return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
        }
        if (!oldGatewayConfigDO.getName().equals(newGatewayConfigDO.getName())
                || !oldGatewayConfigDO.getType().equals(newGatewayConfigDO.getType())
                || ValidateUtils.isBlank(newGatewayConfigDO.getValue())) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        newGatewayConfigDO.setVersion(oldGatewayConfigDO.getVersion() + 1);
        // Persist the NEW config. The original passed oldGatewayConfigDO here,
        // which rewrote the unchanged row and silently dropped the update.
        if (gatewayConfigDao.updateById(newGatewayConfigDO) > 0) {
            return Result.buildSuc();
        }
        return Result.buildFrom(ResultStatus.MYSQL_ERROR);
    } catch (Exception e) {
        // Log at ERROR so update failures are visible in production logs.
        LOGGER.error("class=GatewayConfigServiceImpl||method=updateById||data={}||errMsg={}||msg=update failed", newGatewayConfigDO, e.getMessage());
    }
    return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
/**
 * Looks up a gateway config entry by id.
 * @param id config id
 * @return the entry, or null when id is null, the entry is absent, or the DB query fails
 */
@Override
public GatewayConfigDO getById(Long id) {
    if (ValidateUtils.isNull(id)) {
        return null;
    }
    try {
        return gatewayConfigDao.getById(id);
    } catch (Exception e) {
        // Log at ERROR: a failed DB read was previously logged at DEBUG and silently swallowed.
        LOGGER.error("class=GatewayConfigServiceImpl||method=getById||id={}||errMsg={}||msg=get failed", id, e.getMessage());
    }
    return null;
}
}

View File

@@ -13,6 +13,7 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.service.gateway.AuthorityService;
@@ -139,6 +140,9 @@ public class AdminServiceImpl implements AdminService {
// 3. 数据库中删除topic
topicManagerService.deleteByTopicName(clusterDO.getId(), topicName);
// 4. 数据库中删除authority
authorityService.deleteAuthorityByTopic(clusterDO.getId(), topicName);
return rs;
}
@@ -191,15 +195,55 @@ public class AdminServiceImpl implements AdminService {
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator) {
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterDO.getId(), brokerId);
if (null == brokerMetadata) {
if (ValidateUtils.isNull(brokerMetadata)) {
return ResultStatus.PARAM_ILLEGAL;
}
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
if (ValidateUtils.isEmptyMap(partitionMap)) {
return ResultStatus.SUCCESS;
}
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, String operator) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (ValidateUtils.isNull(topicMetadata)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
Map<String, List<Integer>> partitionMap = new HashMap<>();
partitionMap.put(topicName, new ArrayList<>(topicMetadata.getPartitionMap().getPartitions().keySet()));
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, Integer partitionId, String operator) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (ValidateUtils.isNull(topicMetadata)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
if (!topicMetadata.getPartitionMap().getPartitions().containsKey(partitionId)) {
return ResultStatus.PARTITION_NOT_EXIST;
}
Map<String, List<Integer>> partitionMap = new HashMap<>();
partitionMap.put(topicName, Arrays.asList(partitionId));
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
private ResultStatus preferredReplicaElection(ClusterDO clusterDO, Map<String, List<Integer>> partitionMap, String operator) {
if (ValidateUtils.isEmptyMap(partitionMap)) {
return ResultStatus.SUCCESS;
}
ZkUtils zkUtils = null;
try {
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
if (partitionMap == null || partitionMap.isEmpty()) {
return ResultStatus.SUCCESS;
}
String preferredReplicaElectString = convert2preferredReplicaElectString(partitionMap);
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(),

View File

@@ -1,11 +1,16 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.DBStatusEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.ClusterNameDTO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.*;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.dao.ClusterDao;
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
@@ -14,6 +19,7 @@ import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
import com.xiaojukeji.kafka.manager.service.service.RegionService;
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
@@ -57,6 +63,9 @@ public class ClusterServiceImpl implements ClusterService {
@Autowired
private ConfigUtils configUtils;
@Autowired
private ZookeeperService zookeeperService;
@Override
public ResultStatus addNew(ClusterDO clusterDO, String operator) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isNull(operator)) {
@@ -262,21 +271,6 @@ public class ClusterServiceImpl implements ClusterService {
return ResultStatus.SUCCESS;
}
@Override
public ClusterDO selectSuitableCluster(Long clusterId, String dataCenter) {
if (!ValidateUtils.isNullOrLessThanZero(clusterId)) {
return getById(clusterId);
}
if (ValidateUtils.isBlank(dataCenter)) {
return null;
}
List<ClusterDO> clusterDOList = this.listAll();
if (ValidateUtils.isEmptyList(clusterDOList)) {
return null;
}
return clusterDOList.get(0);
}
private ClusterDetailDTO getClusterDetailDTO(ClusterDO clusterDO, Boolean needDetail) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
@@ -300,4 +294,31 @@ public class ClusterServiceImpl implements ClusterService {
dto.setControllerId(PhysicalClusterMetadataManager.getControllerId(clusterDO.getId()));
return dto;
}
/**
 * Builds the list of brokers preferred as controller candidates for a cluster.
 * Candidate broker ids come from ZooKeeper; each is enriched with host/start-time
 * from the broker metadata cache, or marked DEAD when no metadata is cached.
 */
@Override
public Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId) {
Result<List<Integer>> candidateResult = zookeeperService.getControllerPreferredCandidates(clusterId);
if (candidateResult.failed()) {
// Propagate the ZK error code/message unchanged.
return new Result<>(candidateResult.getCode(), candidateResult.getMessage());
}
if (ValidateUtils.isEmptyList(candidateResult.getData())) {
return Result.buildSuc(new ArrayList<>());
}
List<ControllerPreferredCandidate> controllerPreferredCandidateList = new ArrayList<>();
for (Integer brokerId: candidateResult.getData()) {
ControllerPreferredCandidate controllerPreferredCandidate = new ControllerPreferredCandidate();
controllerPreferredCandidate.setBrokerId(brokerId);
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
if (ValidateUtils.isNull(brokerMetadata)) {
// No cached metadata: report the broker as not alive.
controllerPreferredCandidate.setStatus(DBStatusEnum.DEAD.getStatus());
} else {
controllerPreferredCandidate.setHost(brokerMetadata.getHost());
controllerPreferredCandidate.setStartTime(brokerMetadata.getTimestamp());
controllerPreferredCandidate.setStatus(DBStatusEnum.ALIVE.getStatus());
}
controllerPreferredCandidateList.add(controllerPreferredCandidate);
}
return Result.buildSuc(controllerPreferredCandidateList);
}
}

View File

@@ -2,13 +2,14 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetPosEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.SinkMonitorSystemEnum;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -23,6 +24,7 @@ import kafka.admin.AdminClient;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.types.SchemaException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -44,70 +46,116 @@ public class ConsumerServiceImpl implements ConsumerService {
private TopicService topicService;
@Override
public List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId) {
List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
public List<ConsumerGroup> getConsumerGroupList(Long clusterId) {
List<ConsumerGroup> consumerGroupList = new ArrayList<>();
for (OffsetLocationEnum location: OffsetLocationEnum.values()) {
Map<String, List<String>> consumerGroupAppIdMap = null;
Set<String> consumerGroupSet = null;
if (OffsetLocationEnum.ZOOKEEPER.equals(location)) {
// 获取ZK中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInZk(clusterId);
consumerGroupSet = ConsumerMetadataCache.getGroupInZkMap(clusterId);
} else if (OffsetLocationEnum.BROKER.equals(location)) {
// 获取Broker中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInBK(clusterId);
consumerGroupSet = ConsumerMetadataCache.getGroupInBrokerMap(clusterId);
}
if (consumerGroupSet == null || consumerGroupAppIdMap == null) {
if (ValidateUtils.isEmptySet(consumerGroupSet)) {
continue;
}
for (String consumerGroup : consumerGroupSet) {
consumerGroupDTOList.add(new ConsumerGroupDTO(
clusterId,
consumerGroup,
consumerGroupAppIdMap.getOrDefault(consumerGroup, new ArrayList<>()),
location)
); }
consumerGroupList.add(new ConsumerGroup(clusterId, consumerGroup, location));
}
}
return consumerGroupDTOList;
return consumerGroupList;
}
@Override
public List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName) {
List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
public List<ConsumerGroup> getConsumerGroupList(Long clusterId, String topicName) {
List<ConsumerGroup> consumerGroupList = new ArrayList<>();
for (OffsetLocationEnum location: OffsetLocationEnum.values()) {
Map<String, List<String>> consumerGroupAppIdMap = null;
Set<String> consumerGroupSet = null;
if (OffsetLocationEnum.ZOOKEEPER.equals(location)) {
// 获取ZK中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInZk(clusterId);
consumerGroupSet = ConsumerMetadataCache.getTopicConsumerGroupInZk(clusterId, topicName);
} else if (OffsetLocationEnum.BROKER.equals(location)) {
// 获取Broker中的消费组
consumerGroupAppIdMap = ConsumerMetadataCache.getConsumerGroupAppIdListInBK(clusterId);
consumerGroupSet = ConsumerMetadataCache.getTopicConsumerGroupInBroker(clusterId, topicName);
}
if (consumerGroupSet == null || consumerGroupAppIdMap == null) {
if (ValidateUtils.isEmptySet(consumerGroupSet)) {
continue;
}
for (String consumerGroup : consumerGroupSet) {
consumerGroupDTOList.add(new ConsumerGroupDTO(
clusterId,
consumerGroup,
consumerGroupAppIdMap.getOrDefault(consumerGroup, new ArrayList<>()),
location
)
);
consumerGroupList.add(new ConsumerGroup(clusterId, consumerGroup, location));
}
}
return consumerGroupDTOList;
return consumerGroupList;
}
@Override
public List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumeGroupDTO) {
public List<ConsumerGroupSummary> getConsumerGroupSummaries(Long clusterId, String topicName) {
    // Collect every consumer group (ZK-based and broker-based) of this topic.
    List<ConsumerGroup> groups = this.getConsumerGroupList(clusterId, topicName);
    if (ValidateUtils.isEmptyList(groups)) {
        return Collections.emptyList();
    }
    List<ConsumerGroupSummary> summaries = new ArrayList<>(groups.size());
    for (ConsumerGroup group : groups) {
        ConsumerGroupSummary summary;
        if (OffsetLocationEnum.ZOOKEEPER.equals(group.getOffsetStoreLocation())) {
            // ZK-stored groups: only identity fields are available, build a bare summary.
            summary = new ConsumerGroupSummary();
            summary.setClusterId(group.getClusterId());
            summary.setConsumerGroup(group.getConsumerGroup());
            summary.setOffsetStoreLocation(group.getOffsetStoreLocation());
        } else {
            // Broker-stored groups: query the broker for state and appId details.
            summary = getConsumerGroupSummary(clusterId, topicName, group.getConsumerGroup());
        }
        summaries.add(summary);
    }
    return summaries;
}
/**
 * Build a summary for a broker-offset consumer group: the group state plus the
 * list of appIds derived from the members' clientIds.
 *
 * @param clusterId     physical cluster id
 * @param topicName     only members assigned partitions of this topic contribute appIds
 * @param consumerGroup consumer group name
 * @return summary; on lookup failure the pre-filled defaults (empty state / appId list) are returned
 */
private ConsumerGroupSummary getConsumerGroupSummary(Long clusterId, String topicName, String consumerGroup) {
    // Pre-fill defaults so every exit path returns a fully populated object.
    ConsumerGroupSummary summary = new ConsumerGroupSummary();
    summary.setClusterId(clusterId);
    summary.setConsumerGroup(consumerGroup);
    summary.setOffsetStoreLocation(OffsetLocationEnum.BROKER);
    summary.setAppIdList(new ArrayList<>());
    summary.setState("");
    try {
        AdminClient adminClient = KafkaClientPool.getAdminClient(clusterId);
        AdminClient.ConsumerGroupSummary consumerGroupSummary = adminClient.describeConsumerGroup(consumerGroup);
        if (ValidateUtils.isNull(consumerGroupSummary)) {
            return summary;
        }
        summary.setState(consumerGroupSummary.state());
        // consumers() is a Scala collection; bridge to Java iterators/lists.
        java.util.Iterator<scala.collection.immutable.List<AdminClient.ConsumerSummary>> it = JavaConversions.asJavaIterator(consumerGroupSummary.consumers().iterator());
        while (it.hasNext()) {
            List<AdminClient.ConsumerSummary> consumerSummaryList = JavaConversions.asJavaList(it.next());
            for (AdminClient.ConsumerSummary consumerSummary: consumerSummaryList) {
                List<TopicPartition> topicPartitionList = JavaConversions.asJavaList(consumerSummary.assignment());
                if (ValidateUtils.isEmptyList(topicPartitionList)) {
                    continue;
                }
                // Count a member only if it is assigned this topic; the appId is taken
                // as the clientId prefix before the first '.' -- assumes the gateway's
                // "<appId>.<suffix>" clientId convention, TODO confirm.
                if (topicPartitionList.stream().anyMatch(elem -> elem.topic().equals(topicName)) && consumerSummary.clientId().contains(".")) {
                    String [] splitArray = consumerSummary.clientId().split("\\.");
                    summary.getAppIdList().add(splitArray[0]);
                }
            }
        }
    } catch (SchemaException e) {
        logger.error("class=ConsumerServiceImpl||method=getConsumerGroupSummary||clusterId={}||topicName={}||consumerGroup={}||errMsg={}||schema exception",
                clusterId, topicName, consumerGroup, e.getMessage());
    } catch (Exception e) {
        logger.error("class=ConsumerServiceImpl||method=getConsumerGroupSummary||clusterId={}||topicName={}||consumerGroup={}||errMsg={}||throws exception",
                clusterId, topicName, consumerGroup, e.getMessage());
    }
    // De-duplicate appIds collected from multiple members/partitions.
    summary.setAppIdList(new ArrayList<>(new HashSet<>(summary.getAppIdList())));
    return summary;
}
@Override
public List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (topicMetadata == null) {
logger.warn("class=ConsumerServiceImpl||method=getConsumeDetail||clusterId={}||topicName={}||msg=topicMetadata is null!",
@@ -116,10 +164,10 @@ public class ConsumerServiceImpl implements ConsumerService {
}
List<ConsumeDetailDTO> consumerGroupDetailDTOList = null;
if (OffsetLocationEnum.ZOOKEEPER.equals(consumeGroupDTO.getOffsetStoreLocation())) {
consumerGroupDetailDTOList = getConsumerPartitionStateInZK(clusterDO, topicMetadata, consumeGroupDTO);
} else if (OffsetLocationEnum.BROKER.equals(consumeGroupDTO.getOffsetStoreLocation())){
consumerGroupDetailDTOList = getConsumerPartitionStateInBroker(clusterDO, topicMetadata, consumeGroupDTO);
if (OffsetLocationEnum.ZOOKEEPER.equals(consumerGroup.getOffsetStoreLocation())) {
consumerGroupDetailDTOList = getConsumerPartitionStateInZK(clusterDO, topicMetadata, consumerGroup);
} else if (OffsetLocationEnum.BROKER.equals(consumerGroup.getOffsetStoreLocation())){
consumerGroupDetailDTOList = getConsumerPartitionStateInBroker(clusterDO, topicMetadata, consumerGroup);
}
if (consumerGroupDetailDTOList == null) {
logger.info("class=ConsumerServiceImpl||method=getConsumeDetail||msg=consumerGroupDetailDTOList is null!");
@@ -147,7 +195,7 @@ public class ConsumerServiceImpl implements ConsumerService {
}
@Override
public List<Result> resetConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO, List<PartitionOffsetDTO> partitionOffsetDTOList) {
public List<Result> resetConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup, List<PartitionOffsetDTO> partitionOffsetDTOList) {
Map<TopicPartition, Long> offsetMap = partitionOffsetDTOList.stream().collect(Collectors.toMap(elem -> {return new TopicPartition(topicName, elem.getPartitionId());}, PartitionOffsetDTO::getOffset));
List<Result> resultList = new ArrayList<>();
@@ -155,12 +203,12 @@ public class ConsumerServiceImpl implements ConsumerService {
KafkaConsumer<String, String> kafkaConsumer = null;
try {
Properties properties = KafkaClientPool.createProperties(clusterDO, false);
properties.setProperty("group.id", consumerGroupDTO.getConsumerGroup());
properties.setProperty("group.id", consumerGroup.getConsumerGroup());
kafkaConsumer = new KafkaConsumer<>(properties);
checkAndCorrectPartitionOffset(kafkaConsumer, offsetMap);
return resetConsumerOffset(clusterDO, kafkaConsumer, consumerGroupDTO, offsetMap);
return resetConsumerOffset(clusterDO, kafkaConsumer, consumerGroup, offsetMap);
} catch (Exception e) {
logger.error("create kafka consumer failed, clusterId:{} topicName:{} consumerGroup:{} partition:{}.", clusterDO.getId(), topicName, consumerGroupDTO, partitionOffsetDTOList, e);
logger.error("create kafka consumer failed, clusterId:{} topicName:{} consumerGroup:{} partition:{}.", clusterDO.getId(), topicName, consumerGroup, partitionOffsetDTOList, e);
resultList.add(new Result(
ResultStatus.OPERATION_FAILED.getCode(),
"reset failed, create KafkaConsumer or check offset failed"
@@ -173,20 +221,20 @@ public class ConsumerServiceImpl implements ConsumerService {
return resultList;
}
private List<Result> resetConsumerOffset(ClusterDO cluster, KafkaConsumer<String, String> kafkaConsumer, ConsumerGroupDTO consumerGroupDTO, Map<TopicPartition, Long> offsetMap) {
private List<Result> resetConsumerOffset(ClusterDO cluster, KafkaConsumer<String, String> kafkaConsumer, ConsumerGroup consumerGroup, Map<TopicPartition, Long> offsetMap) {
List<Result> resultList = new ArrayList<>();
for(Map.Entry<TopicPartition, Long> entry: offsetMap.entrySet()){
TopicPartition tp = entry.getKey();
Long offset = entry.getValue();
try {
if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetLocationEnum.ZOOKEEPER)) {
resetConsumerOffsetInZK(cluster, consumerGroupDTO.getConsumerGroup(), tp, offset);
} else if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetLocationEnum.BROKER)) {
if (consumerGroup.getOffsetStoreLocation().equals(OffsetLocationEnum.ZOOKEEPER)) {
resetConsumerOffsetInZK(cluster, consumerGroup.getConsumerGroup(), tp, offset);
} else if (consumerGroup.getOffsetStoreLocation().equals(OffsetLocationEnum.BROKER)) {
resetConsumerOffsetInBroker(kafkaConsumer, tp, offset);
}
} catch (Exception e) {
logger.error("reset failed, clusterId:{} consumerGroup:{} topic-partition:{}.", cluster.getId(), consumerGroupDTO, tp, e);
logger.error("reset failed, clusterId:{} consumerGroup:{} topic-partition:{}.", cluster.getId(), consumerGroup, tp, e);
resultList.add(new Result(
ResultStatus.OPERATION_FAILED.getCode(),
"reset failed..."));
@@ -232,14 +280,14 @@ public class ConsumerServiceImpl implements ConsumerService {
@Override
public Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isBlank(topicName) || ValidateUtils.isNull(consumerGroupDTO)) {
ConsumerGroup consumerGroup) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isBlank(topicName) || ValidateUtils.isNull(consumerGroup)) {
return null;
}
if (OffsetLocationEnum.BROKER.equals(consumerGroupDTO.getOffsetStoreLocation())) {
return getConsumerOffsetFromBK(clusterDO, topicName, consumerGroupDTO.getConsumerGroup());
} else if (OffsetLocationEnum.ZOOKEEPER.equals(consumerGroupDTO.getOffsetStoreLocation())) {
return getConsumerOffsetFromZK(clusterDO.getId(), topicName, consumerGroupDTO.getConsumerGroup());
if (OffsetLocationEnum.BROKER.equals(consumerGroup.getOffsetStoreLocation())) {
return getConsumerOffsetFromBK(clusterDO, topicName, consumerGroup.getConsumerGroup());
} else if (OffsetLocationEnum.ZOOKEEPER.equals(consumerGroup.getOffsetStoreLocation())) {
return getConsumerOffsetFromZK(clusterDO.getId(), topicName, consumerGroup.getConsumerGroup());
}
return null;
}
@@ -306,9 +354,9 @@ public class ConsumerServiceImpl implements ConsumerService {
return consumerIdMap;
}
private List<ConsumeDetailDTO> getConsumerPartitionStateInBroker(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroupDTO consumerGroupDTO) {
Map<Integer, String> consumerIdMap = getConsumeIdMap(clusterDO.getId(), topicMetadata.getTopic(), consumerGroupDTO.getConsumerGroup());
Map<Integer, String> consumeOffsetMap = getOffsetByGroupAndTopicFromBroker(clusterDO, consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic());
private List<ConsumeDetailDTO> getConsumerPartitionStateInBroker(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroup consumerGroup) {
Map<Integer, String> consumerIdMap = getConsumeIdMap(clusterDO.getId(), topicMetadata.getTopic(), consumerGroup.getConsumerGroup());
Map<Integer, String> consumeOffsetMap = getOffsetByGroupAndTopicFromBroker(clusterDO, consumerGroup.getConsumerGroup(), topicMetadata.getTopic());
List<ConsumeDetailDTO> consumeDetailDTOList = new ArrayList<>();
for (int partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
@@ -318,7 +366,7 @@ public class ConsumerServiceImpl implements ConsumerService {
try {
consumeDetailDTO.setConsumeOffset(StringUtils.isEmpty(consumeOffsetStr)? null: Long.valueOf(consumeOffsetStr));
} catch (Exception e) {
logger.error("illegal consumer offset, clusterId:{} topicName:{} consumerGroup:{} offset:{}.", clusterDO.getId(), topicMetadata.getTopic(), consumerGroupDTO.getConsumerGroup(), consumeOffsetStr, e);
logger.error("illegal consumer offset, clusterId:{} topicName:{} consumerGroup:{} offset:{}.", clusterDO.getId(), topicMetadata.getTopic(), consumerGroup.getConsumerGroup(), consumeOffsetStr, e);
}
consumeDetailDTO.setConsumerId(consumerIdMap.get(partitionId));
consumeDetailDTOList.add(consumeDetailDTO);
@@ -326,21 +374,19 @@ public class ConsumerServiceImpl implements ConsumerService {
return consumeDetailDTOList;
}
private List<ConsumeDetailDTO> getConsumerPartitionStateInZK(ClusterDO clusterDO,
TopicMetadata topicMetadata,
ConsumerGroupDTO consumerGroupDTO) {
private List<ConsumeDetailDTO> getConsumerPartitionStateInZK(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroup consumerGroup) {
ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterDO.getId());
List<ConsumeDetailDTO> consumeDetailDTOList = new ArrayList<>();
for (Integer partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
String consumeGroupPath = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumeGroupPath = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroup.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumeOffset = null;
try {
consumeOffset = zkConfig.get(consumeGroupPath);
} catch (ConfigException e) {
logger.error("get consumeOffset error for zk path:{}", consumeGroupPath, e);
}
String consumeIdZkPath = ZkPathUtil.getConsumerGroupOwnersTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumeIdZkPath = ZkPathUtil.getConsumerGroupOwnersTopicPartitionNode(consumerGroup.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
String consumerId = null;
try {
consumerId = zkConfig.get(consumeIdZkPath);
@@ -394,7 +440,7 @@ public class ConsumerServiceImpl implements ConsumerService {
@Override
public boolean checkConsumerGroupExist(OffsetLocationEnum offsetLocation, Long clusterId, String topicName, String consumerGroup) {
List<ConsumerGroupDTO> consumerGroupList = getConsumerGroupList(clusterId, topicName).stream()
List<ConsumerGroup> consumerGroupList = getConsumerGroupList(clusterId, topicName).stream()
.filter(group -> offsetLocation.location.equals(group.getOffsetStoreLocation().location) && consumerGroup.equals(group.getConsumerGroup()))
.collect(Collectors.toList());
return !ValidateUtils.isEmptyList(consumerGroupList);

View File

@@ -3,6 +3,7 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.RdTopicBasic;
@@ -14,6 +15,7 @@ import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.DateUtils;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.NumberUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
@@ -33,6 +35,7 @@ import com.xiaojukeji.kafka.manager.service.utils.KafkaZookeeperUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
@@ -345,6 +348,47 @@ public class TopicManagerServiceImpl implements TopicManagerService {
return ResultStatus.MYSQL_ERROR;
}
@Override
public ResultStatus modifyTopicByOp(Long clusterId, String topicName, String appId, String description, String operator) {
    try {
        if (!PhysicalClusterMetadataManager.isTopicExist(clusterId, topicName)) {
            return ResultStatus.TOPIC_NOT_EXIST;
        }
        AppDO appDO = appService.getByAppId(appId);
        if (ValidateUtils.isNull(appDO)) {
            return ResultStatus.APP_NOT_EXIST;
        }
        TopicDO topicDO = topicDao.getByTopicName(clusterId, topicName);
        if (ValidateUtils.isNull(topicDO)) {
            // Not in DB yet: insert a new record with the default quota.
            topicDO = new TopicDO();
            topicDO.setAppId(appId);
            topicDO.setClusterId(clusterId);
            topicDO.setTopicName(topicName);
            topicDO.setPeakBytesIn(TopicCreationConstant.DEFAULT_QUOTA);
            topicDO.setDescription(description);
            this.addTopic(topicDO);
        } else {
            // Already present: only update the owning app and the description.
            topicDO.setAppId(appId);
            topicDO.setDescription(description);
            topicDao.updateByName(topicDO);
        }
        // Grant the owning app read/write authority on the topic.
        AuthorityDO authorityDO = new AuthorityDO();
        authorityDO.setAppId(appId);
        authorityDO.setClusterId(clusterId);
        authorityDO.setTopicName(topicName);
        authorityDO.setAccess(TopicAuthorityEnum.READ_WRITE.getCode());
        authorityService.addAuthority(authorityDO);
        // BUG FIX: the success path previously fell through to MYSQL_ERROR,
        // so a fully successful modification was reported as a failure.
        return ResultStatus.SUCCESS;
    } catch (Exception e) {
        LOGGER.error("modify topic failed, clusterId:{} topicName:{} description:{} operator:{} ",
                clusterId, topicName, description, operator, e);
    }
    return ResultStatus.MYSQL_ERROR;
}
@Override
public int deleteByTopicName(Long clusterId, String topicName) {
try {
@@ -359,6 +403,9 @@ public class TopicManagerServiceImpl implements TopicManagerService {
public int addTopic(TopicDO topicDO) {
try {
return topicDao.insert(topicDO);
} catch (DuplicateKeyException duplicateKeyException) {
// 主建重复了, 非重要问题
LOGGER.debug("class=TopicManagerServiceImpl||method=addTopic||data={}||msg=exist duplicate topic", JsonUtils.toJSONString(topicDO));
} catch (Exception e) {
LOGGER.error("insert topic failed, TopicDO:{}", topicDO.toString(), e);
}

View File

@@ -29,6 +29,7 @@ import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.service.gateway.AppService;
import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
import com.xiaojukeji.kafka.manager.service.utils.KafkaZookeeperUtils;
import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -83,6 +84,9 @@ public class TopicServiceImpl implements TopicService {
@Autowired
private RegionService regionService;
@Autowired
private AbstractHealthScoreStrategy healthScoreStrategy;
@Override
public List<TopicMetricsDO> getTopicMetricsFromDB(Long clusterId, String topicName, Date startTime, Date endTime) {
try {
@@ -235,7 +239,7 @@ public class TopicServiceImpl implements TopicService {
basicDTO.setRegionNameList(regionDOList.stream().map(RegionDO::getName).collect(Collectors.toList()));
basicDTO.setTopicCodeC(jmxService.getTopicCodeCValue(clusterId, topicName));
basicDTO.setScore(100);
basicDTO.setScore(healthScoreStrategy.calTopicHealthScore(clusterId, topicName));
return basicDTO;
}

View File

@@ -2,8 +2,10 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkPathUtil;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi.TopicJmxSwitch;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
@@ -13,6 +15,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
/**
* @author zengqiao
* @date 20/8/27
@@ -40,4 +45,29 @@ public class ZookeeperServiceImpl implements ZookeeperService {
}
return new Result();
}
@Override
public Result<List<Integer>> getControllerPreferredCandidates(Long clusterId) {
    // A null cluster id cannot be resolved to a ZK connection.
    if (ValidateUtils.isNull(clusterId)) {
        return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
    }
    ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterId);
    if (ValidateUtils.isNull(zkConfig)) {
        return Result.buildFrom(ResultStatus.CONNECT_ZOOKEEPER_FAILED);
    }
    try {
        // The candidates node may legitimately be absent or empty -> empty list.
        if (zkConfig.checkPathExists(ZkPathUtil.D_CONTROLLER_CANDIDATES)) {
            List<String> brokerIds = zkConfig.getChildren(ZkPathUtil.D_CONTROLLER_CANDIDATES);
            if (!ValidateUtils.isEmptyList(brokerIds)) {
                return Result.buildSuc(ListUtils.string2IntList(ListUtils.strList2String(brokerIds)));
            }
        }
        return Result.buildSuc(new ArrayList<>());
    } catch (Exception e) {
        LOGGER.error("class=ZookeeperServiceImpl||method=getControllerPreferredCandidates||clusterId={}||errMsg={}", clusterId, e.getMessage());
    }
    return Result.buildFrom(ResultStatus.READ_ZOOKEEPER_FAILED);
}
}

View File

@@ -72,8 +72,8 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy {
// 数据获取失败
return Constant.INVALID_CODE;
}
if (((Double) failedFetchRequestsPerSecOneMinuteRate) > 0
|| ((Double) failedProduceRequestsPerSecOneMinuteRate) > 0) {
if (((Double) failedFetchRequestsPerSecOneMinuteRate) > 0.01
|| ((Double) failedProduceRequestsPerSecOneMinuteRate) > 0.01) {
return HEALTH_SCORE_VERY_BAD;
}

View File

@@ -5,6 +5,8 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata
import com.xiaojukeji.kafka.manager.common.zookeeper.StateChangeListener;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkPathUtil;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.ThreadPool;
import org.apache.zookeeper.data.Stat;
@@ -28,11 +30,22 @@ public class TopicStateListener implements StateChangeListener {
private ZkConfigImpl zkConfig;
private TopicDao topicDao;
private AuthorityDao authorityDao;
public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) {
this.clusterId = clusterId;
this.zkConfig = zkConfig;
}
public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig, TopicDao topicDao, AuthorityDao authorityDao) {
this.clusterId = clusterId;
this.zkConfig = zkConfig;
this.topicDao = topicDao;
this.authorityDao = authorityDao;
}
@Override
public void init() {
try {
@@ -79,6 +92,8 @@ public class TopicStateListener implements StateChangeListener {
private void processTopicDelete(String topicName) {
LOGGER.warn("delete topic, clusterId:{} topicName:{}.", clusterId, topicName);
PhysicalClusterMetadataManager.removeTopicMetadata(clusterId, topicName);
topicDao.removeTopicInCache(clusterId, topicName);
authorityDao.removeAuthorityInCache(clusterId, topicName);
}
private void processTopicAdded(String topicName) {

View File

@@ -22,4 +22,6 @@ public interface TopicDao {
List<TopicDO> listAll();
TopicDO getTopic(Long clusterId, String topicName, String appId);
TopicDO removeTopicInCache(Long clusterId, String topicName);
}

View File

@@ -37,4 +37,8 @@ public interface AuthorityDao {
List<AuthorityDO> listAll();
Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority();
void removeAuthorityInCache(Long clusterId, String topicName);
int deleteAuthorityByTopic(Long clusterId, String topicName);
}

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.dao.gateway;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import java.util.List;
@@ -12,4 +13,14 @@ public interface GatewayConfigDao {
List<GatewayConfigDO> getByConfigType(String configType);
GatewayConfigDO getByConfigTypeAndName(String configType, String configName);
List<GatewayConfigDO> list();
int insert(GatewayConfigDO gatewayConfigDO);
int deleteById(Long id);
int updateById(GatewayConfigDO gatewayConfigDO);
GatewayConfigDO getById(Long id);
}

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.dao.gateway.impl;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
@@ -86,6 +87,32 @@ public class AuthorityDaoImpl implements AuthorityDao {
return AUTHORITY_MAP;
}
@Override
public void removeAuthorityInCache(Long clusterId, String topicName) {
    // Evict the (clusterId, topicName) authority entries from the in-memory cache,
    // pruning cluster maps and appId entries that become empty.
    // BUG FIX: the previous version called remove() on the very maps it was
    // iterating with forEach(); on plain HashMaps that throws
    // ConcurrentModificationException. removeIf() removes via the iterator safely.
    AUTHORITY_MAP.values().forEach(clusterMap ->
            clusterMap.entrySet().removeIf(entry -> {
                if (entry.getKey().equals(clusterId)) {
                    entry.getValue().remove(topicName);
                    // Drop the cluster entry once its topic map is empty.
                    return entry.getValue().isEmpty();
                }
                return false;
            })
    );
    // Drop appIds whose cluster map is now empty.
    AUTHORITY_MAP.entrySet().removeIf(entry -> entry.getValue().isEmpty());
}
@Override
public int deleteAuthorityByTopic(Long clusterId, String topicName) {
    // Delete all authority rows for this (cluster, topic); returns affected row count.
    Map<String, Object> queryParams = new HashMap<>(2);
    queryParams.put("clusterId", clusterId);
    queryParams.put("topicName", topicName);
    return sqlSession.delete("AuthorityDao.deleteByTopic", queryParams);
}
private void updateAuthorityCache() {
Long timestamp = System.currentTimeMillis();

View File

@@ -35,4 +35,29 @@ public class GatewayConfigDaoImpl implements GatewayConfigDao {
params.put("configName", configName);
return sqlSession.selectOne("GatewayConfigDao.getByConfigTypeAndName", params);
}
/**
 * List every gateway_config row (MyBatis statement: GatewayConfigDao.list).
 */
@Override
public List<GatewayConfigDO> list() {
return sqlSession.selectList("GatewayConfigDao.list");
}
/**
 * Insert a new gateway config row; returns the number of affected rows.
 */
@Override
public int insert(GatewayConfigDO gatewayConfigDO) {
return sqlSession.insert("GatewayConfigDao.insert", gatewayConfigDO);
}
/**
 * Delete a gateway config row by primary key; returns the number of affected rows.
 */
@Override
public int deleteById(Long id) {
return sqlSession.delete("GatewayConfigDao.deleteById", id);
}
/**
 * Update a gateway config row by primary key; returns the number of affected rows.
 */
@Override
public int updateById(GatewayConfigDO gatewayConfigDO) {
return sqlSession.update("GatewayConfigDao.updateById", gatewayConfigDO);
}
/**
 * Fetch a single gateway config row by primary key, or null if absent.
 */
@Override
public GatewayConfigDO getById(Long id) {
return sqlSession.selectOne("GatewayConfigDao.getById", id);
}
}

View File

@@ -89,6 +89,11 @@ public class TopicDaoImpl implements TopicDao {
return sqlSession.selectOne("TopicDao.getTopic", params);
}
@Override
public TopicDO removeTopicInCache(Long clusterId, String topicName) {
    // Evict the topic from the in-memory cache; returns the removed entry or null.
    // Avoids the throwaway HashMap that getOrDefault(..., new HashMap<>(0))
    // allocated on every cache miss in the previous version.
    return TOPIC_MAP.containsKey(clusterId) ? TOPIC_MAP.get(clusterId).remove(topicName) : null;
}
private void updateTopicCache() {
Long timestamp = System.currentTimeMillis();

View File

@@ -45,4 +45,9 @@
<select id="listAfterTime" parameterType="java.util.Date" resultMap="AuthorityMap">
SELECT * FROM authority WHERE modify_time >= #{afterTime}
</select>
<delete id="deleteByTopic" parameterType="java.util.Map">
DELETE FROM authority WHERE cluster_id = #{clusterId} AND topic_name = #{topicName}
</delete>
</mapper>

View File

@@ -19,4 +19,38 @@
<select id="getByConfigTypeAndName" parameterType="java.util.Map" resultMap="GatewayConfigMap">
SELECT * FROM gateway_config WHERE `type`=#{configType} AND `name`=#{configName}
</select>
<select id="list" resultMap="GatewayConfigMap">
SELECT * FROM gateway_config
</select>
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO">
<![CDATA[
INSERT INTO gateway_config
(`type`, name, value, version)
VALUES
(#{type}, #{name}, #{value}, #{version})
]]>
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
<![CDATA[
DELETE FROM gateway_config WHERE id=#{id}
]]>
</delete>
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO">
<![CDATA[
UPDATE gateway_config SET
`type`=#{type},
`name`=#{name},
`value`=#{value},
`version`=#{version}
WHERE id=#{id}
]]>
</update>
<select id="getById" parameterType="java.lang.Long" resultMap="GatewayConfigMap">
SELECT * FROM gateway_config WHERE id=#{id}
</select>
</mapper>

View File

@@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="DeprecatedKafkaAclDao">
<resultMap id="DeprecatedKafkaAclMap" type="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaAclDO">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="user_name" jdbcType="VARCHAR" property="userName" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="topic_name" jdbcType="VARCHAR" property="topicName" />
<result column="access" jdbcType="INTEGER" property="access" />
<result column="operation" jdbcType="INTEGER" property="operation" />
<result column="gm_create" jdbcType="TIMESTAMP" property="gmCreate" />
<result column="gm_modify" jdbcType="TIMESTAMP" property="gmModify" />
</resultMap>
<insert id="insert"
parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaAclDO"
useGeneratedKeys="true"
keyProperty="id">
INSERT INTO kafka_acl
(cluster_id, topic_name, user_name, access, operation, gm_create, gm_modify)
VALUES
(#{clusterId}, #{topicName}, #{userName}, #{access}, #{operation}, #{gmCreate}, #{gmModify})
</insert>
<select id="listAll" resultMap="DeprecatedKafkaAclMap">
SELECT * FROM kafka_acl ORDER BY gm_create ASC
</select>
</mapper>

View File

@@ -1,29 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="DeprecatedKafkaUserDao">
<resultMap id="DeprecatedKafkaUserDOMap" type="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaUserDO">
<id property="id" column="id"/>
<result property="name" column="name"/>
<result property="password" column="password"/>
<result property="userType" column="user_type"/>
<result property="operation" column="operation"/>
<result property="gmtCreate" column="gm_create"/>
<result property="gmtModify" column="gm_modify"/>
</resultMap>
<insert id="insert"
parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaUserDO"
useGeneratedKeys="true"
keyProperty="id">
INSERT INTO kafka_user
(`name`, password, user_type, operation, gm_create, gm_modify)
VALUES
(#{name}, #{password}, #{userType}, #{operation}, #{gmtCreate}, #{gmtModify})
</insert>
<select id="listAll" resultMap="DeprecatedKafkaUserDOMap">
SELECT * FROM kafka_user
</select>
</mapper>

View File

@@ -7,30 +7,80 @@ import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.pojo.AccountDO;
import java.util.List;
import java.util.Map;
/**
 * Account management service: CRUD on accounts plus cache-backed role and
 * order-approver lookups.
 *
 * @author huangyiminghappy@163.com
 * @date 2019-04-26
 */
public interface AccountService {
    /**
     * Create a new account.
     * @param accountDO account to persist
     * @return operation status
     */
    ResultStatus createAccount(AccountDO accountDO);

    /**
     * Look up an account by username.
     * @param username account name
     * @return the account, or null if it does not exist -- TODO confirm null contract
     */
    AccountDO getAccountDO(String username);

    /**
     * Delete an account by username.
     * @param username account name
     * @return operation status
     */
    ResultStatus deleteByName(String username);

    /**
     * Update an existing account.
     * @param accountDO account with the new field values
     * @return operation status
     */
    ResultStatus updateAccount(AccountDO accountDO);

    /**
     * List all accounts.
     * @return every persisted account
     */
    List<AccountDO> list();

    /**
     * Search enterprise staff whose identifier starts with the given prefix.
     * @param prefix username prefix to match
     * @return matching staff entries
     */
    List<EnterpriseStaff> searchAccountByPrefix(String prefix);

    /**
     * Get the user's role from the cache.
     * @param username account name
     * @return the account's role
     */
    AccountRoleEnum getAccountRoleFromCache(String username);

    /**
     * Get the user's account information from the cache.
     * @param userName account name
     * @return the cached account
     */
    Account getAccountFromCache(String userName);

    /**
     * Whether the user is an approver for administrator orders.
     * @param username account name
     * @return true if the user may approve admin orders
     */
    boolean isAdminOrderHandler(String username);

    /**
     * Whether the user has the OP (operations) or RD (R&D) role.
     * @param username account name
     * @return true if the user is OP or RD
     */
    boolean isOpOrRd(String username);

    /**
     * Get the cached list of administrator-order approvers.
     * @return approver accounts
     */
    List<Account> getAdminOrderHandlerFromCache();
}

View File

@@ -226,6 +226,18 @@ public class AccountServiceImpl implements AccountService {
return false;
}
@Override
public boolean isOpOrRd(String username) {
    // Lazily (re)build the role cache before the lookup.
    if (ValidateUtils.isNull(ACCOUNT_ROLE_CACHE)) {
        flush();
    }
    // Unknown users default to NORMAL, which is neither OP nor RD.
    AccountRoleEnum role = ACCOUNT_ROLE_CACHE.getOrDefault(username, AccountRoleEnum.NORMAL);
    return AccountRoleEnum.OP.equals(role) || AccountRoleEnum.RD.equals(role);
}
@Override
public List<Account> getAdminOrderHandlerFromCache() {
if (ValidateUtils.isEmptyList(ADMIN_ORDER_HANDLER_CACHE)) {

View File

@@ -25,6 +25,10 @@ public enum OrderTypeEnum {
APPLY_EXPAND_CLUSTER (05, "集群扩容", "modifyClusterOrder"),
APPLY_REDUCE_CLUSTER (15, "集群缩容", "modifyClusterOrder"),
ADD_GATEWAY_CONFIG (06, "增加GateWay配置", "addGatewayConfigOrder"),
DELETE_GATEWAY_CONFIG (16, "删除GateWay配置", "deleteGatewayConfigOrder"),
MODIFY_GATEWAY_CONFIG (26, "修改GateWay配置", "modifyGatewayConfigOrder"),
;
private Integer code;

View File

@@ -0,0 +1,62 @@
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
 * Request payload for an "add gateway config" order.
 * @author zengqiao
 * @date 2021/01/12
 */
public class OrderExtensionAddGatewayConfigDTO {
    @ApiModelProperty(value = "类型")
    private String type;

    @ApiModelProperty(value = "名称")
    private String name;

    @ApiModelProperty(value = "")
    private String value;

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        return "OrderExtensionAddGatewayConfigDTO{" +
                "type='" + type + '\'' +
                ", name='" + name + '\'' +
                ", value='" + value + '\'' +
                '}';
    }

    /**
     * All three fields are mandatory for the order to be accepted.
     * @return true when type, name and value are all non-blank
     */
    public boolean legal() {
        return !ValidateUtils.isBlank(type)
                && !ValidateUtils.isBlank(name)
                && !ValidateUtils.isBlank(value);
    }
}

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
 * Request payload for a "delete gateway config" order.
 * @author zengqiao
 * @date 2021/01/12
 */
public class OrderExtensionDeleteGatewayConfigDTO {
    @ApiModelProperty(value = "配置ID")
    private Long id;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    @Override
    public String toString() {
        return "OrderExtensionDeleteGatewayConfigDTO{" +
                "id=" + id +
                '}';
    }

    /**
     * The id of the config to delete is the only required field.
     * @return true when id is present
     */
    public boolean legal() {
        return !ValidateUtils.isNull(id);
    }
}

View File

@@ -0,0 +1,77 @@
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
 * Request payload for a "modify gateway config" order.
 * @author zengqiao
 * @date 2021/01/12
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class OrderExtensionModifyGatewayConfigDTO {
    @ApiModelProperty(value = "配置ID")
    private Long id;

    @ApiModelProperty(value = "类型")
    private String type;

    @ApiModelProperty(value = "名称")
    private String name;

    @ApiModelProperty(value = "")
    private String value;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        return "OrderExtensionModifyGatewayConfigDTO{" +
                "id=" + id +
                ", type='" + type + '\'' +
                ", name='" + name + '\'' +
                ", value='" + value + '\'' +
                '}';
    }

    /**
     * Every field is mandatory: the id locates the config, the remaining
     * fields carry the new content.
     * @return true when id, name, type and value are all present
     */
    public boolean legal() {
        return !ValidateUtils.isNull(id)
                && !ValidateUtils.isBlank(name)
                && !ValidateUtils.isBlank(type)
                && !ValidateUtils.isBlank(value);
    }
}

View File

@@ -0,0 +1,65 @@
package com.xiaojukeji.kafka.manager.bpm.common.entry.detail;
/**
 * Order-detail view of a single gateway config entry
 * (id/type/name/value plus the optimistic-lock version).
 */
public class OrderDetailGatewayConfigData extends AbstractOrderDetailData {
    private Long id;

    private String type;

    private String name;

    private String value;

    private Long version;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    public Long getVersion() {
        return version;
    }

    public void setVersion(Long version) {
        this.version = version;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("OrderDetailGatewayConfigData{");
        sb.append("id=").append(id);
        sb.append(", type='").append(type).append('\'');
        sb.append(", name='").append(name).append('\'');
        sb.append(", value='").append(value).append('\'');
        sb.append(", version=").append(version);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -0,0 +1,42 @@
package com.xiaojukeji.kafka.manager.bpm.common.entry.detail;
/**
 * Order-detail view of a gateway-config modification: the entry as it
 * currently exists (old) next to the requested replacement (new).
 * @author zengqiao
 * @date 2021/01/13
 */
public class OrderDetailGatewayConfigModifyData extends AbstractOrderDetailData {
    // Config entry as currently stored.
    private OrderDetailGatewayConfigData oldGatewayConfig;

    // Config entry as requested by the order.
    private OrderDetailGatewayConfigData newGatewayConfig;

    public OrderDetailGatewayConfigData getOldGatewayConfig() {
        return oldGatewayConfig;
    }

    public void setOldGatewayConfig(OrderDetailGatewayConfigData oldGatewayConfig) {
        this.oldGatewayConfig = oldGatewayConfig;
    }

    public OrderDetailGatewayConfigData getNewGatewayConfig() {
        return newGatewayConfig;
    }

    public void setNewGatewayConfig(OrderDetailGatewayConfigData newGatewayConfig) {
        this.newGatewayConfig = newGatewayConfig;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("OrderDetailGatewayConfigModifyData{");
        sb.append("oldGatewayConfig=").append(oldGatewayConfig);
        sb.append(", newGatewayConfig=").append(newGatewayConfig);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -0,0 +1,27 @@
package com.xiaojukeji.kafka.manager.bpm.order;
import com.xiaojukeji.kafka.manager.account.AccountService;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
/**
 * Common base for all gateway-config order handlers: authorization and
 * approver resolution are identical for add/delete/modify orders.
 */
public abstract class AbstractGatewayConfigOrder extends AbstractOrder {
    @Autowired
    private AccountService accountService;

    /**
     * Only admin-order approvers may handle gateway-config orders.
     * @param orderDO  the order being handled (not inspected here)
     * @param username current user
     * @return SUCCESS if authorized, USER_WITHOUT_AUTHORITY otherwise
     */
    @Override
    public ResultStatus checkAuthority(OrderDO orderDO, String username) {
        if (!accountService.isAdminOrderHandler(username)) {
            return ResultStatus.USER_WITHOUT_AUTHORITY;
        }
        return ResultStatus.SUCCESS;
    }

    /**
     * Gateway-config orders are approved by the cached admin-order handlers.
     * @param extensions order extension payload (unused here)
     * @return approver accounts from the cache
     */
    @Override
    public List<Account> getApproverList(String extensions) {
        return accountService.getAdminOrderHandlerFromCache();
    }
}

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.kafka.manager.bpm.order.impl.gateway;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractGatewayConfigOrder;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import org.springframework.stereotype.Component;
/**
 * Order handler for adding a gateway config entry.
 *
 * NOTE(review): all three hooks are no-ops — validation always succeeds,
 * no detail view is produced, and approval performs no persistence.
 * Presumably the actual config insertion happens elsewhere or is not yet
 * implemented — TODO confirm against the order-processing flow.
 * @author zengqiao
 * @date 2021/01/12
 */
@Component("addGatewayConfigOrder")
public class AddGatewayConfigOrder extends AbstractGatewayConfigOrder {
    // Accepts any extension payload without validation or a generated title.
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        return Result.buildSuc();
    }

    // No detail view for this order type.
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        return null;
    }

    // Approval is a no-op; reports success unconditionally.
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO, OrderHandleBaseDTO baseDTO, String userName) {
        return ResultStatus.SUCCESS;
    }
}

View File

@@ -0,0 +1,31 @@
package com.xiaojukeji.kafka.manager.bpm.order.impl.gateway;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractGatewayConfigOrder;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import org.springframework.stereotype.Component;
/**
 * Order handler for deleting a gateway config entry.
 *
 * NOTE(review): all three hooks are no-ops — validation always succeeds,
 * no detail view is produced, and approval performs no deletion.
 * Presumably the actual removal happens elsewhere or is not yet
 * implemented — TODO confirm against the order-processing flow.
 * @author zengqiao
 * @date 2021/01/12
 */
@Component("deleteGatewayConfigOrder")
public class DeleteGatewayConfigOrder extends AbstractGatewayConfigOrder {
    // Accepts any extension payload without validation or a generated title.
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        return Result.buildSuc();
    }

    // No detail view for this order type.
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        return null;
    }

    // Approval is a no-op; reports success unconditionally.
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO, OrderHandleBaseDTO baseDTO, String userName) {
        return ResultStatus.SUCCESS;
    }
}

View File

@@ -0,0 +1,109 @@
package com.xiaojukeji.kafka.manager.bpm.order.impl.gateway;
import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.bpm.common.OrderTypeEnum;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionModifyGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.OrderDetailGatewayConfigData;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.OrderDetailGatewayConfigModifyData;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractGatewayConfigOrder;
import com.xiaojukeji.kafka.manager.common.bizenum.gateway.GatewayConfigKeyEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
 * Order handler for modifying a gateway config entry.
 * @author zengqiao
 * @date 2021/01/12
 */
@Component("modifyGatewayConfigOrder")
public class ModifyGatewayConfigOrder extends AbstractGatewayConfigOrder {
    private static final Logger LOGGER = LoggerFactory.getLogger(ModifyGatewayConfigOrder.class);

    @Autowired
    private GatewayConfigService gatewayConfigService;

    // NOTE(review): currently unused in this class; kept because removal may
    // break planned audit-record wiring — TODO confirm.
    @Autowired
    private OperateRecordService operateRecordService;

    /**
     * Validate the extension payload (well-formed JSON, all fields present,
     * target config exists, config type known) and produce the order title.
     * @param extensions JSON-serialized OrderExtensionModifyGatewayConfigDTO
     * @return the order title on success, PARAM_ILLEGAL otherwise
     */
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        OrderExtensionModifyGatewayConfigDTO orderExtensionDTO = null;
        try {
            orderExtensionDTO = JSONObject.parseObject(extensions, OrderExtensionModifyGatewayConfigDTO.class);
        } catch (Exception e) {
            LOGGER.error("class=ModifyGatewayConfigOrder||method=checkExtensionFieldsAndGenerateTitle||params={}||errMsg={}", extensions, e.getMessage());
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        if (!orderExtensionDTO.legal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        GatewayConfigDO gatewayConfigDO = gatewayConfigService.getById(orderExtensionDTO.getId());
        if (ValidateUtils.isNull(gatewayConfigDO)) {
            // The config to modify does not exist.
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        GatewayConfigKeyEnum configKeyEnum = GatewayConfigKeyEnum.getByConfigType(orderExtensionDTO.getType());
        if (ValidateUtils.isNull(configKeyEnum)) {
            // Unknown config type.
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return new Result<>(OrderTypeEnum.MODIFY_GATEWAY_CONFIG.getMessage());
    }

    /**
     * Build the old/new comparison view for the order detail page.
     * @param extensions JSON-serialized OrderExtensionModifyGatewayConfigDTO
     * @return the detail data, or null if the payload cannot be parsed;
     *         oldGatewayConfig is left null when the stored config is gone
     */
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        OrderExtensionModifyGatewayConfigDTO orderExtensionDTO = null;
        try {
            orderExtensionDTO = JSONObject.parseObject(extensions, OrderExtensionModifyGatewayConfigDTO.class);
        } catch (Exception e) {
            LOGGER.error("class=ModifyGatewayConfigOrder||method=getOrderExtensionDetailData||params={}||errMsg={}", extensions, e.getMessage());
            return null;
        }
        // Result container.
        OrderDetailGatewayConfigModifyData orderDetailDTO = new OrderDetailGatewayConfigModifyData();

        // Requested (new) config comes from the order payload.
        OrderDetailGatewayConfigData newGatewayConfig = new OrderDetailGatewayConfigData();
        newGatewayConfig.setId(orderExtensionDTO.getId());
        newGatewayConfig.setType(orderExtensionDTO.getType());
        newGatewayConfig.setName(orderExtensionDTO.getName());
        newGatewayConfig.setValue(orderExtensionDTO.getValue());
        orderDetailDTO.setNewGatewayConfig(newGatewayConfig);

        GatewayConfigDO gatewayConfigDO = gatewayConfigService.getById(orderExtensionDTO.getId());
        if (ValidateUtils.isNull(gatewayConfigDO)) {
            // Stored config no longer exists; return only the new side.
            return orderDetailDTO;
        }
        // Current (old) config comes from storage.
        // BUGFIX: the original populated newGatewayConfig here by mistake,
        // overwriting the requested values and leaving oldGatewayConfig empty.
        OrderDetailGatewayConfigData oldGatewayConfig = new OrderDetailGatewayConfigData();
        oldGatewayConfig.setId(gatewayConfigDO.getId());
        oldGatewayConfig.setType(gatewayConfigDO.getType());
        oldGatewayConfig.setName(gatewayConfigDO.getName());
        oldGatewayConfig.setValue(gatewayConfigDO.getValue());
        oldGatewayConfig.setVersion(gatewayConfigDO.getVersion());
        orderDetailDTO.setOldGatewayConfig(oldGatewayConfig);
        return orderDetailDTO;
    }

    /**
     * Approval hook.
     * NOTE(review): performs no persistence — the modification itself is
     * presumably applied elsewhere; TODO confirm against the order flow.
     */
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO, OrderHandleBaseDTO baseDTO, String username) {
        return ResultStatus.SUCCESS;
    }
}

View File

@@ -14,37 +14,119 @@ import com.xiaojukeji.kafka.manager.common.entity.pojo.MonitorRuleDO;
import java.util.List;
/**
 * Monitoring-system facade: alarm rules, alert history, silences and
 * notify groups.
 * @author zengqiao
 * @date 20/5/21
 */
public interface MonitorService {
    /**
     * Create an alarm rule.
     * @param monitorDTO alarm rule
     * @param operator   operating user
     * @return operation result status
     */
    ResultStatus createMonitorRule(MonitorRuleDTO monitorDTO, String operator);

    /**
     * Delete an alarm rule.
     * @param id       alarm rule id
     * @param operator operating user
     * @return operation result status
     */
    ResultStatus deleteMonitorRule(Long id, String operator);

    /**
     * Modify an alarm rule.
     * @param monitorDTO alarm rule
     * @param operator   operating user
     * @return operation result status
     */
    ResultStatus modifyMonitorRule(MonitorRuleDTO monitorDTO, String operator);

    /**
     * List alarm-rule summaries visible to the operator.
     * @param operator operating user
     * @return alarm-rule summary list
     */
    List<MonitorRuleSummary> getMonitorRules(String operator);

    /**
     * Fetch the full detail of an alarm rule.
     * @param monitorRuleDO locally-stored alarm-rule summary record
     * @return detailed alarm rule wrapped in a Result
     */
    Result<MonitorRuleDTO> getMonitorRuleDetail(MonitorRuleDO monitorRuleDO);

    /**
     * Look up the locally-stored (MySQL) alarm rule by primary key.
     * @param id local alarm-rule id
     * @return local alarm-rule record
     */
    MonitorRuleDO getById(Long id);

    /**
     * Look up the locally-stored (MySQL) alarm rule by strategy id.
     * @param strategyId strategy id in the external monitoring system
     * @return local alarm-rule record
     */
    MonitorRuleDO getByStrategyId(Long strategyId);

    /**
     * Fetch alert history for a rule within a time window.
     * @param id        alarm rule id
     * @param startTime window start
     * @param endTime   window end
     * @return alert history wrapped in a Result
     */
    Result<List<Alert>> getMonitorAlertHistory(Long id, Long startTime, Long endTime);

    /**
     * Fetch the detail of a single alert.
     * @param alertId alert id
     * @return alert detail wrapped in a Result
     */
    Result<MonitorAlertDetail> getMonitorAlertDetail(Long alertId);

    /**
     * Create a silence (alert suppression).
     * @param monitorSilenceDTO silence information
     * @param operator          operating user
     * @return operation result
     */
    Result createSilence(MonitorSilenceDTO monitorSilenceDTO, String operator);

    /**
     * Remove a silence.
     * @param silenceId silence id
     * @return true on success
     */
    Boolean releaseSilence(Long silenceId);

    /**
     * Modify a silence.
     * @param monitorSilenceDTO silence information
     * @param operator          operating user
     * @return operation result
     */
    Result modifySilence(MonitorSilenceDTO monitorSilenceDTO, String operator);

    /**
     * List silences attached to a strategy.
     * @param strategyId strategy id
     * @return silences wrapped in a Result
     */
    Result<List<Silence>> getSilences(Long strategyId);

    /**
     * Fetch a single silence.
     * @param silenceId silence id
     * @return the silence
     */
    Silence getSilenceById(Long silenceId);

    /**
     * List the alert notify (receiver) groups.
     * @return notify groups
     */
    List<NotifyGroup> getNotifyGroups();
}

View File

@@ -14,44 +14,34 @@ public abstract class AbstractMonitorService {
* 监控策略的增删改查
*/
public abstract Integer createStrategy(Strategy strategy);
public abstract Boolean deleteStrategyById(Long strategyId);
public abstract Boolean modifyStrategy(Strategy strategy);
public abstract List<Strategy> getStrategies();
public abstract Strategy getStrategyById(Long strategyId);
/**
* 告警的查
* 告警被触发后, 告警信息的查
*/
public abstract List<Alert> getAlerts(Long strategyId, Long startTime, Long endTime);
public abstract Alert getAlertById(Long alertId);
/**
* 屏蔽的增删改查
* 告警被触发之后, 进行屏蔽时, 屏蔽策略的增删改查
*/
public abstract Boolean createSilence(Silence silence);
public abstract Boolean releaseSilence(Long silenceId);
public abstract Boolean modifySilence(Silence silence);
public abstract List<Silence> getSilences(Long strategyId);
public abstract Silence getSilenceById(Long silenceId);
/**
* 指标的上报和查询
*/
public abstract Boolean sinkMetrics(List<MetricSinkPoint> pointList);
public abstract Metric getMetrics(String metric, Long startTime, Long endTime, Integer step, Properties tags);
/**
* 告警组
* 告警组获取
*/
public abstract List<NotifyGroup> getNotifyGroups();
/**
* 监控指标的上报和查询
*/
public abstract Boolean sinkMetrics(List<MetricSinkPoint> pointList);
public abstract Metric getMetrics(String metric, Long startTime, Long endTime, Integer step, Properties tags);
}

View File

@@ -10,6 +10,7 @@ import com.xiaojukeji.kafka.manager.notify.common.OrderNotifyTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
/**
@@ -24,6 +25,7 @@ public class OrderApplyNotifyService implements ApplicationListener<OrderApplyEv
@Value("${notify.order.detail-url}")
private String orderDetailUrl;
@Async
@Override
public void onApplicationEvent(OrderApplyEvent orderApplyEvent) {
OrderDO orderDO = orderApplyEvent.getOrderDO();

View File

@@ -7,6 +7,7 @@ import com.xiaojukeji.kafka.manager.notify.notifyer.AbstractNotifyService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
/**
@@ -21,6 +22,7 @@ public class OrderPassedNotifyService implements ApplicationListener<OrderPassed
@Value("${notify.order.detail-url}")
private String orderDetailUrl;
@Async
@Override
public void onApplicationEvent(OrderPassedEvent orderPassEvent) {
OrderDO orderDO = orderPassEvent.getOrderDO();

View File

@@ -7,6 +7,7 @@ import com.xiaojukeji.kafka.manager.notify.notifyer.AbstractNotifyService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
/**
@@ -21,6 +22,7 @@ public class OrderRefusedNotifyService implements ApplicationListener<OrderRefus
@Value("${notify.order.detail-url}")
private String orderDetailUrl;
@Async
@Override
public void onApplicationEvent(OrderRefusedEvent orderRefuseEvent) {
OrderDO orderDO = orderRefuseEvent.getOrderDO();

View File

@@ -4,7 +4,7 @@ import com.xiaojukeji.kafka.manager.common.bizenum.*;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
@@ -144,16 +144,11 @@ public class ThirdPartServiceImpl implements ThirdPartService {
if (ResultStatus.SUCCESS.getCode() != result.getCode()) {
return null;
}
ConsumerGroupDTO consumerGroupDTO = new ConsumerGroupDTO(
clusterDO.getId(),
dto.getConsumerGroup(),
new ArrayList<>(),
OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation())
);
ConsumerGroup consumerGroup = new ConsumerGroup(clusterDO.getId(), dto.getConsumerGroup(), OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation()));
return consumerService.resetConsumerOffset(
clusterDO,
dto.getTopicName(),
consumerGroupDTO,
consumerGroup,
offsetDTOList
);
}

View File

@@ -0,0 +1,51 @@
package com.xiaojukeji.kafka.manager.task.config;
/**
 * Per-cluster configuration for the "sync topics to DB" scheduled task.
 */
public class SyncTopic2DBConfig {
    // App that becomes the owner of every synced topic.
    private String defaultAppId;

    // Cluster whose topics are synced.
    private Long clusterId;

    // Whether to also record read/write authority; defaults to false.
    private boolean addAuthority;

    public String getDefaultAppId() {
        return defaultAppId;
    }

    public void setDefaultAppId(String defaultAppId) {
        this.defaultAppId = defaultAppId;
    }

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public boolean isAddAuthority() {
        return addAuthority;
    }

    public void setAddAuthority(boolean addAuthority) {
        this.addAuthority = addAuthority;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("SyncTopic2DBConfig{");
        sb.append("defaultAppId='").append(defaultAppId).append('\'');
        sb.append(", clusterId=").append(clusterId);
        sb.append(", addAuthority=").append(addAuthority);
        sb.append('}');
        return sb.toString();
    }
}

View File

@@ -2,7 +2,7 @@ package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetPosEnum;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.metrics.ConsumerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.events.ConsumerMetricsCollectedEvent;
@@ -105,7 +105,7 @@ public class CollectAndPublishCGData extends AbstractScheduledTask<ClusterDO> {
private List<ConsumerMetrics> getTopicConsumerMetrics(ClusterDO clusterDO,
String topicName,
long startTimeUnitMs) {
List<ConsumerGroupDTO> consumerGroupDTOList = consumerService.getConsumerGroupList(clusterDO.getId(), topicName);
List<ConsumerGroup> consumerGroupDTOList = consumerService.getConsumerGroupList(clusterDO.getId(), topicName);
if (ValidateUtils.isEmptyList(consumerGroupDTOList)) {
// 重试
consumerGroupDTOList = consumerService.getConsumerGroupList(clusterDO.getId(), topicName);
@@ -131,7 +131,7 @@ public class CollectAndPublishCGData extends AbstractScheduledTask<ClusterDO> {
partitionOffsetMap.put(entry.getKey().partition(), entry.getValue());
}
for (ConsumerGroupDTO consumerGroupDTO: consumerGroupDTOList) {
for (ConsumerGroup consumerGroupDTO: consumerGroupDTOList) {
try {
ConsumerMetrics consumerMetrics =
getTopicConsumerMetrics(clusterDO, topicName, consumerGroupDTO, partitionOffsetMap, startTimeUnitMs);
@@ -150,20 +150,20 @@ public class CollectAndPublishCGData extends AbstractScheduledTask<ClusterDO> {
private ConsumerMetrics getTopicConsumerMetrics(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO,
ConsumerGroup consumerGroup,
Map<Integer, Long> partitionOffsetMap,
long startTimeUnitMs) {
Map<Integer, Long> consumerOffsetMap =
consumerService.getConsumerOffset(clusterDO, topicName, consumerGroupDTO);
consumerService.getConsumerOffset(clusterDO, topicName, consumerGroup);
if (ValidateUtils.isEmptyMap(consumerOffsetMap)) {
return null;
}
ConsumerMetrics metrics = new ConsumerMetrics();
metrics.setClusterId(clusterDO.getId());
metrics.setTopicName(topicName);
metrics.setConsumerGroup(consumerGroupDTO.getConsumerGroup());
metrics.setLocation(consumerGroupDTO.getOffsetStoreLocation().location);
metrics.setConsumerGroup(consumerGroup.getConsumerGroup());
metrics.setLocation(consumerGroup.getOffsetStoreLocation().location);
metrics.setPartitionOffsetMap(partitionOffsetMap);
metrics.setConsumeOffsetMap(consumerOffsetMap);
metrics.setTimestampUnitMs(startTimeUnitMs);

View File

@@ -15,6 +15,7 @@ import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.*;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
@@ -109,6 +110,11 @@ public class AutoHandleTopicOrder extends AbstractScheduledTask<EmptyEntry> {
return false;
}
if (PhysicalClusterMetadataManager.isTopicExist(physicalClusterId, dto.getTopicName())) {
rejectForRepeatedTopicName(orderDO);
return false;
}
if (ValidateUtils.isNull(dto.isPhysicalClusterId()) || !dto.isPhysicalClusterId()) {
return handleApplyTopicOrderByLogicalClusterId(clusterDO, orderDO, dto, createConfig);
}
@@ -117,6 +123,13 @@ public class AutoHandleTopicOrder extends AbstractScheduledTask<EmptyEntry> {
return handleApplyTopicOrderByPhysicalClusterId(clusterDO, orderDO, dto, createConfig);
}
/**
 * Reject a topic-apply order because the topic name already exists on the
 * target cluster. The order is marked REFUSED under the auto-handler user.
 * The opinion text is user-facing and intentionally kept as-is.
 */
private void rejectForRepeatedTopicName(OrderDO orderDO) {
    orderDO.setApplicant(Constant.AUTO_HANDLE_USER_NAME);
    orderDO.setStatus(OrderStatusEnum.REFUSED.getCode());
    orderDO.setOpinion("驳回:该 Topic 已被别人申请并生效");
    orderService.updateOrderById(orderDO);
}
/**
* 逻辑集群申请单
*/

View File

@@ -0,0 +1,163 @@
package com.xiaojukeji.kafka.manager.task.dispatch.op;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConfigService;
import com.xiaojukeji.kafka.manager.service.service.TopicManagerService;
import com.xiaojukeji.kafka.manager.service.service.gateway.AppService;
import com.xiaojukeji.kafka.manager.service.service.gateway.AuthorityService;
import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
import com.xiaojukeji.kafka.manager.task.component.EmptyEntry;
import com.xiaojukeji.kafka.manager.task.config.SyncTopic2DBConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Periodically persists topics that exist in cluster metadata but have no DB
 * record yet, assigning them to a configured default App. Optionally also
 * records read/write authority for that App (see SyncTopic2DBConfig).
 * @author zengqiao
 * @date 19/12/29
 */
@Component
@CustomScheduled(name = "syncTopic2DB", cron = "0 0/2 * * * ?", threadNum = 1)
@ConditionalOnProperty(prefix = "task.op", name = "sync-topic-enabled", havingValue = "true", matchIfMissing = false)
public class SyncTopic2DB extends AbstractScheduledTask<EmptyEntry> {
    private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);

    // Config-service key holding the per-cluster sync settings.
    private static final String SYNC_TOPIC_2_DB_CONFIG_KEY = "SYNC_TOPIC_2_DB_CONFIG_KEY";

    // Topics created within this window are skipped to avoid racing topics
    // just created (with a real owner) through the KM platform.
    private static final long NEWLY_CREATED_SKIP_MS = 10 * 60 * 1000L;

    @Autowired
    private AppService appService;

    @Autowired
    private ConfigService configService;

    @Autowired
    private ClusterService clusterService;

    @Autowired
    private AuthorityService authorityService;

    @Autowired
    private TopicManagerService topicManagerService;

    /**
     * Single pseudo-task per scheduling round; the id is only a timestamp
     * for traceability.
     */
    @Override
    public List<EmptyEntry> listAllTasks() {
        EmptyEntry emptyEntry = new EmptyEntry();
        emptyEntry.setId(System.currentTimeMillis() / 1000);
        return Arrays.asList(emptyEntry);
    }

    /**
     * Sync every configured cluster; failures on one cluster do not stop the
     * others.
     */
    @Override
    public void processTask(EmptyEntry entryEntry) {
        Map<Long, SyncTopic2DBConfig> clusterIdConfigMap = getConfig();
        if (ValidateUtils.isEmptyMap(clusterIdConfigMap)) {
            LOGGER.warn("class=SyncTopic2DB||method=processTask||msg=without config or config illegal");
            return;
        }
        LOGGER.info("class=SyncTopic2DB||method=processTask||data={}||msg=start sync", JsonUtils.toJSONString(clusterIdConfigMap));
        List<ClusterDO> clusterDOList = clusterService.list();
        if (ValidateUtils.isEmptyList(clusterDOList)) {
            return;
        }
        for (ClusterDO clusterDO: clusterDOList) {
            if (!clusterIdConfigMap.containsKey(clusterDO.getId())) {
                continue;
            }
            try {
                syncTopic2DB(clusterDO.getId(), clusterIdConfigMap.get(clusterDO.getId()));
            } catch (Exception e) {
                LOGGER.error("class=SyncTopic2DB||method=processTask||clusterId={}||errMsg={}||msg=sync failed", clusterDO.getId(), e.getMessage());
            }
        }
    }

    /**
     * Persist every un-recorded topic of one cluster under the default App.
     * Internal topics and freshly-created topics are skipped.
     */
    private void syncTopic2DB(Long clusterId, SyncTopic2DBConfig syncTopic2DBConfig) {
        List<TopicDO> doList = topicManagerService.getByClusterId(clusterId);
        if (ValidateUtils.isNull(doList)) {
            doList = new ArrayList<>();
        }
        Set<String> existedTopicNameSet = doList.stream().map(TopicDO::getTopicName).collect(Collectors.toSet());
        for (String topicName: PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
            if (existedTopicNameSet.contains(topicName)
                    || KafkaConstant.COORDINATOR_TOPIC_NAME.equals(topicName)
                    || KafkaConstant.TRANSACTION_TOPIC_NAME.equals(topicName)) {
                // Already recorded, or a Kafka-internal topic.
                continue;
            }
            TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
            if (ValidateUtils.isNull(topicMetadata)) {
                continue;
            }
            // 新创建10分钟内的Topic不进行同步, 避免KM平台上新建的, 有所属应用的Topic被错误的同步了
            if (System.currentTimeMillis() - topicMetadata.getCreateTime() < NEWLY_CREATED_SKIP_MS) {
                continue;
            }
            TopicDO topicDO = new TopicDO();
            topicDO.setAppId(syncTopic2DBConfig.getDefaultAppId());
            topicDO.setClusterId(clusterId);
            topicDO.setTopicName(topicName);
            topicDO.setDescription("定期同步至DB中的无主Topic");
            topicDO.setPeakBytesIn(TopicCreationConstant.DEFAULT_QUOTA);
            topicManagerService.addTopic(topicDO);
            if (!syncTopic2DBConfig.isAddAuthority()) {
                // BUGFIX: was `return`, which aborted the whole per-cluster
                // sync after the first topic; also dropped the pointless
                // isNull() check on a primitive boolean.
                continue;
            }
            // TODO 当前添加 Topic 和 添加 Authority 是非事务的, 中间出现异常之后, 会导致数据错误, 后续还需要优化一下
            AuthorityDO authorityDO = new AuthorityDO();
            authorityDO.setAppId(syncTopic2DBConfig.getDefaultAppId());
            authorityDO.setClusterId(clusterId);
            authorityDO.setTopicName(topicName);
            authorityDO.setAccess(TopicAuthorityEnum.READ_WRITE.getCode());
            authorityService.addAuthority(authorityDO);
        }
    }

    /**
     * Load and validate the per-cluster sync configs; entries with a missing
     * cluster id, blank app id, or unknown App are dropped.
     * @return clusterId -> config, empty map when nothing valid is configured
     */
    private Map<Long, SyncTopic2DBConfig> getConfig() {
        List<SyncTopic2DBConfig> configList = configService.getArrayByKey(SYNC_TOPIC_2_DB_CONFIG_KEY, SyncTopic2DBConfig.class);
        if (ValidateUtils.isEmptyList(configList)) {
            return Collections.emptyMap();
        }
        Map<Long, SyncTopic2DBConfig> clusterIdConfigMap = new HashMap<>();
        for (SyncTopic2DBConfig syncTopic2DBConfig: configList) {
            if (ValidateUtils.isNullOrLessThanZero(syncTopic2DBConfig.getClusterId())
                    || ValidateUtils.isBlank(syncTopic2DBConfig.getDefaultAppId())) {
                continue;
            }
            AppDO appDO = appService.getByAppId(syncTopic2DBConfig.getDefaultAppId());
            if (ValidateUtils.isNull(appDO)) {
                continue;
            }
            clusterIdConfigMap.put(syncTopic2DBConfig.getClusterId(), syncTopic2DBConfig);
        }
        return clusterIdConfigMap;
    }
}

View File

@@ -50,8 +50,7 @@ public class FlushBKConsumerGroupMetadata {
private void flush(Long clusterId) {
// 获取消费组列表
Set<String> consumerGroupSet = new HashSet<>();
Map<String, List<String>> consumerGroupAppIdMap = new HashMap<>();
collectAndSaveConsumerGroup(clusterId, consumerGroupSet, consumerGroupAppIdMap);
collectAndSaveConsumerGroup(clusterId, consumerGroupSet);
// 获取消费组summary信息
Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
@@ -67,15 +66,12 @@ public class FlushBKConsumerGroupMetadata {
new ConsumerMetadata(
consumerGroupSet,
topicNameConsumerGroupMap,
consumerGroupSummary,
consumerGroupAppIdMap
consumerGroupSummary
)
);
}
private void collectAndSaveConsumerGroup(Long clusterId,
Set<String> consumerGroupSet,
Map<String, List<String>> consumerGroupAppIdMap) {
private void collectAndSaveConsumerGroup(Long clusterId, Set<String> consumerGroupSet) {
try {
AdminClient adminClient = KafkaClientPool.getAdminClient(clusterId);
@@ -83,20 +79,14 @@ public class FlushBKConsumerGroupMetadata {
for (scala.collection.immutable.List<kafka.coordinator.GroupOverview> brokerGroup : JavaConversions.asJavaMap(brokerGroupMap).values()) {
List<kafka.coordinator.GroupOverview> lists = JavaConversions.asJavaList(brokerGroup);
for (kafka.coordinator.GroupOverview groupOverview : lists) {
String consumerGroup = groupOverview.groupId();
List<String> appIdList = new ArrayList<>();
if (consumerGroup != null && consumerGroup.contains("#")) {
String[] splitArray = consumerGroup.split("#");
consumerGroup = splitArray[splitArray.length - 1];
appIdList = Arrays.asList(splitArray).subList(0, splitArray.length - 1);
}
consumerGroupAppIdMap.put(consumerGroup, appIdList);
consumerGroupSet.add(consumerGroup);
}
}
return ;
} catch (Exception e) {
LOGGER.error("collect consumerGroup failed, clusterId:{}.", clusterId, e);
}

View File

@@ -55,7 +55,7 @@ public class FlushZKConsumerGroupMetadata {
collectTopicAndConsumerGroupMap(clusterId, new ArrayList<>(consumerGroupSet));
ConsumerMetadataCache.putConsumerMetadataInZK(
clusterId,
new ConsumerMetadata(consumerGroupSet, topicNameConsumerGroupMap, new HashMap<>(0), new HashMap<>(0))
new ConsumerMetadata(consumerGroupSet, topicNameConsumerGroupMap, new HashMap<>(0))
);
}

View File

@@ -1,44 +0,0 @@
<!-- Maven Assembly descriptor: packages the application both as an exploded
     directory and as a tar.gz bundle, collecting startup scripts, install
     docs, runtime configuration and the built jars. -->
<assembly
xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>bin</id>
<!-- Produce both an unpacked directory (for local runs) and a tar.gz (for distribution). -->
<formats>
<format>dir</format>
<format>tar.gz</format>
</formats>
<fileSets>
<!-- Startup/stop scripts; no <directory> given, so paths are resolved
     relative to the project base directory - TODO confirm against the module layout. -->
<fileSet>
<includes>
<include>bin/*</include>
</includes>
<!-- Scripts must be executable in the assembled archive. -->
<fileMode>0755</fileMode>
</fileSet>
<!-- Ship the installation guide alongside the binaries under install/. -->
<fileSet>
<directory>../docs/install_guide</directory>
<outputDirectory>install</outputDirectory>
<includes>
<include>*</include>
</includes>
</fileSet>
<!-- Runtime configuration files, placed in conf/ for easy editing after unpack. -->
<fileSet>
<directory>src/main/resources/</directory>
<outputDirectory>conf</outputDirectory>
<includes>
<include>application.yml</include>
<include>logback-spring.xml</include>
</includes>
</fileSet>
<!-- The built jar(s) from target/ go into libs/. -->
<fileSet>
<directory>${project.build.directory}</directory>
<outputDirectory>libs</outputDirectory>
<includes>
<include>*.jar</include>
</includes>
</fileSet>
</fileSets>
</assembly>

View File

@@ -122,26 +122,6 @@
</execution>
</executions>
</plugin>
<!--<plugin>-->
<!--<groupId>org.apache.maven.plugins</groupId>-->
<!--<artifactId>maven-assembly-plugin</artifactId>-->
<!--<executions>-->
<!--<execution>-->
<!--<id>make-assembly</id>-->
<!--<phase>package</phase>-->
<!--<goals>-->
<!--<goal>single</goal>-->
<!--</goals>-->
<!--<configuration>-->
<!--<finalName>kafka-manager-${project.version}</finalName>-->
<!--<descriptors>-->
<!--<descriptor>assembly.xml</descriptor>-->
<!--</descriptors>-->
<!--<tarLongFileMode>posix</tarLongFileMode>-->
<!--</configuration>-->
<!--</execution>-->
<!--</executions>-->
<!--</plugin>-->
</plugins>
</build>
</project>

View File

@@ -47,7 +47,7 @@ public class GatewayHeartbeatController {
List<TopicConnectionDO> doList = null;
try {
doList = JsonUtils.parseTopicConnections(clusterId, jsonObject);
doList = JsonUtils.parseTopicConnections(clusterId, jsonObject, System.currentTimeMillis());
} catch (Exception e) {
LOGGER.error("class=GatewayHeartbeatController||method=receiveTopicConnections||clusterId={}||brokerId={}||msg=parse data failed||exception={}", clusterId, brokerId, e.getMessage());
return Result.buildFailure("fail");

View File

@@ -76,7 +76,7 @@ public class NormalAppController {
@RequestMapping(value = "apps/{appId}/basic-info", method = RequestMethod.GET)
@ResponseBody
public Result<AppVO> getAppBasicInfo(@PathVariable String appId) {
if (accountService.isAdminOrderHandler(SpringTool.getUserName())) {
if (accountService.isOpOrRd(SpringTool.getUserName())) {
return new Result<>(AppConverter.convert2AppVO(appService.getByAppId(appId)));
}
@@ -101,7 +101,7 @@ public class NormalAppController {
@RequestMapping(value = "apps/{appId}/topics", method = RequestMethod.GET)
@ResponseBody
public Result<List<AppTopicVO>> getAppTopics(@PathVariable String appId,
@RequestParam(value = "mine") Boolean mine) {
@RequestParam(value = "mine", required = false) Boolean mine) {
List<AppTopicDTO> dtoList = appService.getAppTopicDTOList(appId, mine);
List<AppTopicVO> voList = new ArrayList<>();

View File

@@ -6,10 +6,10 @@ import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.TopicOffsetResetDTO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupDetailVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupSummaryVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
@@ -55,7 +55,7 @@ public class NormalConsumerController {
@ApiOperation(value = "查询消费Topic的消费组", notes = "")
@RequestMapping(value = "{clusterId}/consumers/{topicName}/consumer-groups", method = RequestMethod.GET)
@ResponseBody
public Result<List<ConsumerGroupVO>> getConsumeGroups(
public Result<List<ConsumerGroupSummaryVO>> getConsumeGroups(
@PathVariable Long clusterId,
@PathVariable String topicName,
@RequestParam(value = "isPhysicalClusterId", required = false) Boolean isPhysicalClusterId) {
@@ -63,9 +63,9 @@ public class NormalConsumerController {
if (ValidateUtils.isNull(physicalClusterId)) {
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
return new Result<>(ConsumerModelConverter.convert2ConsumerGroupVOList(
consumerService.getConsumerGroupList(physicalClusterId, topicName))
);
return new Result<>(ConsumerModelConverter.convert2ConsumerGroupSummaryVOList(
consumerService.getConsumerGroupSummaries(physicalClusterId, topicName)
));
}
@ApiOperation(value = "查询消费组的消费详情", notes = "")
@@ -95,15 +95,10 @@ public class NormalConsumerController {
return Result.buildFrom(ResultStatus.CG_LOCATION_ILLEGAL);
}
ConsumerGroupDTO consumeGroupDTO = new ConsumerGroupDTO(
clusterDO.getId(),
consumerGroup,
new ArrayList<>(),
offsetStoreLocation
);
ConsumerGroup consumeGroup = new ConsumerGroup(clusterDO.getId(), consumerGroup, offsetStoreLocation);
try {
List<ConsumeDetailDTO> consumeDetailDTOList =
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroupDTO);
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroup);
return new Result<>(
ConsumerModelConverter.convert2ConsumerGroupDetailVO(
topicName,
@@ -113,7 +108,7 @@ public class NormalConsumerController {
)
);
} catch (Exception e) {
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroupDTO, e);
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroup, e);
}
return Result.buildFrom(ResultStatus.OPERATION_FAILED);
@@ -139,16 +134,11 @@ public class NormalConsumerController {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
ConsumerGroupDTO consumerGroupDTO = new ConsumerGroupDTO(
physicalClusterId,
dto.getConsumerGroup(),
new ArrayList<>(),
OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation())
);
ConsumerGroup consumerGroup = new ConsumerGroup(physicalClusterId, dto.getConsumerGroup(), OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation()));
List<Result> resultList = consumerService.resetConsumerOffset(
clusterDO,
dto.getTopicName(),
consumerGroupDTO,
consumerGroup,
offsetDTOList
);
for (Result result: resultList) {

View File

@@ -0,0 +1,52 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.op;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionAddGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionDeleteGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionModifyGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import com.xiaojukeji.kafka.manager.web.converters.GatewayModelConverter;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
@Api(tags = "OP-Gateway配置相关接口(REST)")
@RestController
public class OpGatewayConfigController {
@Autowired
private GatewayConfigService gatewayConfigService;
@ApiOperation(value = "创建Gateway配置", notes = "")
@RequestMapping(value = "gateway-configs", method = RequestMethod.POST)
@ResponseBody
public Result createGatewayConfig(@RequestBody OrderExtensionAddGatewayConfigDTO dto) {
if (ValidateUtils.isNull(dto) || !dto.legal()) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
return gatewayConfigService.insert(GatewayModelConverter.convert2GatewayConfigDO(dto));
}
@ApiOperation(value = "修改Gateway配置", notes = "")
@RequestMapping(value = "gateway-configs", method = RequestMethod.PUT)
@ResponseBody
public Result modifyGatewayConfig(@RequestBody OrderExtensionModifyGatewayConfigDTO dto) {
if (ValidateUtils.isNull(dto) || !dto.legal()) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
return gatewayConfigService.updateById(GatewayModelConverter.convert2GatewayConfigDO(dto));
}
@ApiOperation(value = "删除Gateway配置", notes = "")
@RequestMapping(value = "gateway-configs", method = RequestMethod.DELETE)
@ResponseBody
public Result deleteGatewayConfig(@RequestBody OrderExtensionDeleteGatewayConfigDTO dto) {
if (ValidateUtils.isNull(dto) || !dto.legal()) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
return gatewayConfigService.deleteById(dto.getId());
}
}

View File

@@ -194,15 +194,22 @@ public class OpUtilsController {
if (ValidateUtils.isNull(clusterDO)) {
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
String operator = SpringTool.getUserName();
ResultStatus rs = null;
if (RebalanceDimensionEnum.CLUSTER.getCode().equals(reqObj.getDimension())) {
rs = adminService.preferredReplicaElection(clusterDO, operator);
// 按照Cluster纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, SpringTool.getUserName());
} else if (RebalanceDimensionEnum.BROKER.getCode().equals(reqObj.getDimension())) {
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getBrokerId(), operator);
// 按照Broker纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getBrokerId(), SpringTool.getUserName());
} else if (RebalanceDimensionEnum.TOPIC.getCode().equals(reqObj.getDimension())) {
// 按照Topic纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), SpringTool.getUserName());
} else if (RebalanceDimensionEnum.PARTITION.getCode().equals(reqObj.getDimension())) {
// 按照Partition纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), reqObj.getPartitionId(), SpringTool.getUserName());
} else {
// TODO: 19/7/8 Topic维度 & Region维度 优先副本选举
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
return Result.buildFrom(rs);
}

View File

@@ -1,10 +1,11 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.PeakFlowStatusEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.TopicMetadataVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ControllerPreferredCandidateVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.RdClusterMetricsVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ClusterBrokerStatusVO;
import com.xiaojukeji.kafka.manager.common.entity.ao.BrokerOverviewDTO;
@@ -26,7 +27,6 @@ import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
@@ -168,4 +168,15 @@ public class RdClusterController {
public Result<List<TopicMetadataVO>> getTopicMetadatas(@PathVariable("clusterId") Long clusterId) {
return new Result<>(ClusterModelConverter.convert2TopicMetadataVOList(clusterId));
}
@ApiOperation(value = "Controller优先候选的Broker", notes = "滴滴内部引擎特性")
@RequestMapping(value = "clusters/{clusterId}/controller-preferred-candidates", method = RequestMethod.GET)
@ResponseBody
public Result<List<ControllerPreferredCandidateVO>> getControllerPreferredCandidates(@PathVariable("clusterId") Long clusterId) {
Result<List<ControllerPreferredCandidate>> candidateResult = clusterService.getControllerPreferredCandidates(clusterId);
if (candidateResult.failed()) {
return new Result(candidateResult.getCode(), candidateResult.getMessage());
}
return Result.buildSuc(ClusterModelConverter.convert2ControllerPreferredCandidateVOList(candidateResult.getData()));
}
}

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;

import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.GatewayConfigVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import com.xiaojukeji.kafka.manager.web.converters.GatewayModelConverter;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.List;

/**
 * RD-facing, read-only REST endpoint that lists all Gateway configuration
 * entries as view objects.
 */
@Api(tags = "RD-Gateway配置相关接口(REST)")
@RestController
public class RdGatewayConfigController {
    @Autowired
    private GatewayConfigService gatewayConfigService;

    @ApiOperation(value = "Gateway相关配置信息", notes = "")
    @RequestMapping(value = "gateway-configs", method = RequestMethod.GET)
    @ResponseBody
    public Result<List<GatewayConfigVO>> getGatewayConfigs() {
        List<GatewayConfigDO> configDOList = gatewayConfigService.list();
        // An empty table is still a successful (empty) response, not an error.
        return ValidateUtils.isEmptyList(configDOList)
                ? Result.buildSuc()
                : Result.buildSuc(GatewayModelConverter.convert2GatewayConfigVOList(configDOList));
    }
}

View File

@@ -8,7 +8,7 @@ import com.xiaojukeji.kafka.manager.common.constant.SystemCodeConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.openapi.common.dto.ConsumeHealthDTO;
import com.xiaojukeji.kafka.manager.openapi.common.dto.OffsetResetDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
@@ -29,7 +29,6 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -152,15 +151,10 @@ public class ThirdPartConsumeController {
return Result.buildFrom(ResultStatus.CG_LOCATION_ILLEGAL);
}
ConsumerGroupDTO consumeGroupDTO = new ConsumerGroupDTO(
clusterDO.getId(),
consumerGroup,
new ArrayList<>(),
offsetStoreLocation
);
ConsumerGroup consumeGroup = new ConsumerGroup(clusterDO.getId(), consumerGroup, offsetStoreLocation);
try {
List<ConsumeDetailDTO> consumeDetailDTOList =
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroupDTO);
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroup);
return new Result<>(
ConsumerModelConverter.convert2ConsumerGroupDetailVO(
topicName,
@@ -170,7 +164,7 @@ public class ThirdPartConsumeController {
)
);
} catch (Exception e) {
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroupDTO, e);
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroup, e);
}
return Result.buildFrom(ResultStatus.OPERATION_FAILED);
}

View File

@@ -0,0 +1,63 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.thirdpart;

import com.xiaojukeji.kafka.manager.common.bizenum.RebalanceDimensionEnum;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.op.RebalanceDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.AdminService;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

/**
 * Third-party (open API) operation endpoints. Currently exposes preferred
 * replica election at cluster / broker / topic / partition granularity.
 *
 * @author zengqiao
 * @date 20/9/23
 */
@Api(tags = "开放接口-OP相关接口(REST)")
@RestController
@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX)
public class ThirdPartOpController {
    @Autowired
    private AdminService adminService;

    @Autowired
    private ClusterService clusterService;

    @ApiOperation(value = "优先副本选举")
    @RequestMapping(value = "op/rebalance", method = RequestMethod.POST)
    @ResponseBody
    public Result preferredReplicaElect(@RequestBody RebalanceDTO reqObj) {
        // Basic payload validation first.
        if (!reqObj.paramLegal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        ClusterDO clusterDO = clusterService.getById(reqObj.getClusterId());
        if (ValidateUtils.isNull(clusterDO)) {
            return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
        }

        // Dispatch on the requested election dimension; the current user is
        // recorded as the operator for auditing.
        ResultStatus electionStatus = null;
        if (RebalanceDimensionEnum.CLUSTER.getCode().equals(reqObj.getDimension())) {
            // Elect preferred replicas across the whole cluster.
            electionStatus = adminService.preferredReplicaElection(clusterDO, SpringTool.getUserName());
        } else if (RebalanceDimensionEnum.BROKER.getCode().equals(reqObj.getDimension())) {
            // Restrict the election to a single broker.
            electionStatus = adminService.preferredReplicaElection(clusterDO, reqObj.getBrokerId(), SpringTool.getUserName());
        } else if (RebalanceDimensionEnum.TOPIC.getCode().equals(reqObj.getDimension())) {
            // Restrict the election to a single topic.
            electionStatus = adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), SpringTool.getUserName());
        } else if (RebalanceDimensionEnum.PARTITION.getCode().equals(reqObj.getDimension())) {
            // Restrict the election to one partition of a topic.
            electionStatus = adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), reqObj.getPartitionId(), SpringTool.getUserName());
        } else {
            // Unknown dimension value.
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return Result.buildFrom(electionStatus);
    }
}

Some files were not shown because too many files have changed in this diff. Show More