mirror of
https://github.com/didi/KnowStreaming.git
synced 2026-01-01 09:42:11 +08:00
Merge branch 'master' into master
This commit is contained in:
11 README.md
@@ -67,11 +67,16 @@
- [Didi Logi-KafkaManager video tutorial series](https://mp.weixin.qq.com/s/9X7gH0tptHPtfjPPSdGO8g)
- [Kafka in practice (15): a study of Didi's open-source Kafka management platform Logi-KafkaManager, by A叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244)

## 3 Didi Logi open-source user DingTalk group
## 3 Didi Logi open-source user group



WeChat group: follow the official account Obsuite and reply "Logi加群"



DingTalk group ID: 32821440

## 4 OCE certification

OCE is a certification mechanism and exchange platform tailored for production users of Didi Logi-KafkaManager. We provide OCE enterprises with better technical support, such as dedicated tech salons, one-on-one exchanges with the team, and a dedicated Q&A group. If your company runs Logi-KafkaManager in production, [come and join](http://obsuite.didiyun.com/open/openAuth).
||||
97 Releases_Notes.md (new file)
@@ -0,0 +1,97 @@
---

![kafka-manager-logo](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgGOVbtKWGnplAZRLHqU)

**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**

---

## v2.3.0

Release date: 2021-02-08

### New capabilities

- Added support for Docker-based deployment
- A Broker can be designated as a candidate controller
- Gateway configurations can be added and managed
- Consumer group status can be retrieved
- Added JMX authentication for clusters

### Experience improvements

- Improved the flows for editing user roles and changing passwords
- Added search by consumerID
- Improved the wording of the "Topic connection info", "reset consumer group offset", and "modify Topic retention time" prompts
- Added links to the Resource Application document in the relevant places

### Bug fixes

- Fixed the Broker monitoring chart showing an incorrect time axis
- Fixed the wrong alert-period unit when creating Nightingale (夜莺) monitoring alert rules

## v2.2.0

Release date: 2021-01-25

### New capabilities

- Improved the batch-operation flow for work orders
- Added real-time 75th/99th percentile latency data for Topics
- Added a scheduled task that periodically writes ownerless Topics missing from the DB into the DB

### Experience improvements

- Added links to the Cluster Onboarding document in the relevant places
- Clarified the meaning of physical clusters and logical clusters
- Show the Topic's Region on the Topic detail page and in the partition-expansion dialog
- Improved how the Topic data retention time is configured during Topic approval
- Improved the error messages shown when applying for and approving Topics/applications
- Improved the wording of the Topic data-sampling actions
- Improved the prompts shown to operators when deleting a Topic
- Improved the deletion logic and prompts when operators delete a Region
- Improved the prompts when operators delete a logical cluster
- Improved the file-type restrictions when uploading cluster configuration files

### Bug fixes

- Fixed incorrect special-character validation when filling in an application name
- Fixed ordinary users being able to access application details beyond their permissions
- Fixed the data compression format being unavailable after a Kafka version upgrade
- Fixed deleted logical clusters or Topics still being shown in the UI
- Fixed duplicate result prompts during Leader rebalance operations

## v2.1.0

Release date: 2020-12-19

### Experience improvements

- Improved the background style while pages load
- Improved the flow for ordinary users applying for Topic permissions
- Improved the permission restrictions for quota and partition applications
- Improved the prompt shown when revoking Topic permissions
- Improved the field names of the quota application form
- Improved the flow for resetting consumer offsets
- Improved the form for creating Topic migration tasks
- Improved the dialog style of the partition-expansion operation
- Improved the styling of the cluster Broker monitoring charts
- Improved the form for creating logical clusters
- Improved the prompts for cluster security protocols

### Bug fixes

- Fixed intermittent failures when resetting consumer offsets
2 build.sh
@@ -4,7 +4,7 @@ cd $workspace
## constant
OUTPUT_DIR=./output
KM_VERSION=2.2.0
KM_VERSION=2.3.1
APP_NAME=kafka-manager
APP_DIR=${APP_NAME}-${KM_VERSION}
@@ -1,14 +1,11 @@
FROM openjdk:8-jdk-alpine3.9
FROM openjdk:16-jdk-alpine3.13

LABEL author="yangvipguang"

ENV VERSION 2.1.0
ENV JAR_PATH kafka-manager-web/target
COPY $JAR_PATH/kafka-manager-web-$VERSION-SNAPSHOT.jar /tmp/app.jar
COPY $JAR_PATH/application.yml /km/
ENV VERSION 2.3.1

RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN apk add --no-cache --virtual .build-deps \
font-adobe-100dpi \
ttf-dejavu \
fontconfig \
@@ -19,26 +16,28 @@ RUN apk add --no-cache --virtual .build-deps \
tomcat-native \
&& apk del .build-deps

RUN apk add --no-cache tini

ENV AGENT_HOME /opt/agent/

WORKDIR /tmp

COPY $JAR_PATH/kafka-manager.jar app.jar
# COPY application.yml application.yml  ## mounted via helm by default, to avoid leaking sensitive config

COPY docker-depends/config.yaml $AGENT_HOME
COPY docker-depends/jmx_prometheus_javaagent-0.14.0.jar $AGENT_HOME

ENV JAVA_AGENT="-javaagent:$AGENT_HOME/jmx_prometheus_javaagent-0.14.0.jar=9999:$AGENT_HOME/config.yaml"
COPY docker-depends/jmx_prometheus_javaagent-0.15.0.jar $AGENT_HOME

ENV JAVA_AGENT="-javaagent:$AGENT_HOME/jmx_prometheus_javaagent-0.15.0.jar=9999:$AGENT_HOME/config.yaml"
ENV JAVA_HEAP_OPTS="-Xms1024M -Xmx1024M -Xmn100M "

ENV JAVA_OPTS="-verbose:gc \
-XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintHeapAtGC -Xloggc:/tmp/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps \
-XX:MaxMetaspaceSize=256M -XX:+DisableExplicitGC -XX:+UseStringDeduplication \
-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:-UseContainerSupport"
#-Xlog:gc -Xlog:gc* -Xlog:gc+heap=trace -Xlog:safepoint

EXPOSE 8080 9999

ENTRYPOINT ["sh","-c","java -jar $JAVA_HEAP_OPTS $JAVA_OPTS /tmp/app.jar --spring.config.location=/km/application.yml"]

## Prometheus JMX monitoring is disabled by default; to enable it, uncomment the line below and comment out the default ENTRYPOINT above.
## ENTRYPOINT ["sh","-c","java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS /tmp/app.jar --spring.config.location=/km/application.yml"]
ENTRYPOINT ["tini", "--"]

CMD ["sh","-c","java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS app.jar --spring.config.location=application.yml"]
Binary file not shown.
@@ -9,6 +9,13 @@
# Dynamic configuration management

## 0. Contents

- 1. Scheduled Topic sync task
- 2. Expert service: Topic partition hotspots
- 3. Expert service: insufficient Topic partitions

## 1. Scheduled Topic sync task

### 1.1 What the configuration is for

@@ -63,3 +70,53 @@ task:
]
```

---

## 2. Expert service: Topic partition hotspots

Within the set of Brokers delimited by a `Region`, a Topic is considered hot when its Leader count is unevenly distributed across those Brokers.

Note: looking only at the Leader-count distribution does have limitations; contributions of further hotspot definitions and code are welcome.

Dynamic configuration for Topic partition hotspots (page: Operations -> Platform Management -> Configuration Management):

Configuration key:
```
REGION_HOT_TOPIC_CONFIG
```

Configuration value:
```json
{
    "maxDisPartitionNum": 2,         # a Topic is considered hot when the leader-count gap between the Region's Brokers exceeds 2
    "minTopicBytesInUnitB": 1048576, # Topics with traffic below this value are not counted
    "ignoreClusterIdList": [         # clusters to ignore
        50
    ]
}
```
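
As a rough illustration of how these two thresholds combine, here is a minimal Java sketch of the hotspot rule described above; the class and method names are hypothetical, not the project's shipped implementation.

```java
import java.util.Collections;
import java.util.Map;

// Hypothetical sketch of the REGION_HOT_TOPIC_CONFIG rule; not the shipped code.
public class RegionHotTopicChecker {

    /**
     * @param leaderCountByBroker  this Topic's leader count on each Broker of the Region
     * @param topicBytesIn         the Topic's bytes-in traffic, in bytes
     * @param maxDisPartitionNum   maximum tolerated leader-count gap between Brokers
     * @param minTopicBytesInUnitB Topics below this traffic are not counted
     */
    public static boolean isHotTopic(Map<Integer, Integer> leaderCountByBroker,
                                     long topicBytesIn,
                                     int maxDisPartitionNum,
                                     long minTopicBytesInUnitB) {
        if (leaderCountByBroker.isEmpty() || topicBytesIn < minTopicBytesInUnitB) {
            return false; // low-traffic Topics are ignored
        }
        int max = Collections.max(leaderCountByBroker.values());
        int min = Collections.min(leaderCountByBroker.values());
        // hot when the leader distribution across the Region's Brokers is too skewed
        return max - min > maxDisPartitionNum;
    }
}
```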

---

## 3. Expert service: insufficient Topic partitions

When a Topic's total traffic divided by its partition count exceeds a given threshold, the Topic is considered to have insufficient partitions. A sketch of this rule follows the configuration below.

Dynamic configuration for insufficient Topic partitions (page: Operations -> Platform Management -> Configuration Management):

Configuration key:
```
TOPIC_INSUFFICIENT_PARTITION_CONFIG
```

Configuration value:
```json
{
    "maxBytesInPerPartitionUnitB": 3145728, # partitions are considered insufficient when per-partition traffic exceeds this value
    "minTopicBytesInUnitB": 1048576,        # Topics with traffic below this value are not counted
    "ignoreClusterIdList": [                # clusters to ignore
        50
    ]
}
```
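
To make the rule concrete, a minimal sketch with hypothetical names, assuming traffic is measured in bytes as in the config above:

```java
// Hypothetical sketch of the TOPIC_INSUFFICIENT_PARTITION_CONFIG rule; not the shipped code.
public class TopicPartitionSufficiencyChecker {

    public static boolean hasInsufficientPartitions(long topicBytesIn,
                                                    int partitionNum,
                                                    long maxBytesInPerPartitionUnitB,
                                                    long minTopicBytesInUnitB) {
        if (partitionNum <= 0 || topicBytesIn < minTopicBytesInUnitB) {
            return false; // low-traffic Topics are ignored
        }
        // insufficient when total traffic divided by the partition count exceeds the threshold
        return topicBytesIn / partitionNum > maxBytesInPerPartitionUnitB;
    }
}
```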
10 docs/dev_guide/gateway_config_manager.md (new file)
@@ -0,0 +1,10 @@
---

![kafka-manager-logo](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgGOVbtKWGnplAZRLHqU)

**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**

---

# Kafka-Gateway configuration guide
17 docs/dev_guide/upgrade_manual/logi-km-v2.3.0.md (new file)
@@ -0,0 +1,17 @@
---

![kafka-manager-logo](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgGOVbtKWGnplAZRLHqU)

**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**

---

# Upgrading to version `2.3.0`

Version `2.3.0` adds a description field to the `gateway_config` table, so run the SQL below to add the column.

```sql
ALTER TABLE `gateway_config`
ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`;
```
@@ -203,7 +203,8 @@ CREATE TABLE `gateway_config` (
`type` varchar(128) NOT NULL DEFAULT '' COMMENT '配置类型',
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '配置名称',
`value` text COMMENT '配置值',
`version` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT '版本信息',
`version` bigint(20) unsigned NOT NULL DEFAULT '1' COMMENT '版本信息',
`description` text COMMENT '描述信息',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
94 docs/install_guide/install_guide_nginx_cn.md (new file)
@@ -0,0 +1,94 @@
---

![kafka-manager-logo](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgGOVbtKWGnplAZRLHqU)

**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**

---

## nginx configuration - installation guide

# 1. Standalone deployment

Please refer to the [kafka-manager installation guide](install_guide_cn.md).

# 2. nginx configuration

## 1. Standalone deployment configuration

```
# nginx root-path access is configured as follows
location / {
    proxy_pass http://ip:port;
}
```

## 2. Front-end/back-end separation & multiple static resources

The following configuration lets nginx proxy multiple static resources, separating the project's front end from its back end and easing version iteration.

### 1. Download the source

Download the code for the version you need: [GitHub download](https://github.com/didi/Logi-KafkaManager)

### 2. Edit the webpack.config.js configuration file

Edit `webpack.config.js` in the `kafka-manager-console` module.
Everywhere below, <font color='red'>xxxx</font> is the nginx proxy path and the load prefix for the packaged static files; change <font color='red'>xxxx</font> to suit your needs.

```
cd kafka-manager-console
vi webpack.config.js

# publicPath defaults to the root directory; change it to the nginx proxy path.
let publicPath = '/xxxx';
```

### 3. Build

```
npm cache clean --force && npm install
```

PS: if the build reports an error, run `npm install clipboard@2.0.6`; otherwise ignore this step.

### 4. Deploy

#### 1. Deploy the front-end static files

Static resources: `../kafka-manager-web/src/main/resources/templates`

Upload them to a directory of your choice; this demo uses the `root` directory.

#### 2. Upload the jar and start it; see the [kafka-manager installation guide](install_guide_cn.md)

#### 3. Update the nginx configuration

```
location /xxxx {
    # where the static files live
    alias /root/templates;
    try_files $uri $uri/ /xxxx/index.html;
    index index.html;
}

location /api {
    proxy_pass http://ip:port;
}
# /api is recommended for the back-end path; if it conflicts, use the config below instead
#location /api/v2 {
#    proxy_pass http://ip:port;
#}
#location /api/v1 {
#    proxy_pass http://ip:port;
#}
```
@@ -7,9 +7,9 @@
# FAQ
- 0. Fixing broken images on GitHub
- 0. Which Kafka versions are supported?
- 1. No cluster to choose when applying for a Topic, creating a monitoring alert, etc.?
- 2. What are logical clusters & Regions for?
- 3. Login fails?
@@ -18,22 +18,16 @@
- 6. How do I use `MySQL 8`?
- 7. How do I fix `Jmx` connection failures?
- 8. The `topic biz data not exist` error and how to handle it
- 9. How do I view the API docs after the process starts?
- 10. How do I create an alert group?
- 11. Why is there no data for connection info and latency info?
- 12. Why is the logical cluster not visible after its application is approved?
---
### 0. Fixing broken images on GitHub
### 0. Which Kafka versions are supported?

Run `ping github.com` on your local machine to obtain the IP address behind `github.com`.

Then bind that IP in the `/etc/hosts` file.

For example:

```shell
# add the following entry to /etc/hosts

140.82.113.3 github.com
```
Essentially, as long as the Kafka version in use still depends on Zookeeper, its major features should be supported.

---
@@ -43,7 +37,7 @@
For creating a logical cluster, see:

- the [kafka-manager cluster onboarding](docs/user_guide/add_cluster/add_cluster.md) guide; both the Region and the logical cluster must be added.
- the [kafka-manager cluster onboarding](add_cluster/add_cluster.md) guide; both the Region and the logical cluster must be added.
---
@@ -76,7 +70,7 @@
- 3. Database timezone issues.

Check whether MySQL's topic table has data; if it does, then check whether the configured timezone is correct.
Check whether MySQL's topic_metrics table has data; if it does, then check whether the configured timezone is correct.

---
@@ -109,3 +103,26 @@
Under `Operations -> Cluster List -> Topic Info`, edit the Topic whose permission is being applied for and select an application for it.

The above only covers a single Topic. If you have a large number of Topics to initialize, add a configuration in Configuration Management to periodically sync ownerless Topics; see [Dynamic configuration management - 1. Scheduled Topic sync task](../dev_guide/dynamic_config_manager.md).

---

### 9. How do I view the API docs after the process starts?

- Didi Logi-KafkaManager documents its API with Swagger. Swagger address: [http://IP:PORT/swagger-ui.html#/](http://IP:PORT/swagger-ui.html#/)

### 10. How do I create an alert group?

This works together with a monitoring system. Integration with Nightingale (夜莺) is implemented by default; you can also integrate your own internal monitoring system, though a few interfaces must be implemented.

See: [Integrating monitoring with Nightingale](../dev_guide/monitor_system_integrate_with_n9e.md) and [Integrating monitoring with other systems](../dev_guide/monitor_system_integrate_with_self.md).

### 11. Why is there no data for connection info and latency info?

This data is only produced when used together with Didi's internal kafka-gateway, which has not been open-sourced yet.

### 12. Why is the logical cluster not visible after its application is approved?

The logical cluster application and approval is only a ticket workflow; it does not actually create the logical cluster, which still has to be created manually.

See: [kafka-manager cluster onboarding](add_cluster/add_cluster.md).
@@ -47,4 +47,13 @@ public enum AccountRoleEnum {
}
return AccountRoleEnum.UNKNOWN;
}

public static AccountRoleEnum getUserRoleEnum(String roleName) {
for (AccountRoleEnum elem: AccountRoleEnum.values()) {
if (elem.message.equalsIgnoreCase(roleName)) {
return elem;
}
}
return AccountRoleEnum.UNKNOWN;
}
}
@@ -1,45 +0,0 @@
package com.xiaojukeji.kafka.manager.common.bizenum;

/**
 * Whether to report to the monitoring system
 * @author zengqiao
 * @date 20/9/25
 */
public enum SinkMonitorSystemEnum {
SINK_MONITOR_SYSTEM(0, "上报监控系统"),
NOT_SINK_MONITOR_SYSTEM(1, "不上报监控系统"),
;

private Integer code;

private String message;

SinkMonitorSystemEnum(Integer code, String message) {
this.code = code;
this.message = message;
}

public Integer getCode() {
return code;
}

public void setCode(Integer code) {
this.code = code;
}

public String getMessage() {
return message;
}

public void setMessage(String message) {
this.message = message;
}

@Override
public String toString() {
return "SinkMonitorSystemEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}
@@ -0,0 +1,32 @@
package com.xiaojukeji.kafka.manager.common.bizenum;

/**
 * Expired Topic status
 * @author zengqiao
 * @date 21/01/25
 */
public enum TopicExpiredStatusEnum {
ALREADY_NOTIFIED_AND_DELETED(-2, "已通知, 已下线"),
ALREADY_NOTIFIED_AND_CAN_DELETE(-1, "已通知, 可下线"),
ALREADY_EXPIRED_AND_WAIT_NOTIFY(0, "已过期, 待通知"),
ALREADY_NOTIFIED_AND_WAIT_RESPONSE(1, "已通知, 待反馈"),
;

private int status;

private String message;

TopicExpiredStatusEnum(int status, String message) {
this.status = status;
this.message = message;
}

public int getStatus() {
return status;
}

public String getMessage() {
return message;
}
}
@@ -7,18 +7,18 @@ package com.xiaojukeji.kafka.manager.common.constant;
*/
public class ApiPrefix {
public static final String API_PREFIX = "/api/";
public static final String API_V1_PREFIX = API_PREFIX + "v1/";
public static final String API_V2_PREFIX = API_PREFIX + "v2/";
private static final String API_V1_PREFIX = API_PREFIX + "v1/";

// login
public static final String API_V1_SSO_PREFIX = API_V1_PREFIX + "sso/";

// console
public static final String API_V1_SSO_PREFIX = API_V1_PREFIX + "sso/";
public static final String API_V1_NORMAL_PREFIX = API_V1_PREFIX + "normal/";
public static final String API_V1_RD_PREFIX = API_V1_PREFIX + "rd/";
public static final String API_V1_OP_PREFIX = API_V1_PREFIX + "op/";

// open
public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/";
public static final String API_V2_THIRD_PART_PREFIX = API_V2_PREFIX + "third-part/";

// gateway
public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX;
@@ -97,7 +97,7 @@ public class Result<T> implements Serializable {
return result;
}

public static <T> Result<T> buildFailure(String message) {
public static <T> Result<T> buildGatewayFailure(String message) {
Result<T> result = new Result<T>();
result.setCode(ResultStatus.GATEWAY_INVALID_REQUEST.getCode());
result.setMessage(message);
@@ -105,6 +105,14 @@ public class Result<T> implements Serializable {
return result;
}

public static <T> Result<T> buildFailure(String message) {
Result<T> result = new Result<T>();
result.setCode(ResultStatus.FAIL.getCode());
result.setMessage(message);
result.setData(null);
return result;
}

public static Result buildFrom(ResultStatus resultStatus) {
Result result = new Result();
result.setCode(resultStatus.getCode());
@@ -12,6 +12,8 @@ public enum ResultStatus {
SUCCESS(Constant.SUCCESS, "success"),

FAIL(1, "操作失败"),

/**
 * Operation errors [1000, 2000)
 * ------------------------------------------------------------------------------------------
@@ -23,6 +25,9 @@ public enum ResultStatus {
CHANGE_ZOOKEEPER_FORBIDDEN(1405, "change zookeeper forbidden"),

APP_OFFLINE_FORBIDDEN(1406, "先下线topic,才能下线应用~"),

TOPIC_OPERATION_PARAM_NULL_POINTER(1450, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(1451, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(1452, "Broker数不足错误"),
@@ -91,6 +96,8 @@ public enum ResultStatus {
ZOOKEEPER_CONNECT_FAILED(8020, "zookeeper connect failed"),
ZOOKEEPER_READ_FAILED(8021, "zookeeper read failed"),
ZOOKEEPER_WRITE_FAILED(8022, "zookeeper write failed"),
ZOOKEEPER_DELETE_FAILED(8023, "zookeeper delete failed"),

// failed to call the agent in the cluster task
CALL_CLUSTER_TASK_AGENT_FAILED(8030, " call cluster task agent failed"),
@@ -102,6 +109,7 @@ public enum ResultStatus {
STORAGE_UPLOAD_FILE_FAILED(8050, "upload file failed"),
STORAGE_FILE_TYPE_NOT_SUPPORT(8051, "File type not support"),
STORAGE_DOWNLOAD_FILE_FAILED(8052, "download file failed"),
LDAP_AUTHENTICATION_FAILED(8053, "ldap authentication failed"),

;
@@ -23,6 +23,8 @@ public class ClusterDetailDTO {
private String securityProperties;

private String jmxProperties;

private Integer status;

private Date gmtCreate;
@@ -103,6 +105,14 @@ public class ClusterDetailDTO {
this.securityProperties = securityProperties;
}

public String getJmxProperties() {
return jmxProperties;
}

public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}

public Integer getStatus() {
return status;
}
@@ -176,8 +186,9 @@ public class ClusterDetailDTO {
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", mode=" + mode +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
@@ -1,57 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;

/**
 * @author zengqiao
 * @date 20/9/7
 */
public class SinkTopicRequestTimeMetricsConfig {
private Long clusterId;

private String topicName;

private Long startId;

private Long step;

public Long getClusterId() {
return clusterId;
}

public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}

public String getTopicName() {
return topicName;
}

public void setTopicName(String topicName) {
this.topicName = topicName;
}

public Long getStartId() {
return startId;
}

public void setStartId(Long startId) {
this.startId = startId;
}

public Long getStep() {
return step;
}

public void setStep(Long step) {
this.step = step;
}

@Override
public String toString() {
return "SinkTopicRequestTimeMetricsConfig{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", startId=" + startId +
", step=" + step +
'}';
}
}
@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.op;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;

import java.util.List;

/**
 * @author zengqiao
 * @date 21/01/24
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@ApiModel(description="优选为Controller的候选者")
public class ControllerPreferredCandidateDTO {
@ApiModelProperty(value="集群ID")
private Long clusterId;

@ApiModelProperty(value="优选为controller的BrokerId")
private List<Integer> brokerIdList;

public Long getClusterId() {
return clusterId;
}

public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}

public List<Integer> getBrokerIdList() {
return brokerIdList;
}

public void setBrokerIdList(List<Integer> brokerIdList) {
this.brokerIdList = brokerIdList;
}

@Override
public String toString() {
return "ControllerPreferredCandidateDTO{" +
"clusterId=" + clusterId +
", brokerIdList=" + brokerIdList +
'}';
}
}
@@ -40,6 +40,9 @@ public class TopicCreationDTO extends ClusterTopicDTO {
@ApiModelProperty(value = "Topic属性列表")
private Properties properties;

@ApiModelProperty(value = "最大写入字节数")
private Long peakBytesIn;

public String getAppId() {
return appId;
}
@@ -104,6 +107,14 @@ public class TopicCreationDTO extends ClusterTopicDTO {
this.properties = properties;
}

public Long getPeakBytesIn() {
return peakBytesIn;
}

public void setPeakBytesIn(Long peakBytesIn) {
this.peakBytesIn = peakBytesIn;
}

@Override
public String toString() {
return "TopicCreationDTO{" +
@@ -135,4 +146,4 @@ public class TopicCreationDTO extends ClusterTopicDTO {
}
return true;
}
}
}
@@ -102,12 +102,11 @@ public class ClusterDTO {
'}';
}

public Boolean legal() {
public boolean legal() {
if (ValidateUtils.isNull(clusterName)
|| ValidateUtils.isNull(zookeeper)
|| ValidateUtils.isNull(idc)
|| ValidateUtils.isNull(bootstrapServers)
) {
|| ValidateUtils.isNull(bootstrapServers)) {
return false;
}
return true;
@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.common.entity.pojo;

import java.util.Date;
import java.util.Objects;

/**
 * @author zengqiao
@@ -116,4 +117,22 @@ public class ClusterDO implements Comparable<ClusterDO> {
public int compareTo(ClusterDO clusterDO) {
return this.id.compareTo(clusterDO.id);
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterDO clusterDO = (ClusterDO) o;
return Objects.equals(id, clusterDO.id)
&& Objects.equals(clusterName, clusterDO.clusterName)
&& Objects.equals(zookeeper, clusterDO.zookeeper)
&& Objects.equals(bootstrapServers, clusterDO.bootstrapServers)
&& Objects.equals(securityProperties, clusterDO.securityProperties)
&& Objects.equals(jmxProperties, clusterDO.jmxProperties);
}

@Override
public int hashCode() {
return Objects.hash(id, clusterName, zookeeper, bootstrapServers, securityProperties, jmxProperties);
}
}
@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.common.entity.pojo;

import com.xiaojukeji.kafka.manager.common.entity.dto.op.topic.TopicCreationDTO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;

import java.util.Date;
@@ -95,6 +96,7 @@ public class TopicDO {
topicDO.setClusterId(dto.getClusterId());
topicDO.setTopicName(dto.getTopicName());
topicDO.setDescription(dto.getDescription());
topicDO.setPeakBytesIn(ValidateUtils.isNull(dto.getPeakBytesIn()) ? -1L : dto.getPeakBytesIn());
return topicDO;
}
}
}
@@ -17,6 +17,8 @@ public class GatewayConfigDO {
private Long version;

private String description;

private Date createTime;

private Date modifyTime;
@@ -61,6 +63,14 @@ public class GatewayConfigDO {
this.version = version;
}

public String getDescription() {
return description;
}

public void setDescription(String description) {
this.description = description;
}

public Date getCreateTime() {
return createTime;
}
@@ -85,6 +95,7 @@ public class GatewayConfigDO {
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", description='" + description + '\'' +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';
@@ -28,7 +28,7 @@ public class ExpiredTopicVO {
@ApiModelProperty(value = "负责人")
private String principals;

@ApiModelProperty(value = "状态, -1:可下线, 0:过期待通知, 1+:已通知待反馈")
@ApiModelProperty(value = "状态, -1:已通知可下线, 0:过期待通知, 1+:已通知待反馈")
private Integer status;

public Long getClusterId() {
@@ -26,6 +26,9 @@ public class GatewayConfigVO {
@ApiModelProperty(value="版本")
private Long version;

@ApiModelProperty(value="描述说明")
private String description;

@ApiModelProperty(value="创建时间")
private Date createTime;
@@ -72,6 +75,14 @@ public class GatewayConfigVO {
this.version = version;
}

public String getDescription() {
return description;
}

public void setDescription(String description) {
this.description = description;
}

public Date getCreateTime() {
return createTime;
}
@@ -96,6 +107,7 @@ public class GatewayConfigVO {
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", description='" + description + '\'' +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';
@@ -60,6 +60,13 @@ public class JsonUtils {
return JSON.parseObject(src, clazz);
}

public static <T> List<T> stringToArrObj(String src, Class<T> clazz) {
if (ValidateUtils.isBlank(src)) {
return null;
}
return JSON.parseArray(src, clazz);
}

public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
@@ -79,7 +79,7 @@ public class JmxConnectorWrap {
try {
Map<String, Object> environment = new HashMap<String, Object>();
if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) {
environment.put(javax.management.remote.JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
environment.put(JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
}
if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
environment.put(Context.SECURITY_PROTOCOL, "ssl");
@@ -33,7 +33,9 @@ public class ZkPathUtil {
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";

public static final String D_CONTROLLER_CANDIDATES = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension/candidates";
public static final String D_CONFIG_EXTENSION_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension";

public static final String D_CONTROLLER_CANDIDATES = D_CONFIG_EXTENSION_ROOT_NODE + ZOOKEEPER_SEPARATOR + "candidates";

public static String getBrokerIdNodePath(Integer brokerId) {
return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
@@ -111,6 +113,10 @@ public class ZkPathUtil {
}

public static String getKafkaExtraMetricsPath(Integer brokerId) {
return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + brokerId;
}

public static String getControllerCandidatePath(Integer brokerId) {
return D_CONTROLLER_CANDIDATES + ZOOKEEPER_SEPARATOR + brokerId;
}
}
@@ -1,8 +1,5 @@
package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;

/**
@@ -18,12 +15,11 @@ import java.util.List;
 * "host":null,
 * "timestamp":"1546632983233",
 * "port":-1,
 * "version":4
 * "version":4,
 * "rack": "CY"
 * }
 */
public class BrokerMetadata implements Cloneable {
private final static Logger LOGGER = LoggerFactory.getLogger(TopicMetadata.class);

private long clusterId;

private int brokerId;
@@ -43,6 +39,8 @@ public class BrokerMetadata implements Cloneable {

private long timestamp;

private String rack;

public long getClusterId() {
return clusterId;
}
@@ -107,14 +105,12 @@ public class BrokerMetadata implements Cloneable {
this.timestamp = timestamp;
}

@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException var3) {
LOGGER.error("clone BrokerMetadata failed.", var3);
}
return null;
public String getRack() {
return rack;
}

public void setRack(String rack) {
this.rack = rack;
}

@Override
@@ -128,6 +124,7 @@ public class BrokerMetadata implements Cloneable {
", jmxPort=" + jmx_port +
", version='" + version + '\'' +
", timestamp=" + timestamp +
", rack='" + rack + '\'' +
'}';
}
}
@@ -1,6 +1,6 @@
{
"name": "mobx-ts-example",
"version": "1.0.0",
"name": "logi-kafka",
"version": "2.3.1",
"description": "",
"scripts": {
"start": "webpack-dev-server",
@@ -21,7 +21,7 @@
"@types/spark-md5": "^3.0.2",
"antd": "^3.26.15",
"clean-webpack-plugin": "^3.0.0",
"clipboard": "^2.0.6",
"clipboard": "2.0.6",
"cross-env": "^7.0.2",
"css-loader": "^2.1.0",
"echarts": "^4.5.0",
@@ -56,4 +56,4 @@
"dependencies": {
"format-to-json": "^1.0.4"
}
}
}
@@ -126,7 +126,7 @@ export class SearchAndFilterContainer extends React.Component<any, ISearchAndFil
);
}

public renderSearch(text?: string, placeholder?: string, keyName: string = 'searchKey') {
public renderSearch(text?: string, placeholder?: string, keyName: string = 'searchKey',) {
const value = this.state[keyName] as string;
return (
<li className="render-box">
@@ -1,8 +1,8 @@
package com.xiaojukeji.kafka.manager.service.cache;

import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.factory.KafkaConsumerFactory;
import kafka.admin.AdminClient;
import org.apache.commons.pool2.impl.GenericObjectPool;
@@ -103,6 +103,21 @@ public class KafkaClientPool {
}
}

public static void closeKafkaConsumerPool(Long clusterId) {
lock.lock();
try {
GenericObjectPool<KafkaConsumer> objectPool = KAFKA_CONSUMER_POOL.remove(clusterId);
if (objectPool == null) {
return;
}
objectPool.close();
} catch (Exception e) {
LOGGER.error("close kafka consumer pool failed, clusterId:{}.", clusterId, e);
} finally {
lock.unlock();
}
}

public static KafkaConsumer borrowKafkaConsumerClient(ClusterDO clusterDO) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
@@ -132,7 +147,11 @@ public class KafkaClientPool {
if (ValidateUtils.isNull(objectPool)) {
return;
}
objectPool.returnObject(kafkaConsumer);
try {
objectPool.returnObject(kafkaConsumer);
} catch (Exception e) {
LOGGER.error("return kafka consumer client failed, clusterId:{}", physicalClusterId, e);
}
}

public static AdminClient getAdminClient(Long clusterId) {
@@ -4,24 +4,23 @@ import com.xiaojukeji.kafka.manager.common.bizenum.KafkaBrokerRoleEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.entity.KafkaVersion;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConfig;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.ControllerData;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import com.xiaojukeji.kafka.manager.service.zookeeper.*;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkPathUtil;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.ControllerData;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import com.xiaojukeji.kafka.manager.service.zookeeper.BrokerStateListener;
import com.xiaojukeji.kafka.manager.service.zookeeper.ControllerStateListener;
import com.xiaojukeji.kafka.manager.service.zookeeper.TopicStateListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -49,15 +48,6 @@ public class PhysicalClusterMetadataManager {
@Autowired
private ClusterService clusterService;

@Autowired
private ConfigUtils configUtils;

@Autowired
private TopicDao topicDao;

@Autowired
private AuthorityDao authorityDao;

private final static Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();

private final static Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
@@ -133,7 +123,7 @@ public class PhysicalClusterMetadataManager {
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);

// add Topic monitoring
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, topicDao, authorityDao);
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig);
topicListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener);
@@ -172,8 +162,12 @@ public class PhysicalClusterMetadataManager {
CLUSTER_MAP.remove(clusterId);
}

public Set<Long> getClusterIdSet() {
return CLUSTER_MAP.keySet();
public static Map<Long, ClusterDO> getClusterMap() {
return CLUSTER_MAP;
}

public static void updateClusterMap(ClusterDO clusterDO) {
CLUSTER_MAP.put(clusterDO.getId(), clusterDO);
}

public static ClusterDO getClusterFromCache(Long clusterId) {
@@ -51,4 +51,20 @@ public interface ClusterService {
 * @return void
 */
Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId);

/**
 * Add brokers preferred for controller election
 * @param clusterId cluster ID
 * @param brokerIdList list of brokerIds
 * @return
 */
Result addControllerPreferredCandidates(Long clusterId, List<Integer> brokerIdList);

/**
 * Remove brokers preferred for controller election
 * @param clusterId cluster ID
 * @param brokerIdList list of brokerIds
 * @return
 */
Result deleteControllerPreferredCandidates(Long clusterId, List<Integer> brokerIdList);
}
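
For illustration only, a caller of the two new interface methods might look like the sketch below; the import paths and Spring wiring are assumptions based on the surrounding diff, not code from this commit.

```java
import java.util.Arrays;

import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

// Hypothetical usage sketch of the ClusterService methods added in this diff.
@Component
public class ControllerCandidateExample {

    @Autowired
    private ClusterService clusterService;

    public void preferBrokersAsController(Long clusterId) {
        // per this diff, each broker is checked for liveness before its ZK candidate node is written
        Result result = clusterService.addControllerPreferredCandidates(clusterId, Arrays.asList(1, 2));
        if (result.failed()) {
            // failure statuses seen in this diff: PARAM_ILLEGAL, BROKER_NOT_EXIST, ZOOKEEPER_WRITE_FAILED
            throw new IllegalStateException("failed to add controller preferred candidates");
        }
    }
}
```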
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.service.service;

import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.RegionDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;

import java.util.List;
@@ -22,6 +22,8 @@ import java.util.Map;
public interface TopicManagerService {
List<TopicDO> listAll();

List<TopicDO> getByClusterIdFromCache(Long clusterId);

List<TopicDO> getByClusterId(Long clusterId);

TopicDO getByTopicName(Long clusterId, String topicName);
@@ -26,4 +26,20 @@ public interface ZookeeperService {
 * @return operation result
 */
Result<List<Integer>> getControllerPreferredCandidates(Long clusterId);

/**
 * Add a broker preferred for controller election
 * @param clusterId cluster ID
 * @param brokerId brokerId
 * @return
 */
Result addControllerPreferredCandidate(Long clusterId, Integer brokerId);

/**
 * Remove a broker preferred for controller election
 * @param clusterId cluster ID
 * @param brokerId brokerId
 * @return
 */
Result deleteControllerPreferredCandidate(Long clusterId, Integer brokerId);
}
@@ -1,16 +1,17 @@
package com.xiaojukeji.kafka.manager.service.service.gateway.impl;

import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperationStatusEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.TopicQuota;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OperateRecordDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.KafkaAclDO;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.TopicQuota;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.dao.gateway.KafkaAclDao;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
import com.xiaojukeji.kafka.manager.service.service.gateway.AuthorityService;
@@ -20,10 +21,8 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.stream.Collectors;

/**
 * @author zhongyuankai
@@ -120,7 +119,7 @@ public class AuthorityServiceImpl implements AuthorityService {
operateRecordDO.setModuleId(ModuleEnum.AUTHORITY.getCode());
operateRecordDO.setOperateId(OperateEnum.DELETE.getCode());
operateRecordDO.setResource(topicName);
operateRecordDO.setContent(JSONObject.toJSONString(content));
operateRecordDO.setContent(JsonUtils.toJSONString(content));
operateRecordDO.setOperator(operator);
operateRecordService.insert(operateRecordDO);
} catch (Exception e) {
@@ -150,7 +149,7 @@ public class AuthorityServiceImpl implements AuthorityService {
} catch (Exception e) {
LOGGER.error("get authority failed, clusterId:{} topicName:{}.", clusterId, topicName, e);
}
return null;
return Collections.emptyList();
}

@Override
@@ -164,7 +163,11 @@ public class AuthorityServiceImpl implements AuthorityService {
if (ValidateUtils.isEmptyList(doList)) {
return new ArrayList<>();
}
return doList;

// filter out entries without permission from the authority list
return doList.stream()
.filter(authorityDO -> !TopicAuthorityEnum.DENY.getCode().equals(authorityDO.getAccess()))
.collect(Collectors.toList());
}

@Override
@@ -221,13 +221,24 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
if (ValidateUtils.isNull(oldGatewayConfigDO)) {
return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
}

if (!oldGatewayConfigDO.getName().equals(newGatewayConfigDO.getName())
|| !oldGatewayConfigDO.getType().equals(newGatewayConfigDO.getType())
|| ValidateUtils.isBlank(newGatewayConfigDO.getValue())) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
newGatewayConfigDO.setVersion(oldGatewayConfigDO.getVersion() + 1);
if (gatewayConfigDao.updateById(oldGatewayConfigDO) > 0) {

// fetch the existing configs of the same type; the version must be increased after insertion
List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(newGatewayConfigDO.getType());
Long version = 1L;
for (GatewayConfigDO elem: gatewayConfigDOList) {
if (elem.getVersion() > version) {
version = elem.getVersion() + 1L;
}
}

newGatewayConfigDO.setVersion(version);
if (gatewayConfigDao.updateById(newGatewayConfigDO) > 0) {
return Result.buildSuc();
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
@@ -340,10 +340,6 @@ public class AdminServiceImpl implements AdminService {
@Override
public ResultStatus modifyTopicConfig(ClusterDO clusterDO, String topicName, Properties properties, String operator) {
ResultStatus rs = TopicCommands.modifyTopicConfig(clusterDO, topicName, properties);
if (!ResultStatus.SUCCESS.equals(rs)) {
return rs;
}

return rs;
}
}
@@ -111,12 +111,13 @@ public class ClusterServiceImpl implements ClusterService {
// changing the zk address is not allowed
return ResultStatus.CHANGE_ZOOKEEPER_FORBIDDEN;
}
clusterDO.setStatus(originClusterDO.getStatus());
Map<String, String> content = new HashMap<>();
content.put("cluster id", clusterDO.getId().toString());
content.put("security properties", clusterDO.getSecurityProperties());
content.put("jmx properties", clusterDO.getJmxProperties());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, clusterDO.getClusterName(), OperateEnum.EDIT, content);

clusterDO.setStatus(originClusterDO.getStatus());
return updateById(clusterDO);
}
@@ -204,21 +205,31 @@ public class ClusterServiceImpl implements ClusterService {
}

private boolean isZookeeperLegal(String zookeeper) {
boolean status = false;

ZooKeeper zk = null;
try {
zk = new ZooKeeper(zookeeper, 1000, null);
} catch (Throwable t) {
return false;
for (int i = 0; i < 15; ++i) {
if (zk.getState().isConnected()) {
// only a connected state indicates the address is legal
status = true;
break;
}
Thread.sleep(1000);
}
} catch (Exception e) {
LOGGER.error("class=ClusterServiceImpl||method=isZookeeperLegal||zookeeper={}||msg=zk address illegal||errMsg={}", zookeeper, e.getMessage());
} finally {
try {
if (zk != null) {
zk.close();
}
} catch (Throwable t) {
return false;
} catch (Exception e) {
LOGGER.error("class=ClusterServiceImpl||method=isZookeeperLegal||zookeeper={}||msg=close zk client failed||errMsg={}", zookeeper, e.getMessage());
}
}
return true;
return status;
}

@Override
@@ -275,7 +286,7 @@ public class ClusterServiceImpl implements ClusterService {
try {
Map<String, String> content = new HashMap<>();
content.put("cluster id", clusterId.toString());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, getClusterName(clusterId).getPhysicalClusterName(), OperateEnum.DELETE, content);
operateRecordService.insert(operator, ModuleEnum.CLUSTER, String.valueOf(clusterId), OperateEnum.DELETE, content);
if (clusterDao.deleteById(clusterId) <= 0) {
LOGGER.error("delete cluster failed, clusterId:{}.", clusterId);
return ResultStatus.MYSQL_ERROR;
@@ -289,8 +300,9 @@ public class ClusterServiceImpl implements ClusterService {
private ClusterDetailDTO getClusterDetailDTO(ClusterDO clusterDO, Boolean needDetail) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
return new ClusterDetailDTO();
}

ClusterDetailDTO dto = new ClusterDetailDTO();
dto.setClusterId(clusterDO.getId());
dto.setClusterName(clusterDO.getClusterName());
@@ -299,6 +311,7 @@ public class ClusterServiceImpl implements ClusterService {
dto.setKafkaVersion(physicalClusterMetadataManager.getKafkaVersionFromCache(clusterDO.getId()));
dto.setIdc(configUtils.getIdc());
dto.setSecurityProperties(clusterDO.getSecurityProperties());
dto.setJmxProperties(clusterDO.getJmxProperties());
dto.setStatus(clusterDO.getStatus());
dto.setGmtCreate(clusterDO.getGmtCreate());
dto.setGmtModify(clusterDO.getGmtModify());
@@ -337,4 +350,39 @@ public class ClusterServiceImpl implements ClusterService {
}
return Result.buildSuc(controllerPreferredCandidateList);
}

@Override
public Result addControllerPreferredCandidates(Long clusterId, List<Integer> brokerIdList) {
if (ValidateUtils.isNull(clusterId) || ValidateUtils.isEmptyList(brokerIdList)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}

// the added BrokerIds must be checked for liveness
for (Integer brokerId: brokerIdList) {
if (!PhysicalClusterMetadataManager.isBrokerAlive(clusterId, brokerId)) {
return Result.buildFrom(ResultStatus.BROKER_NOT_EXIST);
}

Result result = zookeeperService.addControllerPreferredCandidate(clusterId, brokerId);
if (result.failed()) {
return result;
}
}
return Result.buildSuc();
}

@Override
public Result deleteControllerPreferredCandidates(Long clusterId, List<Integer> brokerIdList) {
if (ValidateUtils.isNull(clusterId) || ValidateUtils.isEmptyList(brokerIdList)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}

for (Integer brokerId: brokerIdList) {
Result result = zookeeperService.deleteControllerPreferredCandidate(clusterId, brokerId);
if (result.failed()) {
return result;
}
}
return Result.buildSuc();
}
}
@@ -8,7 +8,6 @@ import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
@@ -129,7 +128,7 @@ public class ConsumerServiceImpl implements ConsumerService {
}
summary.setState(consumerGroupSummary.state());

java.util.Iterator<scala.collection.immutable.List<AdminClient.ConsumerSummary>> it = JavaConversions.asJavaIterator(consumerGroupSummary.consumers().iterator());
Iterator<scala.collection.immutable.List<AdminClient.ConsumerSummary>> it = JavaConversions.asJavaIterator(consumerGroupSummary.consumers().iterator());
while (it.hasNext()) {
List<AdminClient.ConsumerSummary> consumerSummaryList = JavaConversions.asJavaList(it.next());
for (AdminClient.ConsumerSummary consumerSummary: consumerSummaryList) {
@@ -95,6 +95,14 @@ public class TopicManagerServiceImpl implements TopicManagerService {
return new ArrayList<>();
}

@Override
public List<TopicDO> getByClusterIdFromCache(Long clusterId) {
if (clusterId == null) {
return new ArrayList<>();
}
return topicDao.getByClusterIdFromCache(clusterId);
}

@Override
public List<TopicDO> getByClusterId(Long clusterId) {
if (clusterId == null) {
@@ -381,7 +381,7 @@ public class TopicServiceImpl implements TopicService {
return new ArrayList<>();
}

List<TopicDO> topicDOList = topicManagerService.getByClusterId(clusterId);
List<TopicDO> topicDOList = topicManagerService.getByClusterIdFromCache(clusterId);
if (ValidateUtils.isNull(topicDOList)) {
topicDOList = new ArrayList<>();
}
@@ -70,4 +70,58 @@ public class ZookeeperServiceImpl implements ZookeeperService {
}
return Result.buildFrom(ResultStatus.ZOOKEEPER_READ_FAILED);
}

@Override
public Result addControllerPreferredCandidate(Long clusterId, Integer brokerId) {
if (ValidateUtils.isNull(clusterId)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterId);
if (ValidateUtils.isNull(zkConfig)) {
return Result.buildFrom(ResultStatus.ZOOKEEPER_CONNECT_FAILED);
}

try {
if (zkConfig.checkPathExists(ZkPathUtil.getControllerCandidatePath(brokerId))) {
// the node already exists, so ignore
return Result.buildSuc();
}

if (!zkConfig.checkPathExists(ZkPathUtil.D_CONFIG_EXTENSION_ROOT_NODE)) {
zkConfig.setOrCreatePersistentNodeStat(ZkPathUtil.D_CONFIG_EXTENSION_ROOT_NODE, "");
}

if (!zkConfig.checkPathExists(ZkPathUtil.D_CONTROLLER_CANDIDATES)) {
zkConfig.setOrCreatePersistentNodeStat(ZkPathUtil.D_CONTROLLER_CANDIDATES, "");
}

zkConfig.setOrCreatePersistentNodeStat(ZkPathUtil.getControllerCandidatePath(brokerId), "");
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error("class=ZookeeperServiceImpl||method=addControllerPreferredCandidate||clusterId={}||brokerId={}||errMsg={}||", clusterId, brokerId, e.getMessage());
}
return Result.buildFrom(ResultStatus.ZOOKEEPER_WRITE_FAILED);
}

@Override
public Result deleteControllerPreferredCandidate(Long clusterId, Integer brokerId) {
if (ValidateUtils.isNull(clusterId)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterId);
if (ValidateUtils.isNull(zkConfig)) {
return Result.buildFrom(ResultStatus.ZOOKEEPER_CONNECT_FAILED);
}

try {
if (!zkConfig.checkPathExists(ZkPathUtil.getControllerCandidatePath(brokerId))) {
return Result.buildSuc();
}
zkConfig.delete(ZkPathUtil.getControllerCandidatePath(brokerId));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error("class=ZookeeperServiceImpl||method=deleteControllerPreferredCandidate||clusterId={}||brokerId={}||errMsg={}||", clusterId, brokerId, e.getMessage());
}
return Result.buildFrom(ResultStatus.ZOOKEEPER_DELETE_FAILED);
}
}
@@ -44,7 +44,7 @@ public class TopicCommands {
);

// generate the replica assignment
scala.collection.Map<Object, scala.collection.Seq<Object>> replicaAssignment =
scala.collection.Map<Object, Seq<Object>> replicaAssignment =
AdminUtils.assignReplicasToBrokers(
convert2BrokerMetadataSeq(brokerIdList),
partitionNum,
@@ -177,7 +177,7 @@ public class TopicCommands {
)
);

Map<TopicAndPartition, scala.collection.Seq<Object>> existingAssignJavaMap =
Map<TopicAndPartition, Seq<Object>> existingAssignJavaMap =
JavaConversions.asJavaMap(existingAssignScalaMap);
// merge the new partitions' assignment with the old assignment
Map<Object, Seq<Object>> targetMap = new HashMap<>();
@@ -5,8 +5,6 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata
import com.xiaojukeji.kafka.manager.common.zookeeper.StateChangeListener;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkPathUtil;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.ThreadPool;
import org.apache.zookeeper.data.Stat;
@@ -24,28 +22,17 @@ import java.util.concurrent.*;
|
||||
* @date 20/5/14
|
||||
*/
|
||||
public class TopicStateListener implements StateChangeListener {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(TopicStateListener.class);
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(TopicStateListener.class);
|
||||
|
||||
private Long clusterId;
|
||||
|
||||
private ZkConfigImpl zkConfig;
|
||||
|
||||
private TopicDao topicDao;
|
||||
|
||||
private AuthorityDao authorityDao;
|
||||
|
||||
public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) {
|
||||
this.clusterId = clusterId;
|
||||
this.zkConfig = zkConfig;
|
||||
}
|
||||
|
||||
public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig, TopicDao topicDao, AuthorityDao authorityDao) {
|
||||
this.clusterId = clusterId;
|
||||
this.zkConfig = zkConfig;
|
||||
this.topicDao = topicDao;
|
||||
this.authorityDao = authorityDao;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void init() {
|
||||
try {
|
||||
@@ -53,7 +40,7 @@ public class TopicStateListener implements StateChangeListener {
|
||||
FutureTask[] taskList = new FutureTask[topicNameList.size()];
|
||||
for (int i = 0; i < topicNameList.size(); i++) {
|
||||
String topicName = topicNameList.get(i);
|
||||
taskList[i] = new FutureTask(new Callable() {
|
||||
taskList[i] = new FutureTask(new Callable<Object>() {
|
||||
@Override
|
||||
public Object call() throws Exception {
|
||||
processTopicAdded(topicName);
|
||||
@@ -65,7 +52,6 @@ public class TopicStateListener implements StateChangeListener {
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("init topics metadata failed, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -92,8 +78,6 @@ public class TopicStateListener implements StateChangeListener {
|
||||
private void processTopicDelete(String topicName) {
|
||||
LOGGER.warn("delete topic, clusterId:{} topicName:{}.", clusterId, topicName);
|
||||
PhysicalClusterMetadataManager.removeTopicMetadata(clusterId, topicName);
|
||||
topicDao.removeTopicInCache(clusterId, topicName);
|
||||
authorityDao.removeAuthorityInCache(clusterId, topicName);
|
||||
}
|
||||
|
||||
private void processTopicAdded(String topicName) {
|
||||
@@ -122,4 +106,4 @@ public class TopicStateListener implements StateChangeListener {
|
||||
LOGGER.error("add topic failed, clusterId:{} topicMetadata:{}.", clusterId, topicMetadata, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
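The init() hunk parameterizes the raw Callable as Callable&lt;Object&gt;, removing an unchecked-generics warning without changing behavior. A self-contained sketch of the same fan-out pattern — one FutureTask per topic, executed on a pool, then joined — with placeholder topic names:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;

public class ParallelInitSketch {
    public static void main(String[] args) throws Exception {
        List<String> topicNameList = Arrays.asList("orders", "payments", "audit");
        ExecutorService pool = Executors.newFixedThreadPool(4);

        FutureTask<?>[] taskList = new FutureTask<?>[topicNameList.size()];
        for (int i = 0; i < topicNameList.size(); i++) {
            String topicName = topicNameList.get(i);
            // Callable<Object> instead of a raw Callable: the same change the hunk makes.
            taskList[i] = new FutureTask<>(new Callable<Object>() {
                @Override
                public Object call() {
                    System.out.println("loading metadata for " + topicName);
                    return null;
                }
            });
            pool.execute(taskList[i]);
        }
        for (FutureTask<?> task : taskList) {
            task.get(); // join: block until every topic has been processed
        }
        pool.shutdown();
    }
}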
@@ -15,6 +15,8 @@ public interface TopicDao {

    TopicDO getByTopicName(Long clusterId, String topicName);

    List<TopicDO> getByClusterIdFromCache(Long clusterId);

    List<TopicDO> getByClusterId(Long clusterId);

    List<TopicDO> getByAppId(String appId);
@@ -22,6 +24,4 @@ public interface TopicDao {
    List<TopicDO> listAll();

    TopicDO getTopic(Long clusterId, String topicName, String appId);

    TopicDO removeTopicInCache(Long clusterId, String topicName);
}
@@ -16,8 +16,6 @@ public interface AppDao {
     */
    int insert(AppDO appDO);

    int insertIgnoreGatewayDB(AppDO appDO);

    /**
     * Delete an app by appId
     * @param appName app name
@@ -60,6 +58,4 @@ public interface AppDao {
     * @return int
     */
    int updateById(AppDO appDO);

    List<AppDO> listNewAll();
}
@@ -15,8 +15,6 @@ public interface AuthorityDao {
     */
    int insert(AuthorityDO authorityDO);

    int replaceIgnoreGatewayDB(AuthorityDO authorityDO);

    /**
     * Get authority records
     * @param clusterId cluster id
@@ -38,7 +36,5 @@ public interface AuthorityDao {

    Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority();

    void removeAuthorityInCache(Long clusterId, String topicName);

    int deleteAuthorityByTopic(Long clusterId, String topicName);
}
@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.dao.gateway.impl;

import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.dao.gateway.AppDao;
import com.xiaojukeji.kafka.manager.task.Constant;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
@@ -21,7 +22,7 @@ public class AppDaoImpl implements AppDao {
    /**
     * Timestamp of the most recent APP update; the cache is refreshed from this point on
     */
    private static Long APP_CACHE_LATEST_UPDATE_TIME = 0L;
    private static volatile long APP_CACHE_LATEST_UPDATE_TIME = Constant.START_TIMESTAMP;
    private static final Map<String, AppDO> APP_MAP = new ConcurrentHashMap<>();

    @Override
@@ -29,11 +30,6 @@ public class AppDaoImpl implements AppDao {
        return sqlSession.insert("AppDao.insert", appDO);
    }

    @Override
    public int insertIgnoreGatewayDB(AppDO appDO) {
        return sqlSession.insert("AppDao.insert", appDO);
    }

    @Override
    public int deleteByName(String appName) {
        return sqlSession.delete("AppDao.deleteByName", appName);
@@ -66,7 +62,12 @@ public class AppDaoImpl implements AppDao {
    }

    private void updateTopicCache() {
        Long timestamp = System.currentTimeMillis();
        long timestamp = System.currentTimeMillis();

        if (timestamp + 1000 <= APP_CACHE_LATEST_UPDATE_TIME) {
            // Requests within the last second do not hit the DB
            return;
        }

        Date afterTime = new Date(APP_CACHE_LATEST_UPDATE_TIME);
        List<AppDO> doList = sqlSession.selectList("AppDao.listAfterTime", afterTime);
@@ -76,19 +77,23 @@ public class AppDaoImpl implements AppDao {
    /**
     * Refresh the APP cache
     */
    synchronized private void updateTopicCache(List<AppDO> doList, Long timestamp) {
    private synchronized void updateTopicCache(List<AppDO> doList, long timestamp) {
        if (APP_CACHE_LATEST_UPDATE_TIME == Constant.START_TIMESTAMP) {
            APP_MAP.clear();
        }

        if (doList == null || doList.isEmpty() || APP_CACHE_LATEST_UPDATE_TIME >= timestamp) {
            // No new data this round, or this refresh is already stale: skip it
            return;
        }

        for (AppDO elem: doList) {
            APP_MAP.put(elem.getAppId(), elem);
        }
        APP_CACHE_LATEST_UPDATE_TIME = timestamp;
    }

    @Override
    public List<AppDO> listNewAll() {
        return sqlSession.selectList("AppDao.listNewAll");
    public static void resetCache() {
        APP_CACHE_LATEST_UPDATE_TIME = Constant.START_TIMESTAMP;
    }
}
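AppDaoImpl, AuthorityDaoImpl and TopicDaoImpl (below) now share one caching scheme: a volatile last-refresh timestamp guards a synchronized incremental reload, and resetCache() drops the timestamp back to START_TIMESTAMP so the next read rebuilds the map from scratch. A condensed, self-contained model of the scheme (class and names are illustrative):

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class IncrementalCacheSketch {
    private static final long START_TIMESTAMP = 0;

    // volatile so readers see resets made by the background task thread
    private static volatile long latestUpdateTime = START_TIMESTAMP;
    private static final Map<String, String> CACHE = new ConcurrentHashMap<>();

    static synchronized void refresh(List<String> changedKeys, long timestamp) {
        if (latestUpdateTime == START_TIMESTAMP) {
            CACHE.clear(); // a reset happened: rebuild from scratch
        }
        if (changedKeys == null || changedKeys.isEmpty() || latestUpdateTime >= timestamp) {
            return; // nothing new, or this refresh raced with a newer one
        }
        for (String key : changedKeys) {
            CACHE.put(key, "loaded@" + timestamp);
        }
        latestUpdateTime = timestamp;
    }

    /** Called periodically (cf. DaoBackgroundTask below) to force a full reload. */
    static void resetCache() {
        latestUpdateTime = START_TIMESTAMP;
    }
}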
@@ -1,8 +1,8 @@
package com.xiaojukeji.kafka.manager.dao.gateway.impl;

import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.task.Constant;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
@@ -23,7 +23,8 @@ public class AuthorityDaoImpl implements AuthorityDao {
     * Timestamp of the most recent Authority update; the cache is refreshed from this point on
     * <AppID, <clusterId, <TopicName, AuthorityDO>>>
     */
    private static Long AUTHORITY_CACHE_LATEST_UPDATE_TIME = 0L;
    private static volatile long AUTHORITY_CACHE_LATEST_UPDATE_TIME = Constant.START_TIMESTAMP;

    private static final Map<String, Map<Long, Map<String, AuthorityDO>>> AUTHORITY_MAP = new ConcurrentHashMap<>();

    @Override
@@ -31,11 +32,6 @@ public class AuthorityDaoImpl implements AuthorityDao {
        return sqlSession.insert("AuthorityDao.replace", authorityDO);
    }

    @Override
    public int replaceIgnoreGatewayDB(AuthorityDO authorityDO) {
        return sqlSession.insert("AuthorityDao.replace", authorityDO);
    }

    @Override
    public List<AuthorityDO> getAuthority(Long clusterId, String topicName, String appId) {
        Map<String, Object> params = new HashMap<>(3);
@@ -62,8 +58,8 @@ public class AuthorityDaoImpl implements AuthorityDao {
        }

        List<AuthorityDO> authorityDOList = new ArrayList<>();
        for (Long clusterId: doMap.keySet()) {
            authorityDOList.addAll(doMap.get(clusterId).values());
        for (Map.Entry<Long, Map<String, AuthorityDO>> entry: doMap.entrySet()) {
            authorityDOList.addAll(entry.getValue().values());
        }
        return authorityDOList;
    }
@@ -87,23 +83,6 @@ public class AuthorityDaoImpl implements AuthorityDao {
        return AUTHORITY_MAP;
    }

    @Override
    public void removeAuthorityInCache(Long clusterId, String topicName) {
        AUTHORITY_MAP.forEach((appId, map) -> {
            map.forEach((id, subMap) -> {
                if (id.equals(clusterId)) {
                    subMap.remove(topicName);
                    if (subMap.isEmpty()) {
                        map.remove(id);
                    }
                }
            });
            if (map.isEmpty()) {
                AUTHORITY_MAP.remove(appId);
            }
        });
    }

    @Override
    public int deleteAuthorityByTopic(Long clusterId, String topicName) {
        Map<String, Object> params = new HashMap<>(2);
@@ -114,7 +93,12 @@ public class AuthorityDaoImpl implements AuthorityDao {


    private void updateAuthorityCache() {
        Long timestamp = System.currentTimeMillis();
        long timestamp = System.currentTimeMillis();

        if (timestamp + 1000 <= AUTHORITY_CACHE_LATEST_UPDATE_TIME) {
            // Requests within the last second do not hit the DB
            return;
        }

        Date afterTime = new Date(AUTHORITY_CACHE_LATEST_UPDATE_TIME);
        List<AuthorityDO> doList = sqlSession.selectList("AuthorityDao.listAfterTime", afterTime);
@@ -124,11 +108,16 @@ public class AuthorityDaoImpl implements AuthorityDao {
    /**
     * Refresh the Authority cache
     */
    synchronized private void updateAuthorityCache(List<AuthorityDO> doList, Long timestamp) {
    private synchronized void updateAuthorityCache(List<AuthorityDO> doList, Long timestamp) {
        if (AUTHORITY_CACHE_LATEST_UPDATE_TIME == Constant.START_TIMESTAMP) {
            AUTHORITY_MAP.clear();
        }

        if (doList == null || doList.isEmpty() || AUTHORITY_CACHE_LATEST_UPDATE_TIME >= timestamp) {
            // No new data this round, or this refresh is already stale: skip it
            return;
        }

        for (AuthorityDO elem: doList) {
            Map<Long, Map<String, AuthorityDO>> doMap =
                    AUTHORITY_MAP.getOrDefault(elem.getAppId(), new ConcurrentHashMap<>());
@@ -139,4 +128,8 @@ public class AuthorityDaoImpl implements AuthorityDao {
        }
        AUTHORITY_CACHE_LATEST_UPDATE_TIME = timestamp;
    }

    public static void resetCache() {
        AUTHORITY_CACHE_LATEST_UPDATE_TIME = Constant.START_TIMESTAMP;
    }
}
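One hunk above swaps keySet()-plus-get() iteration for entrySet(), which visits each mapping once instead of paying a second hash lookup per key. A runnable illustration of the two forms:

import java.util.HashMap;
import java.util.Map;

public class EntrySetSketch {
    public static void main(String[] args) {
        Map<Long, String> byCluster = new HashMap<>();
        byCluster.put(1L, "cluster-a");
        byCluster.put(2L, "cluster-b");

        // keySet() + get(key): one extra lookup per entry
        for (Long id : byCluster.keySet()) {
            System.out.println(id + " -> " + byCluster.get(id));
        }
        // entrySet(): each mapping visited once, which is what the hunk switches to
        for (Map.Entry<Long, String> entry : byCluster.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}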
@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.dao.impl;

import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.task.Constant;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
@@ -18,7 +19,8 @@ public class TopicDaoImpl implements TopicDao {
    /**
     * Timestamp of the most recent Topic update; the cache is refreshed from this point on
     */
    private static Long TOPIC_CACHE_LATEST_UPDATE_TIME = 0L;
    private static volatile long TOPIC_CACHE_LATEST_UPDATE_TIME = Constant.START_TIMESTAMP;

    private static final Map<Long, Map<String, TopicDO>> TOPIC_MAP = new ConcurrentHashMap<>();

    @Autowired
@@ -60,9 +62,14 @@ public class TopicDaoImpl implements TopicDao {
    }

    @Override
    public List<TopicDO> getByClusterId(Long clusterId) {
    public List<TopicDO> getByClusterIdFromCache(Long clusterId) {
        updateTopicCache();
        return new ArrayList<>(TOPIC_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>(0)).values());
        return new ArrayList<>(TOPIC_MAP.getOrDefault(clusterId, Collections.emptyMap()).values());
    }

    @Override
    public List<TopicDO> getByClusterId(Long clusterId) {
        return sqlSession.selectList("TopicDao.getByClusterId", clusterId);
    }

    @Override
@@ -75,27 +82,27 @@ public class TopicDaoImpl implements TopicDao {
        updateTopicCache();
        List<TopicDO> doList = new ArrayList<>();
        for (Long clusterId: TOPIC_MAP.keySet()) {
            doList.addAll(TOPIC_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>(0)).values());
            doList.addAll(TOPIC_MAP.getOrDefault(clusterId, Collections.emptyMap()).values());
        }
        return doList;
    }

    @Override
    public TopicDO getTopic(Long clusterId, String topicName, String appId) {
        Map<String, Object> params = new HashMap<>(2);
        Map<String, Object> params = new HashMap<>(3);
        params.put("clusterId", clusterId);
        params.put("topicName", topicName);
        params.put("appId", appId);
        return sqlSession.selectOne("TopicDao.getTopic", params);
    }

    @Override
    public TopicDO removeTopicInCache(Long clusterId, String topicName) {
        return TOPIC_MAP.getOrDefault(clusterId, new HashMap<>(0)).remove(topicName);
    }

    private void updateTopicCache() {
        Long timestamp = System.currentTimeMillis();
        long timestamp = System.currentTimeMillis();

        if (timestamp + 1000 <= TOPIC_CACHE_LATEST_UPDATE_TIME) {
            // Requests within the last second do not hit the DB
            return;
        }

        Date afterTime = new Date(TOPIC_CACHE_LATEST_UPDATE_TIME);
        List<TopicDO> doList = sqlSession.selectList("TopicDao.listAfterTime", afterTime);
@@ -105,11 +112,16 @@ public class TopicDaoImpl implements TopicDao {
    /**
     * Refresh the Topic cache
     */
    synchronized private void updateTopicCache(List<TopicDO> doList, Long timestamp) {
    private synchronized void updateTopicCache(List<TopicDO> doList, Long timestamp) {
        if (TOPIC_CACHE_LATEST_UPDATE_TIME == Constant.START_TIMESTAMP) {
            TOPIC_MAP.clear();
        }

        if (doList == null || doList.isEmpty() || TOPIC_CACHE_LATEST_UPDATE_TIME >= timestamp) {
            // No new data this round, or this refresh is already stale: skip it
            return;
        }

        for (TopicDO elem: doList) {
            Map<String, TopicDO> doMap = TOPIC_MAP.getOrDefault(elem.getClusterId(), new ConcurrentHashMap<>());
            doMap.put(elem.getTopicName(), elem);
@@ -117,4 +129,8 @@ public class TopicDaoImpl implements TopicDao {
        }
        TOPIC_CACHE_LATEST_UPDATE_TIME = timestamp;
    }

    public static void resetCache() {
        TOPIC_CACHE_LATEST_UPDATE_TIME = Constant.START_TIMESTAMP;
    }
}
@@ -0,0 +1,5 @@
package com.xiaojukeji.kafka.manager.task;

public class Constant {
    public static final long START_TIMESTAMP = 0;
}
@@ -0,0 +1,41 @@
package com.xiaojukeji.kafka.manager.task;

import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
import com.xiaojukeji.kafka.manager.dao.gateway.impl.AppDaoImpl;
import com.xiaojukeji.kafka.manager.dao.gateway.impl.AuthorityDaoImpl;
import com.xiaojukeji.kafka.manager.dao.impl.TopicDaoImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Background task thread
 * @author zengqiao
 * @date 21/02/02
 */
@Service
public class DaoBackgroundTask {
    private static final Logger LOGGER = LoggerFactory.getLogger(DaoBackgroundTask.class);

    private static final ScheduledExecutorService SYNC_CACHE_THREAD_POOL = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("syncCacheTask"));

    @PostConstruct
    public void init() {
        SYNC_CACHE_THREAD_POOL.scheduleAtFixedRate(() -> {
            LOGGER.info("class=DaoBackgroundTask||method=init||msg=sync cache start");

            TopicDaoImpl.resetCache();

            AppDaoImpl.resetCache();

            AuthorityDaoImpl.resetCache();

            LOGGER.info("class=DaoBackgroundTask||method=init||msg=sync cache finished");
        }, 1, 10, TimeUnit.MINUTES);
    }
}
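DaoBackgroundTask names its scheduler thread via the imported DefaultThreadFactory. A minimal factory in the same spirit — named daemon threads for background maintenance — though the real class in kafka-manager-common may differ in detail:

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative stand-in for DefaultThreadFactory, not the project's implementation.
public class NamedThreadFactorySketch implements ThreadFactory {
    private final String namePrefix;
    private final AtomicInteger counter = new AtomicInteger(0);

    public NamedThreadFactorySketch(String namePrefix) {
        this.namePrefix = namePrefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, namePrefix + "-" + counter.incrementAndGet());
        t.setDaemon(true); // background maintenance should not block JVM shutdown
        return t;
    }
}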
@@ -8,6 +8,7 @@
        <result column="name" property="name" />
        <result column="value" property="value" />
        <result column="version" property="version" />
        <result column="description" property="description" />
        <result column="create_time" property="createTime" />
        <result column="modify_time" property="modifyTime" />
    </resultMap>
@@ -27,9 +28,9 @@
    <insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO">
        <![CDATA[
        INSERT INTO gateway_config
        (`type`, name, value, version)
        (`type`, name, value, version, description)
        VALUES
        (#{type}, #{name}, #{value}, #{version})
        (#{type}, #{name}, #{value}, #{version}, #{description})
        ]]>
    </insert>

@@ -45,7 +46,8 @@
        `type`=#{type},
        `name`=#{name},
        `value`=#{value},
        `version`=#{version}
        `version`=#{version},
        `description`=#{description}
        WHERE id=#{id}
        ]]>
    </update>
@@ -16,5 +16,5 @@ public interface LoginService {

    void logout(HttpServletRequest request, HttpServletResponse response, Boolean needJump2LoginPage);

    boolean checkLogin(HttpServletRequest request, HttpServletResponse response);
    boolean checkLogin(HttpServletRequest request, HttpServletResponse response, String classRequestMappingValue);
}
@@ -0,0 +1,130 @@
package com.xiaojukeji.kafka.manager.account.component.ldap;

import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.naming.AuthenticationException;
import javax.naming.Context;
import javax.naming.NamingEnumeration;
import javax.naming.NamingException;
import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import javax.naming.ldap.InitialLdapContext;
import javax.naming.ldap.LdapContext;
import java.util.Hashtable;

@Component
public class LdapAuthentication {
    private static final Logger LOGGER = LoggerFactory.getLogger(LdapAuthentication.class);

    @Value(value = "${account.ldap.url:}")
    private String ldapUrl;

    @Value(value = "${account.ldap.basedn:}")
    private String ldapBasedn;

    @Value(value = "${account.ldap.factory:}")
    private String ldapFactory;

    @Value(value = "${account.ldap.filter:}")
    private String ldapFilter;

    @Value(value = "${account.ldap.security.authentication:}")
    private String securityAuthentication;

    @Value(value = "${account.ldap.security.principal:}")
    private String securityPrincipal;

    @Value(value = "${account.ldap.security.credentials:}")
    private String securityCredentials;

    private LdapContext getLdapContext() {
        Hashtable<String, String> env = new Hashtable<String, String>();
        env.put(Context.INITIAL_CONTEXT_FACTORY, ldapFactory);
        env.put(Context.PROVIDER_URL, ldapUrl + ldapBasedn);
        env.put(Context.SECURITY_AUTHENTICATION, securityAuthentication);

        // If no principal and credentials are supplied here, the bind silently becomes an anonymous login
        env.put(Context.SECURITY_PRINCIPAL, securityPrincipal);
        env.put(Context.SECURITY_CREDENTIALS, securityCredentials);
        try {
            return new InitialLdapContext(env, null);
        } catch (AuthenticationException e) {
            LOGGER.warn("class=LdapAuthentication||method=getLdapContext||errMsg={}", e);
        } catch (Exception e) {
            LOGGER.error("class=LdapAuthentication||method=getLdapContext||errMsg={}", e);
        }
        return null;
    }

    private String getUserDN(String account, LdapContext ctx) {
        String userDN = "";
        try {
            SearchControls constraints = new SearchControls();
            constraints.setSearchScope(SearchControls.SUBTREE_SCOPE);
            String filter = "(&(objectClass=*)("+ldapFilter+"=" + account + "))";

            NamingEnumeration<SearchResult> en = ctx.search("", filter, constraints);
            if (en == null || !en.hasMoreElements()) {
                return "";
            }
            // maybe more than one element
            while (en.hasMoreElements()) {
                Object obj = en.nextElement();
                if (obj instanceof SearchResult) {
                    SearchResult si = (SearchResult) obj;
                    userDN += si.getName();
                    userDN += "," + ldapBasedn;
                    break;
                }
            }
        } catch (Exception e) {
            LOGGER.error("class=LdapAuthentication||method=getUserDN||account={}||errMsg={}", account, e);
        }
        return userDN;
    }

    /**
     * LDAP account/password authentication
     * @param account
     * @param password
     * @return
     */
    public boolean authenticate(String account, String password) {
        LdapContext ctx = getLdapContext();
        if (ValidateUtils.isNull(ctx)) {
            return false;
        }

        try {
            String userDN = getUserDN(account, ctx);
            if(ValidateUtils.isBlank(userDN)){
                return false;
            }

            ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN);
            ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, password);
            ctx.reconnect(null);

            return true;
        } catch (AuthenticationException e) {
            LOGGER.warn("class=LdapAuthentication||method=authenticate||account={}||errMsg={}", account, e);
        } catch (NamingException e) {
            LOGGER.warn("class=LdapAuthentication||method=authenticate||account={}||errMsg={}", account, e);
        } catch (Exception e) {
            LOGGER.error("class=LdapAuthentication||method=authenticate||account={}||errMsg={}", account, e);
        } finally {
            if(ctx != null) {
                try {
                    ctx.close();
                } catch (NamingException e) {
                    LOGGER.error("class=LdapAuthentication||method=authenticate||account={}||errMsg={}", account, e);
                }
            }
        }
        return false;
    }
}
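authenticate() performs the classic two-step LDAP check: bind with a service account, search for the user's DN, then rebind the same context with the user's own credentials via reconnect(null); a successful rebind is the proof of password. A compressed standalone sketch of the credential-checking bind with plain JNDI — the host, port and base DN are placeholder values:

import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.ldap.InitialLdapContext;
import javax.naming.ldap.LdapContext;
import java.util.Hashtable;

public class LdapBindSketch {
    public static boolean bindAs(String userDN, String password) {
        Hashtable<String, String> env = new Hashtable<>();
        env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
        env.put(Context.PROVIDER_URL, "ldap://ldap.example.com:389/dc=example,dc=com");
        env.put(Context.SECURITY_AUTHENTICATION, "simple");
        env.put(Context.SECURITY_PRINCIPAL, userDN);
        env.put(Context.SECURITY_CREDENTIALS, password);
        LdapContext ctx = null;
        try {
            // The bind itself is the credential check: it throws on bad credentials.
            ctx = new InitialLdapContext(env, null);
            return true;
        } catch (NamingException e) {
            return false;
        } finally {
            if (ctx != null) {
                try { ctx.close(); } catch (NamingException ignored) { }
            }
        }
    }
}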
@@ -2,13 +2,17 @@ package com.xiaojukeji.kafka.manager.account.component.sso;

import com.xiaojukeji.kafka.manager.account.AccountService;
import com.xiaojukeji.kafka.manager.account.component.AbstractSingleSignOn;
import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum;
import com.xiaojukeji.kafka.manager.common.constant.LoginConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.LoginDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.AccountDO;
import com.xiaojukeji.kafka.manager.common.utils.EncryptUtil;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.account.component.ldap.LdapAuthentication;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.servlet.http.HttpServletRequest;
@@ -23,12 +27,48 @@ public class BaseSessionSignOn extends AbstractSingleSignOn {
    @Autowired
    private AccountService accountService;

    @Autowired
    private LdapAuthentication ldapAuthentication;

    // whether LDAP authentication is enabled
    @Value(value = "${account.ldap.enabled:}")
    private Boolean accountLdapEnabled;

    // default role for LDAP auto-registration; note that it is normally a low-privilege role
    @Value(value = "${account.ldap.auth-user-registration-role:}")
    private String authUserRegistrationRole;

    // whether LDAP auto-registration is enabled
    @Value(value = "${account.ldap.auth-user-registration:}")
    private boolean authUserRegistration;

    @Override
    public Result<String> loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto) {
        if (ValidateUtils.isBlank(dto.getUsername()) || ValidateUtils.isNull(dto.getPassword())) {
            return null;
            return Result.buildFailure("Missing parameters");
        }

        Result<AccountDO> accountResult = accountService.getAccountDO(dto.getUsername());

        // Check whether LDAP authentication is enabled; if so, LDAP can also be used for authentication
        if(!ValidateUtils.isNull(accountLdapEnabled) && accountLdapEnabled){
            // Verify the account and password against LDAP
            if(!ldapAuthentication.authenticate(dto.getUsername(),dto.getPassword())){
                return Result.buildFrom(ResultStatus.LDAP_AUTHENTICATION_FAILED);
            }

            if((ValidateUtils.isNull(accountResult) || ValidateUtils.isNull(accountResult.getData())) && authUserRegistration){
                // Auto-registration
                AccountDO accountDO = new AccountDO();
                accountDO.setUsername(dto.getUsername());
                accountDO.setRole(AccountRoleEnum.getUserRoleEnum(authUserRegistrationRole).getRole());
                accountDO.setPassword(dto.getPassword());
                accountService.createAccount(accountDO);
            }

            return Result.buildSuc(dto.getUsername());
        }

        if (ValidateUtils.isNull(accountResult) || accountResult.failed()) {
            return new Result<>(accountResult.getCode(), accountResult.getMessage());
        }
@@ -64,4 +104,4 @@ public class BaseSessionSignOn extends AbstractSingleSignOn {
        response.setStatus(AbstractSingleSignOn.REDIRECT_CODE);
        response.addHeader(AbstractSingleSignOn.HEADER_REDIRECT_KEY, "");
    }
}
}
@@ -63,12 +63,17 @@ public class LoginServiceImpl implements LoginService {
    }

    @Override
    public boolean checkLogin(HttpServletRequest request, HttpServletResponse response) {
        String uri = request.getRequestURI();
        if (!(uri.contains(ApiPrefix.API_V1_NORMAL_PREFIX)
                || uri.contains(ApiPrefix.API_V1_RD_PREFIX)
                || uri.contains(ApiPrefix.API_V1_OP_PREFIX))) {
            // Whitelisted endpoints: skip the login check
    public boolean checkLogin(HttpServletRequest request, HttpServletResponse response, String classRequestMappingValue) {
        if (ValidateUtils.isNull(classRequestMappingValue)) {
            LOGGER.error("class=LoginServiceImpl||method=checkLogin||msg=uri illegal||uri={}", request.getRequestURI());
            singleSignOn.setRedirectToLoginPage(response);
            return false;
        }

        if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX)
                || classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_PREFIX)
                || classRequestMappingValue.equals(ApiPrefix.GATEWAY_API_V1_PREFIX)) {
            // Whitelisted endpoints return true directly
            return true;
        }

@@ -79,7 +84,7 @@ public class LoginServiceImpl implements LoginService {
            return false;
        }

        boolean status = checkAuthority(request, accountService.getAccountRoleFromCache(username));
        boolean status = checkAuthority(classRequestMappingValue, accountService.getAccountRoleFromCache(username));
        if (status) {
            HttpSession session = request.getSession();
            session.setAttribute(LoginConstant.SESSION_USERNAME_KEY, username);
@@ -89,19 +94,18 @@ public class LoginServiceImpl implements LoginService {
        return false;
    }

    private boolean checkAuthority(HttpServletRequest request, AccountRoleEnum accountRoleEnum) {
        String uri = request.getRequestURI();
        if (uri.contains(ApiPrefix.API_V1_NORMAL_PREFIX)) {
    private boolean checkAuthority(String classRequestMappingValue, AccountRoleEnum accountRoleEnum) {
        if (classRequestMappingValue.equals(ApiPrefix.API_V1_NORMAL_PREFIX)) {
            // normal endpoints are accessible to everyone
            return true;
        }

        if (uri.contains(ApiPrefix.API_V1_RD_PREFIX) ) {
        if (classRequestMappingValue.equals(ApiPrefix.API_V1_RD_PREFIX) ) {
            // RD endpoints: accessible to OP or RD
            return AccountRoleEnum.RD.equals(accountRoleEnum) || AccountRoleEnum.OP.equals(accountRoleEnum);
        }

        if (uri.contains(ApiPrefix.API_V1_OP_PREFIX)) {
        if (classRequestMappingValue.equals(ApiPrefix.API_V1_OP_PREFIX)) {
            // OP endpoints: only OP can access
            return AccountRoleEnum.OP.equals(accountRoleEnum);
        }
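checkLogin() now receives the controller's class-level request-mapping prefix instead of re-parsing the URI, so whitelist and role checks become exact string comparisons. One way a Spring interceptor could derive that argument from the handler; this is a hedged sketch, and the project's actual interceptor may compute it differently:

import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.method.HandlerMethod;

public class RequestMappingPrefixSketch {
    static String classRequestMappingValue(Object handler) {
        if (!(handler instanceof HandlerMethod)) {
            return null; // static resources etc. carry no controller prefix
        }
        Class<?> controller = ((HandlerMethod) handler).getBeanType();
        RequestMapping mapping = AnnotationUtils.findAnnotation(controller, RequestMapping.class);
        if (mapping == null || mapping.value().length == 0) {
            return null;
        }
        // the class-level prefix, e.g. the RD or OP API prefix
        return mapping.value()[0];
    }
}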
@@ -18,6 +18,9 @@ public class OrderExtensionAddGatewayConfigDTO {
    @ApiModelProperty(value = "值")
    private String value;

    @ApiModelProperty(value = "描述说明")
    private String description;

    public String getType() {
        return type;
    }
@@ -42,12 +45,21 @@ public class OrderExtensionAddGatewayConfigDTO {
        this.value = value;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    @Override
    public String toString() {
        return "OrderExtensionAddGatewayConfigDTO{" +
                "type='" + type + '\'' +
                ", name='" + name + '\'' +
                ", value='" + value + '\'' +
                ", description='" + description + '\'' +
                '}';
    }

@@ -23,6 +23,9 @@ public class OrderExtensionModifyGatewayConfigDTO {
    @ApiModelProperty(value = "值")
    private String value;

    @ApiModelProperty(value = "描述说明")
    private String description;

    public Long getId() {
        return id;
    }
@@ -55,6 +58,14 @@ public class OrderExtensionModifyGatewayConfigDTO {
        this.value = value;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    @Override
    public String toString() {
        return "OrderExtensionModifyGatewayConfigDTO{" +
@@ -62,6 +73,7 @@ public class OrderExtensionModifyGatewayConfigDTO {
                ", type='" + type + '\'' +
                ", name='" + name + '\'' +
                ", value='" + value + '\'' +
                ", description='" + description + '\'' +
                '}';
    }

@@ -95,7 +95,7 @@ public class DeleteAppOrder extends AbstractAppOrder {
        // Check whether the app still holds permissions on any topic
        List<AuthorityDO> authorityList = authorityService.getAuthority(orderAppExtension.getAppId());
        if (!ValidateUtils.isEmptyList(authorityList)) {
            return ResultStatus.OPERATION_FORBIDDEN;
            return ResultStatus.APP_OFFLINE_FORBIDDEN;
        }
        if (appService.deleteApp(appDO, userName) > 0) {
            return ResultStatus.SUCCESS;
@@ -0,0 +1,18 @@
package com.xiaojukeji.kafka.manager.kcm.common;

public class Constant {
    /**
     * Task title prefix
     */
    public static final String TASK_TITLE_PREFIX = "Logi-Kafka";

    /**
     * Concurrency of 1, i.e. sequential execution
     */
    public static final Integer AGENT_TASK_BATCH = 1;

    /**
     * Failure tolerance is 0
     */
    public static final Integer AGENT_TASK_TOLERANCE = 0;
}
@@ -6,34 +6,35 @@ package com.xiaojukeji.kafka.manager.kcm.common.bizenum;
 * @date 20/4/26
 */
public enum ClusterTaskActionEnum {
    START(0, "start"),
    PAUSE(1, "pause"),
    IGNORE(2, "ignore"),
    CANCEL(3, "cancel"),
    ROLLBACK(4, "rollback"),
    UNKNOWN("unknown"),

    START("start"),
    PAUSE("pause"),

    IGNORE("ignore"),
    CANCEL("cancel"),

    REDO("redo"),
    KILL("kill"),

    ROLLBACK("rollback"),

    ;
    private Integer code;

    private String message;
    private String action;

    ClusterTaskActionEnum(Integer code, String message) {
        this.code = code;
        this.message = message;
    ClusterTaskActionEnum(String action) {
        this.action = action;
    }

    public Integer getCode() {
        return code;
    }

    public String getMessage() {
        return message;
    public String getAction() {
        return action;
    }

    @Override
    public String toString() {
        return "TaskActionEnum{" +
                "code=" + code +
                ", message='" + message + '\'' +
        return "ClusterTaskActionEnum{" +
                "action='" + action + '\'' +
                '}';
    }
}
@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.kcm.common.entry.ao;

public class ClusterTaskLog {
    private String stdout;

    public ClusterTaskLog(String stdout) {
        this.stdout = stdout;
    }

    public String getStdout() {
        return stdout;
    }

    public void setStdout(String stdout) {
        this.stdout = stdout;
    }

    @Override
    public String toString() {
        return "AgentOperationTaskLog{" +
                "stdout='" + stdout + '\'' +
                '}';
    }
}
@@ -1,5 +1,7 @@
package com.xiaojukeji.kafka.manager.kcm.common.entry.ao;

import com.xiaojukeji.kafka.manager.common.entity.Result;

import java.util.List;

/**
@@ -119,7 +121,7 @@ public class CreationTaskData {

    @Override
    public String toString() {
        return "CreationTaskDTO{" +
        return "CreationTaskData{" +
                "uuid='" + uuid + '\'' +
                ", clusterId=" + clusterId +
                ", hostList=" + hostList +
@@ -1,9 +1,18 @@
package com.xiaojukeji.kafka.manager.kcm.component.agent;

import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskActionEnum;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskStateEnum;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskSubStateEnum;
import com.xiaojukeji.kafka.manager.kcm.common.entry.ao.ClusterTaskLog;
import com.xiaojukeji.kafka.manager.kcm.common.entry.ao.CreationTaskData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Map;

@@ -13,33 +22,79 @@ import java.util.Map;
 * @date 20/4/26
 */
public abstract class AbstractAgent {
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractAgent.class);

    /**
     * Create a task
     * @param creationTaskData task creation parameters
     * @return task ID
     */
    public abstract Long createTask(CreationTaskData dto);
    public abstract Result<Long> createTask(CreationTaskData creationTaskData);

    /**
     * Task action
     * Execute a task action
     * @param taskId task ID
     * @param actionEnum action to perform
     * @return true: triggered successfully, false: trigger failed
     */
    public abstract Boolean actionTask(Long taskId, String action);
    public abstract boolean actionTask(Long taskId, ClusterTaskActionEnum actionEnum);

    /**
     * Task action
     * Execute a task action
     * @param taskId task ID
     * @param actionEnum action to perform
     * @param hostname target host
     * @return true: triggered successfully, false: trigger failed
     */
    public abstract Boolean actionHostTask(Long taskId, String action, String hostname);
    public abstract boolean actionHostTask(Long taskId, ClusterTaskActionEnum actionEnum, String hostname);

    /**
     * Get the task state
     * Get the task's run state [blocked, running, finished, etc.]
     * @param taskId task ID
     * @return task state
     */
    public abstract ClusterTaskStateEnum getTaskState(Long agentTaskId);
    public abstract Result<ClusterTaskStateEnum> getTaskExecuteState(Long taskId);

    /**
     * Get the task result
     * @param taskId task ID
     * @return task result
     */
    public abstract Map<String, ClusterTaskSubStateEnum> getTaskResult(Long taskId);
    public abstract Result<Map<String, ClusterTaskSubStateEnum>> getTaskResult(Long taskId);

    /**
     * Get the task log
     * Get the task execution log
     * @param taskId task ID
     * @param hostname target host
     * @return the host's run log
     */
    public abstract String getTaskLog(Long agentTaskId, String hostname);
    public abstract Result<ClusterTaskLog> getTaskLog(Long taskId, String hostname);

    protected static String readScriptInJarFile(String fileName) {
        InputStream inputStream = AbstractAgent.class.getClassLoader().getResourceAsStream(fileName);
        if (inputStream == null) {
            LOGGER.error("class=AbstractAgent||method=readScriptInJarFile||fileName={}||msg=read script failed", fileName);
            return "";
        }

        try {
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
            String line = null;

            StringBuilder sb = new StringBuilder();
            while ((line = bufferedReader.readLine()) != null) {
                sb.append(line).append("\n");
            }
            return sb.toString();
        } catch (Exception e) {
            LOGGER.error("class=AbstractAgent||method=readScriptInJarFile||fileName={}||errMsg={}||msg=read script failed", fileName, e.getMessage());
        } finally {
            try {
                inputStream.close();
            } catch (IOException e) {
                LOGGER.error("class=AbstractAgent||method=readScriptInJarFile||fileName={}||errMsg={}||msg=close reading script failed", fileName, e.getMessage());
            }
        }
        return "";
    }
}
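readScriptInJarFile, now shared by all agents, closes the stream by hand in a finally block. On Java 7+ the same behavior fits in a try-with-resources sketch like the following (missing resource and read failure both yield an empty string, as above):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;

public class ResourceReaderSketch {
    static String readScript(String fileName) {
        InputStream in = ResourceReaderSketch.class.getClassLoader().getResourceAsStream(fileName);
        if (in == null) {
            return ""; // resource missing from the jar
        }
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            // append "\n" per line, exactly like the loop in readScriptInJarFile
            return reader.lines().map(line -> line + "\n").collect(Collectors.joining());
        } catch (IOException | UncheckedIOException e) {
            return "";
        }
    }
}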
@@ -1,8 +1,11 @@
package com.xiaojukeji.kafka.manager.kcm.component.agent.n9e;

import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.kcm.common.Constant;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskActionEnum;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskTypeEnum;
import com.xiaojukeji.kafka.manager.kcm.common.entry.ao.ClusterTaskLog;
import com.xiaojukeji.kafka.manager.kcm.common.entry.ao.CreationTaskData;
import com.xiaojukeji.kafka.manager.common.utils.HttpUtils;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
@@ -11,20 +14,17 @@ import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskStateEnum;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskSubStateEnum;
import com.xiaojukeji.kafka.manager.kcm.component.agent.AbstractAgent;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eCreationTask;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eResult;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eTaskResultDTO;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eTaskStatusEnum;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eTaskStdoutDTO;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eTaskResult;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.N9eTaskStdoutLog;
import com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.bizenum.N9eTaskStatusEnum;
import org.springframework.beans.factory.annotation.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -54,16 +54,6 @@ public class N9e extends AbstractAgent {

    private String script;

    /**
     * Concurrency of 1, i.e. sequential execution
     */
    private static final Integer BATCH = 1;

    /**
     * Failure tolerance is 0
     */
    private static final Integer TOLERANCE = 0;

    private static final String CREATE_TASK_URI = "/api/job-ce/tasks";

    private static final String ACTION_TASK_URI = "/api/job-ce/task/{taskId}/action";
@@ -82,143 +72,134 @@ public class N9e extends AbstractAgent {
    }

    @Override
    public Long createTask(CreationTaskData creationTaskData) {
        Map<String, Object> param = buildCreateTaskParam(creationTaskData);
    public Result<Long> createTask(CreationTaskData creationTaskData) {
        String content = JsonUtils.toJSONString(buildCreateTaskParam(creationTaskData));

        String response = null;
        try {
            response = HttpUtils.postForString(
                    baseUrl + CREATE_TASK_URI,
                    JsonUtils.toJSONString(param),
                    buildHeader()
            );
            N9eResult zr = JSON.parseObject(response, N9eResult.class);
            if (!ValidateUtils.isBlank(zr.getErr())) {
                LOGGER.warn("class=N9e||method=createTask||param={}||errMsg={}||msg=call create task fail", JsonUtils.toJSONString(param),zr.getErr());
                return null;
            response = HttpUtils.postForString(baseUrl + CREATE_TASK_URI, content, buildHeader());
            N9eResult nr = JsonUtils.stringToObj(response, N9eResult.class);
            if (!ValidateUtils.isBlank(nr.getErr())) {
                LOGGER.error("class=N9e||method=createTask||param={}||response={}||msg=call create task failed", content, response);
                return Result.buildFailure(nr.getErr());
            }
            return Long.valueOf(zr.getDat().toString());
            return Result.buildSuc(Long.valueOf(nr.getDat().toString()));
        } catch (Exception e) {
            LOGGER.error("create task failed, req:{}.", creationTaskData, e);
            LOGGER.error("class=N9e||method=createTask||param={}||response={}||errMsg={}||msg=call create task failed", content, response, e.getMessage());
        }
        return null;
        return Result.buildFailure("create n9e task failed");
    }

    @Override
    public Boolean actionTask(Long taskId, String action) {
    public boolean actionTask(Long taskId, ClusterTaskActionEnum actionEnum) {
        Map<String, Object> param = new HashMap<>(1);
        param.put("action", action);
        param.put("action", actionEnum.getAction());

        String response = null;
        try {
            response = HttpUtils.putForString(
                    baseUrl + ACTION_TASK_URI.replace("{taskId}", taskId.toString()),
                    JSON.toJSONString(param),
                    buildHeader()
            );
            N9eResult zr = JSON.parseObject(response, N9eResult.class);
            if (ValidateUtils.isBlank(zr.getErr())) {
            response = HttpUtils.putForString(baseUrl + ACTION_TASK_URI.replace("{taskId}", String.valueOf(taskId)), JsonUtils.toJSONString(param), buildHeader());
            N9eResult nr = JsonUtils.stringToObj(response, N9eResult.class);
            if (ValidateUtils.isBlank(nr.getErr())) {
                return true;
            }
            LOGGER.warn("class=N9e||method=actionTask||param={}||errMsg={}||msg=call action task fail", JSON.toJSONString(param),zr.getErr());

            LOGGER.error("class=N9e||method=actionTask||param={}||response={}||msg=call action task fail", JsonUtils.toJSONString(param), response);
            return false;
        } catch (Exception e) {
            LOGGER.error("action task failed, taskId:{}, action:{}.", taskId, action, e);
            LOGGER.error("class=N9e||method=actionTask||param={}||response={}||errMsg={}||msg=call action task fail", JsonUtils.toJSONString(param), response, e.getMessage());
        }
        return false;
    }

    @Override
    public Boolean actionHostTask(Long taskId, String action, String hostname) {
        Map<String, Object> param = new HashMap<>(2);
        param.put("action", action);
        param.put("hostname", hostname);
    public boolean actionHostTask(Long taskId, ClusterTaskActionEnum actionEnum, String hostname) {
        Map<String, Object> params = new HashMap<>(2);
        params.put("action", actionEnum.getAction());
        params.put("hostname", hostname);

        String response = null;
        try {
            response = HttpUtils.putForString(
                    baseUrl + ACTION_HOST_TASK_URI.replace("{taskId}", taskId.toString()),
                    JSON.toJSONString(param),
                    buildHeader()
            );
            N9eResult zr = JSON.parseObject(response, N9eResult.class);
            if (ValidateUtils.isBlank(zr.getErr())) {
            response = HttpUtils.putForString(baseUrl + ACTION_HOST_TASK_URI.replace("{taskId}", String.valueOf(taskId)), JsonUtils.toJSONString(params), buildHeader());
            N9eResult nr = JsonUtils.stringToObj(response, N9eResult.class);
            if (ValidateUtils.isBlank(nr.getErr())) {
                return true;
            }
            LOGGER.warn("class=N9e||method=actionHostTask||param={}||errMsg={}||msg=call action host task fail", JSON.toJSONString(param),zr.getErr());

            LOGGER.error("class=N9e||method=actionHostTask||params={}||response={}||msg=call action host task fail", JsonUtils.toJSONString(params), response);
            return false;
        } catch (Exception e) {
            LOGGER.error("action task failed, taskId:{} action:{} hostname:{}.", taskId, action, hostname, e);
            LOGGER.error("class=N9e||method=actionHostTask||params={}||response={}||errMsg={}||msg=call action host task fail", JsonUtils.toJSONString(params), response, e.getMessage());
        }
        return false;
    }

    @Override
    public ClusterTaskStateEnum getTaskState(Long agentTaskId) {
    public Result<ClusterTaskStateEnum> getTaskExecuteState(Long taskId) {
        String response = null;
        try {
            // Fetch the task's state
            response = HttpUtils.get(
                    baseUrl + TASK_STATE_URI.replace("{taskId}", agentTaskId.toString()), null
            );
            N9eResult n9eResult = JSON.parseObject(response, N9eResult.class);
            if (!ValidateUtils.isBlank(n9eResult.getErr())) {
                LOGGER.error("get response result failed, agentTaskId:{} response:{}.", agentTaskId, response);
                return null;
            response = HttpUtils.get(baseUrl + TASK_STATE_URI.replace("{taskId}", String.valueOf(taskId)), null);
            N9eResult nr = JsonUtils.stringToObj(response, N9eResult.class);
            if (!ValidateUtils.isBlank(nr.getErr())) {
                return Result.buildFailure(nr.getErr());
            }
            String state = JSON.parseObject(JSON.toJSONString(n9eResult.getDat()), String.class);

            String state = JsonUtils.stringToObj(JsonUtils.toJSONString(nr.getDat()), String.class);

            N9eTaskStatusEnum n9eTaskStatusEnum = N9eTaskStatusEnum.getByMessage(state);
            if (ValidateUtils.isNull(n9eTaskStatusEnum)) {
                LOGGER.error("get task status failed, agentTaskId:{} state:{}.", agentTaskId, state);
                return null;
                LOGGER.error("class=N9e||method=getTaskExecuteState||taskId={}||response={}||msg=get task state failed", taskId, response);
                return Result.buildFailure("unknown state, state:" + state);
            }
            return n9eTaskStatusEnum.getStatus();
            return Result.buildSuc(n9eTaskStatusEnum.getStatus());
        } catch (Exception e) {
            LOGGER.error("get task status failed, agentTaskId:{} response:{}.", agentTaskId, response, e);
            LOGGER.error("class=N9e||method=getTaskExecuteState||taskId={}||response={}||errMsg={}||msg=get task state failed", taskId, response, e.getMessage());
        }
        return null;
        return Result.buildFailure("get task state failed");
    }

    @Override
    public Map<String, ClusterTaskSubStateEnum> getTaskResult(Long agentTaskId) {
    public Result<Map<String, ClusterTaskSubStateEnum>> getTaskResult(Long taskId) {
        String response = null;
        try {
            // Fetch the sub-tasks' state
            response = HttpUtils.get(baseUrl + TASK_SUB_STATE_URI.replace("{taskId}", agentTaskId.toString()), null);
            N9eResult n9eResult = JSON.parseObject(response, N9eResult.class);
            response = HttpUtils.get(baseUrl + TASK_SUB_STATE_URI.replace("{taskId}", String.valueOf(taskId)), null);
            N9eResult nr = JsonUtils.stringToObj(response, N9eResult.class);
            if (!ValidateUtils.isBlank(nr.getErr())) {
                LOGGER.error("class=N9e||method=getTaskResult||taskId={}||response={}||msg=get task result failed", taskId, response);
                return Result.buildFailure(nr.getErr());
            }

            N9eTaskResultDTO n9eTaskResultDTO =
                    JSON.parseObject(JSON.toJSONString(n9eResult.getDat()), N9eTaskResultDTO.class);
            return n9eTaskResultDTO.convert2HostnameStatusMap();
            return Result.buildSuc(JsonUtils.stringToObj(JsonUtils.toJSONString(nr.getDat()), N9eTaskResult.class).convert2HostnameStatusMap());
        } catch (Exception e) {
            LOGGER.error("get task result failed, agentTaskId:{} response:{}.", agentTaskId, response, e);
            LOGGER.error("class=N9e||method=getTaskResult||taskId={}||response={}||errMsg={}||msg=get task result failed", taskId, response, e.getMessage());
        }
        return null;
        return Result.buildFailure("get task result failed");
    }

    @Override
    public String getTaskLog(Long agentTaskId, String hostname) {
    public Result<ClusterTaskLog> getTaskLog(Long taskId, String hostname) {
        Map<String, String> params = new HashMap<>(1);
        params.put("hostname", hostname);

        String response = null;
        try {
            Map<String, String> params = new HashMap<>(1);
            params.put("hostname", hostname);
            response = HttpUtils.get(baseUrl + TASK_STD_LOG_URI.replace("{taskId}", String.valueOf(taskId)), params);
            N9eResult nr = JsonUtils.stringToObj(response, N9eResult.class);
            if (!ValidateUtils.isBlank(nr.getErr())) {
                LOGGER.error("class=N9e||method=getTaskLog||taskId={}||response={}||msg=get task log failed", taskId, response);
                return Result.buildFailure(nr.getErr());
            }

            response = HttpUtils.get(baseUrl + TASK_STD_LOG_URI.replace("{taskId}", agentTaskId.toString()), params);
            N9eResult n9eResult = JSON.parseObject(response, N9eResult.class);
            if (!ValidateUtils.isBlank(n9eResult.getErr())) {
                LOGGER.error("get task log failed, agentTaskId:{} response:{}.", agentTaskId, response);
                return null;
            }
            List<N9eTaskStdoutDTO> dtoList =
                    JSON.parseArray(JSON.toJSONString(n9eResult.getDat()), N9eTaskStdoutDTO.class);
            List<N9eTaskStdoutLog> dtoList = JsonUtils.stringToArrObj(JsonUtils.toJSONString(nr.getDat()), N9eTaskStdoutLog.class);
            if (ValidateUtils.isEmptyList(dtoList)) {
                return "";
                return Result.buildSuc(new ClusterTaskLog(""));
            }
            return dtoList.get(0).getStdout();
            return Result.buildSuc(new ClusterTaskLog(dtoList.get(0).getStdout()));
        } catch (Exception e) {
            LOGGER.error("get task log failed, agentTaskId:{}.", agentTaskId, e);
            LOGGER.error("class=N9e||method=getTaskLog||taskId={}||response={}||errMsg={}||msg=get task log failed", taskId, response, e.getMessage());
        }
        return null;
        return Result.buildFailure("get task log failed");
    }

    private Map<String, String> buildHeader() {
@@ -228,7 +209,7 @@ public class N9e extends AbstractAgent {
        return headers;
    }

    private Map<String, Object> buildCreateTaskParam(CreationTaskData creationTaskData) {
    private N9eCreationTask buildCreateTaskParam(CreationTaskData creationTaskData) {
        StringBuilder sb = new StringBuilder();
        sb.append(creationTaskData.getUuid()).append(",,");
        sb.append(creationTaskData.getClusterId()).append(",,");
@@ -240,46 +221,17 @@ public class N9e extends AbstractAgent {
        sb.append(creationTaskData.getServerPropertiesMd5()).append(",,");
        sb.append(creationTaskData.getServerPropertiesUrl());

        Map<String, Object> params = new HashMap<>(10);
        params.put("title", String.format("集群ID=%d-升级部署", creationTaskData.getClusterId()));
        params.put("batch", BATCH);
        params.put("tolerance", TOLERANCE);
        params.put("timeout", timeout);
        params.put("pause", ListUtils.strList2String(creationTaskData.getPauseList()));
        params.put("script", this.script);
        params.put("args", sb.toString());
        params.put("account", account);
        params.put("action", "pause");
        params.put("hosts", creationTaskData.getHostList());
        return params;
    }

    private static String readScriptInJarFile(String fileName) {
        InputStream inputStream = N9e.class.getClassLoader().getResourceAsStream(fileName);
        if (inputStream == null) {
            LOGGER.error("read kcm script failed, filename:{}", fileName);
            return "";
        }

        try {
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
            String line = null;
            StringBuilder stringBuilder = new StringBuilder("");

            while ((line = bufferedReader.readLine()) != null) {
                stringBuilder.append(line);
                stringBuilder.append("\n");
            }
            return stringBuilder.toString();
        } catch (IOException e) {
            LOGGER.error("read kcm script failed, filename:{}", fileName, e);
            return "";
        } finally {
            try {
                inputStream.close();
            } catch (IOException e) {
                LOGGER.error("close reading kcm script failed, filename:{}", fileName, e);
            }
        }
        N9eCreationTask n9eCreationTask = new N9eCreationTask();
        n9eCreationTask.setTitle(Constant.TASK_TITLE_PREFIX + "-集群ID:" + creationTaskData.getClusterId());
        n9eCreationTask.setBatch(Constant.AGENT_TASK_BATCH);
        n9eCreationTask.setTolerance(Constant.AGENT_TASK_TOLERANCE);
        n9eCreationTask.setTimeout(this.timeout);
        n9eCreationTask.setPause(ListUtils.strList2String(creationTaskData.getPauseList()));
        n9eCreationTask.setScript(this.script);
        n9eCreationTask.setArgs(sb.toString());
        n9eCreationTask.setAccount(this.account);
        n9eCreationTask.setAction(ClusterTaskActionEnum.PAUSE.getAction());
        n9eCreationTask.setHosts(creationTaskData.getHostList());
        return n9eCreationTask;
    }
}
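The through-line of the N9e refactor: every agent call now returns a Result envelope instead of null-on-failure, so callers branch on failed() and read getMessage() rather than null-checking. A tiny self-contained model of the pattern — this is not the project's Result class, whose API is richer:

public class ResultEnvelopeSketch {
    static final class Result<T> {
        private final T data;
        private final String message;
        private Result(T data, String message) { this.data = data; this.message = message; }
        static <T> Result<T> buildSuc(T data) { return new Result<>(data, null); }
        static <T> Result<T> buildFailure(String message) { return new Result<>(null, message); }
        boolean failed() { return message != null; }
        T getData() { return data; }
        String getMessage() { return message; }
    }

    static Result<Long> createTask(boolean ok) {
        return ok ? Result.buildSuc(42L) : Result.buildFailure("create n9e task failed");
    }

    public static void main(String[] args) {
        Result<Long> created = createTask(false);
        if (created.failed()) {
            System.out.println("failed: " + created.getMessage()); // no null checks needed
            return;
        }
        System.out.println("taskId=" + created.getData());
    }
}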
@@ -0,0 +1,151 @@
|
||||
package com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class N9eCreationTask {
|
||||
/**
|
||||
* 任务标题
|
||||
*/
|
||||
private String title;
|
||||
|
||||
/**
|
||||
* 并发度, =2则表示两台并发执行
|
||||
*/
|
||||
private Integer batch;
|
||||
|
||||
/**
|
||||
* 错误容忍度, 达到容忍度之上时, 任务会被暂停并不可以继续执行
|
||||
*/
|
||||
private Integer tolerance;
|
||||
|
||||
/**
|
||||
* 单台任务的超时时间(秒)
|
||||
*/
|
||||
private Integer timeout;
|
||||
|
||||
/**
|
||||
* 暂停点, 格式: host1,host2,host3
|
||||
*/
|
||||
private String pause;
|
||||
|
||||
/**
|
||||
* 任务执行对应的脚本
|
||||
*/
|
||||
private String script;
|
||||
|
||||
/**
|
||||
* 任务参数
|
||||
*/
|
||||
private String args;
|
||||
|
||||
/**
|
||||
* 使用的账号
|
||||
*/
|
||||
private String account;
|
||||
|
||||
/**
|
||||
* 动作
|
||||
*/
|
||||
private String action;
|
||||
|
||||
/**
|
||||
* 操作的主机列表
|
||||
*/
|
||||
private List<String> hosts;
|
||||
|
||||
public String getTitle() {
|
||||
return title;
|
||||
}
|
||||
|
||||
public void setTitle(String title) {
|
||||
this.title = title;
|
||||
}
|
||||
|
||||
public Integer getBatch() {
|
||||
return batch;
|
||||
}
|
||||
|
||||
public void setBatch(Integer batch) {
|
||||
this.batch = batch;
|
||||
}
|
||||
|
||||
public Integer getTolerance() {
|
||||
return tolerance;
|
||||
}
|
||||
|
||||
public void setTolerance(Integer tolerance) {
|
||||
this.tolerance = tolerance;
|
||||
}
|
||||
|
||||
public Integer getTimeout() {
|
||||
return timeout;
|
||||
}
|
||||
|
||||
public void setTimeout(Integer timeout) {
|
||||
this.timeout = timeout;
|
||||
}
|
||||
|
||||
public String getPause() {
|
||||
return pause;
|
||||
}
|
||||
|
||||
public void setPause(String pause) {
|
||||
this.pause = pause;
|
||||
}
|
||||
|
||||
public String getScript() {
|
||||
return script;
|
||||
}
|
||||
|
||||
public void setScript(String script) {
|
||||
this.script = script;
|
||||
}
|
||||
|
||||
public String getArgs() {
|
||||
return args;
|
||||
}
|
||||
|
||||
public void setArgs(String args) {
|
||||
this.args = args;
|
||||
}
|
||||
|
||||
public String getAccount() {
|
||||
return account;
|
||||
}
|
||||
|
||||
public void setAccount(String account) {
|
||||
this.account = account;
|
||||
}
|
||||
|
||||
public String getAction() {
|
||||
return action;
|
||||
}
|
||||
|
||||
public void setAction(String action) {
|
||||
this.action = action;
|
||||
}
|
||||
|
||||
public List<String> getHosts() {
|
||||
return hosts;
|
||||
}
|
||||
|
||||
public void setHosts(List<String> hosts) {
|
||||
this.hosts = hosts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "N9eCreationTask{" +
|
||||
"title='" + title + '\'' +
|
||||
", batch=" + batch +
|
||||
", tolerance=" + tolerance +
|
||||
", timeout=" + timeout +
|
||||
", pause='" + pause + '\'' +
|
||||
", script='" + script + '\'' +
|
||||
", args='" + args + '\'' +
|
||||
", account='" + account + '\'' +
|
||||
", action='" + action + '\'' +
|
||||
", hosts=" + hosts +
|
||||
'}';
|
||||
}
|
||||
}
|
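To make the field semantics above concrete, a hypothetical way such a task object could be filled in before being handed to the Nightingale (n9e) agent; every value below is an illustrative placeholder, not something taken from the code base (requires java.util.Arrays):

    N9eCreationTask task = new N9eCreationTask();
    task.setTitle("cluster upgrade");            // placeholder title
    task.setBatch(1);                            // one host at a time
    task.setTolerance(0);                        // pause on the first failure
    task.setTimeout(600);                        // 10 minutes per host
    task.setPause("host-2");                     // wait for confirmation before host-2
    task.setScript("#!/bin/sh\necho upgrading"); // script body to execute
    task.setArgs("--version placeholder");
    task.setAccount("root");
    task.setAction("start");
    task.setHosts(Arrays.asList("host-1", "host-2", "host-3"));
    System.out.println(task);                    // toString() dumps every field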
@@ -12,7 +12,7 @@ import java.util.Map;
 * @author zengqiao
 * @date 20/9/7
 */
-public class N9eTaskResultDTO {
+public class N9eTaskResult {
    private List<String> waiting;

    private List<String> running;
@@ -0,0 +1,35 @@
package com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry;

/**
 * @author zengqiao
 * @date 20/9/7
 */
public class N9eTaskStdoutLog {
    private String host;

    private String stdout;

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public String getStdout() {
        return stdout;
    }

    public void setStdout(String stdout) {
        this.stdout = stdout;
    }

    @Override
    public String toString() {
        return "N9eTaskStdoutDTO{" +
                "host='" + host + '\'' +
                ", stdout='" + stdout + '\'' +
                '}';
    }
}
@@ -0,0 +1,59 @@
package com.xiaojukeji.kafka.manager.kcm.component.agent.n9e.entry.bizenum;

import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskStateEnum;

/**
 * @author zengqiao
 * @date 20/9/3
 */
public enum N9eTaskStatusEnum {
    DONE(0, "done", ClusterTaskStateEnum.FINISHED),
    PAUSE(1, "pause", ClusterTaskStateEnum.BLOCKED),
    START(2, "start", ClusterTaskStateEnum.RUNNING),
    ;

    private Integer code;

    private String message;

    private ClusterTaskStateEnum status;

    N9eTaskStatusEnum(Integer code, String message, ClusterTaskStateEnum status) {
        this.code = code;
        this.message = message;
        this.status = status;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public ClusterTaskStateEnum getStatus() {
        return status;
    }

    public void setStatus(ClusterTaskStateEnum status) {
        this.status = status;
    }

    public static N9eTaskStatusEnum getByMessage(String message) {
        for (N9eTaskStatusEnum elem : N9eTaskStatusEnum.values()) {
            if (elem.message.equals(message)) {
                return elem;
            }
        }
        return null;
    }
}
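getByMessage does a linear scan over the three constants and returns null for any unrecognized status string, so callers must null-check before mapping to a platform state; a brief illustrative use:

    // Illustrative only: translate the agent's textual status into the platform's state.
    N9eTaskStatusEnum statusEnum = N9eTaskStatusEnum.getByMessage("pause");
    ClusterTaskStateEnum state = (statusEnum == null)
            ? null                     // unknown status string from the agent
            : statusEnum.getStatus();  // "pause" maps to ClusterTaskStateEnum.BLOCKED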
@@ -54,7 +54,7 @@ public class S3Service extends AbstractStorageService {
        InputStream inputStream = null;
        try {
            if (!createBucketIfNotExist()) {
-                return false;
+                return false;
            }

            inputStream = uploadFile.getInputStream();
@@ -95,7 +95,10 @@ public class S3Service extends AbstractStorageService {

    @Override
    public String getDownloadBaseUrl() {
-        return this.endpoint + "/" + this.bucket;
+        if (this.endpoint.startsWith("http://")) {
+            return this.endpoint + "/" + this.bucket;
+        }
+        return "http://" + this.endpoint + "/" + this.bucket;
    }

    private boolean createBucketIfNotExist() {
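The getDownloadBaseUrl change makes the method tolerant of an endpoint configured with or without a scheme (the application.yml default further below is now the bare host s3.didiyunapi.com). Restated in isolation for illustration:

    // Minimal restatement of the new logic, for illustration only.
    static String downloadBaseUrl(String endpoint, String bucket) {
        if (endpoint.startsWith("http://")) {
            return endpoint + "/" + bucket;
        }
        return "http://" + endpoint + "/" + bucket;
    }
    // downloadBaseUrl("s3.didiyunapi.com", "logi-kafka")        -> "http://s3.didiyunapi.com/logi-kafka"
    // downloadBaseUrl("http://s3.didiyunapi.com", "logi-kafka") -> "http://s3.didiyunapi.com/logi-kafka"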
@@ -6,6 +6,7 @@ import com.xiaojukeji.kafka.manager.kcm.ClusterTaskService;
import com.xiaojukeji.kafka.manager.kcm.common.Converters;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskActionEnum;
import com.xiaojukeji.kafka.manager.kcm.common.entry.ClusterTaskConstant;
+import com.xiaojukeji.kafka.manager.kcm.common.entry.ao.ClusterTaskLog;
import com.xiaojukeji.kafka.manager.kcm.common.entry.ao.ClusterTaskSubStatus;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskStateEnum;
import com.xiaojukeji.kafka.manager.kcm.common.bizenum.ClusterTaskSubStateEnum;
@@ -34,7 +35,7 @@ import java.util.*;
 */
@Service("clusterTaskService")
public class ClusterTaskServiceImpl implements ClusterTaskService {
-    private final static Logger LOGGER = LoggerFactory.getLogger(ClusterTaskServiceImpl.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ClusterTaskServiceImpl.class);

    @Autowired
    private AbstractAgent abstractAgent;
@@ -63,13 +64,13 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
        }

        // Create the task
-        Long agentTaskId = abstractAgent.createTask(dtoResult.getData());
-        if (ValidateUtils.isNull(agentTaskId)) {
+        Result<Long> createResult = abstractAgent.createTask(dtoResult.getData());
+        if (ValidateUtils.isNull(createResult) || createResult.failed()) {
            return Result.buildFrom(ResultStatus.CALL_CLUSTER_TASK_AGENT_FAILED);
        }

        try {
-            if (clusterTaskDao.insert(Converters.convert2ClusterTaskDO(agentTaskId, dtoResult.getData(), operator)) > 0) {
+            if (clusterTaskDao.insert(Converters.convert2ClusterTaskDO(createResult.getData(), dtoResult.getData(), operator)) > 0) {
                return Result.buildFrom(ResultStatus.SUCCESS);
            }
        } catch (Exception e) {
@@ -87,45 +88,44 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
        Long agentTaskId = getActiveAgentTaskId(clusterTaskDO);
        Boolean rollback = inRollback(clusterTaskDO);

-        ClusterTaskStateEnum stateEnum = abstractAgent.getTaskState(agentTaskId);
-        if (ClusterTaskActionEnum.START.getMessage().equals(action)
-                && ClusterTaskStateEnum.BLOCKED.equals(stateEnum)) {
+        Result<ClusterTaskStateEnum> stateEnumResult = abstractAgent.getTaskExecuteState(agentTaskId);
+        if (ValidateUtils.isNull(stateEnumResult) || stateEnumResult.failed()) {
+            return ResultStatus.CALL_CLUSTER_TASK_AGENT_FAILED;
+        }
+
+        if (ClusterTaskActionEnum.START.getAction().equals(action) && ClusterTaskStateEnum.BLOCKED.equals(stateEnumResult.getData())) {
            // Paused state, so starting is allowed
-            return actionTaskExceptRollbackAction(agentTaskId, action, "");
+            return actionTaskExceptRollbackAction(agentTaskId, ClusterTaskActionEnum.START, "");
        }
-        if (ClusterTaskActionEnum.PAUSE.getMessage().equals(action)
-                && ClusterTaskStateEnum.RUNNING.equals(stateEnum)) {
+        if (ClusterTaskActionEnum.PAUSE.getAction().equals(action) && ClusterTaskStateEnum.RUNNING.equals(stateEnumResult.getData())) {
            // Running state, so pausing is allowed
-            return actionTaskExceptRollbackAction(agentTaskId, action, "");
+            return actionTaskExceptRollbackAction(agentTaskId, ClusterTaskActionEnum.PAUSE, "");
        }
-        if (ClusterTaskActionEnum.IGNORE.getMessage().equals(action)
-                || ClusterTaskActionEnum.CANCEL.getMessage().equals(action)) {
+        if (ClusterTaskActionEnum.IGNORE.getAction().equals(action)) {
            // Ignore & cancel can be performed at any time
-            return actionTaskExceptRollbackAction(agentTaskId, action, hostname);
+            return actionTaskExceptRollbackAction(agentTaskId, ClusterTaskActionEnum.IGNORE, hostname);
        }
-        if ((!ClusterTaskStateEnum.FINISHED.equals(stateEnum) || !rollback)
-                && ClusterTaskActionEnum.ROLLBACK.getMessage().equals(action)) {
+        if (ClusterTaskActionEnum.CANCEL.getAction().equals(action)) {
+            // Ignore & cancel can be performed at any time
+            return actionTaskExceptRollbackAction(agentTaskId, ClusterTaskActionEnum.CANCEL, hostname);
+        }
+        if ((!ClusterTaskStateEnum.FINISHED.equals(stateEnumResult.getData()) || !rollback)
+                && ClusterTaskActionEnum.ROLLBACK.getAction().equals(action)) {
            // Rollback is allowed while the task is not yet finished; rolls every touched host back to the previous version
            return actionTaskRollback(clusterTaskDO);
        }
        return ResultStatus.OPERATION_FAILED;
    }

-    private ResultStatus actionTaskExceptRollbackAction(Long agentId, String action, String hostname) {
+    private ResultStatus actionTaskExceptRollbackAction(Long agentId, ClusterTaskActionEnum actionEnum, String hostname) {
        if (!ValidateUtils.isBlank(hostname)) {
-            return actionHostTaskExceptRollbackAction(agentId, action, hostname);
+            return actionHostTaskExceptRollbackAction(agentId, actionEnum, hostname);
        }
-        if (abstractAgent.actionTask(agentId, action)) {
-            return ResultStatus.SUCCESS;
-        }
-        return ResultStatus.OPERATION_FAILED;
+        return abstractAgent.actionTask(agentId, actionEnum) ? ResultStatus.SUCCESS : ResultStatus.OPERATION_FAILED;
    }

-    private ResultStatus actionHostTaskExceptRollbackAction(Long agentId, String action, String hostname) {
-        if (abstractAgent.actionHostTask(agentId, action, hostname)) {
-            return ResultStatus.SUCCESS;
-        }
-        return ResultStatus.OPERATION_FAILED;
+    private ResultStatus actionHostTaskExceptRollbackAction(Long agentId, ClusterTaskActionEnum actionEnum, String hostname) {
+        return abstractAgent.actionHostTask(agentId, actionEnum, hostname) ? ResultStatus.SUCCESS : ResultStatus.OPERATION_FAILED;
    }

    private ResultStatus actionTaskRollback(ClusterTaskDO clusterTaskDO) {
@@ -133,9 +133,9 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
            return ResultStatus.OPERATION_FORBIDDEN;
        }

-        Map<String, ClusterTaskSubStateEnum> subStatusEnumMap =
+        Result<Map<String, ClusterTaskSubStateEnum>> subStatusEnumMapResult =
                abstractAgent.getTaskResult(clusterTaskDO.getAgentTaskId());
-        if (ValidateUtils.isNull(subStatusEnumMap)) {
+        if (ValidateUtils.isNull(subStatusEnumMapResult) || subStatusEnumMapResult.failed()) {
            return ResultStatus.CALL_CLUSTER_TASK_AGENT_FAILED;
        }

@@ -143,7 +143,7 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
        List<String> rollbackHostList = new ArrayList<>();
        List<String> rollbackPauseHostList = new ArrayList<>();
        for (String host : ListUtils.string2StrList(clusterTaskDO.getHostList())) {
-            ClusterTaskSubStateEnum subStateEnum = subStatusEnumMap.get(host);
+            ClusterTaskSubStateEnum subStateEnum = subStatusEnumMapResult.getData().get(host);
            if (ValidateUtils.isNull(subStateEnum)) {
                // Failed to look up this host's task
                return ResultStatus.OPERATION_FAILED;
@@ -166,17 +166,17 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
        clusterTaskDO.setRollbackPauseHostList(ListUtils.strList2String(rollbackPauseHostList));

        // Create the task
-        Long agentTaskId = abstractAgent.createTask(Converters.convert2CreationTaskData(clusterTaskDO));
-        if (ValidateUtils.isNull(agentTaskId)) {
+        Result<Long> createResult = abstractAgent.createTask(Converters.convert2CreationTaskData(clusterTaskDO));
+        if (ValidateUtils.isNull(createResult) || createResult.failed()) {
            return ResultStatus.CALL_CLUSTER_TASK_AGENT_FAILED;
        }

        try {
-            clusterTaskDO.setAgentRollbackTaskId(agentTaskId);
+            clusterTaskDO.setAgentRollbackTaskId(createResult.getData());
            if (clusterTaskDao.updateRollback(clusterTaskDO) <= 0) {
                return ResultStatus.MYSQL_ERROR;
            }
-            abstractAgent.actionTask(clusterTaskDO.getAgentTaskId(), ClusterTaskActionEnum.CANCEL.getMessage());
+            abstractAgent.actionTask(clusterTaskDO.getAgentTaskId(), ClusterTaskActionEnum.CANCEL);
            return ResultStatus.SUCCESS;
        } catch (Exception e) {
            LOGGER.error("create cluster task failed, clusterTaskDO:{}.", clusterTaskDO, e);
@@ -191,11 +191,11 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
            return Result.buildFrom(ResultStatus.TASK_NOT_EXIST);
        }

-        String stdoutLog = abstractAgent.getTaskLog(getActiveAgentTaskId(clusterTaskDO, hostname), hostname);
-        if (ValidateUtils.isNull(stdoutLog)) {
+        Result<ClusterTaskLog> stdoutLogResult = abstractAgent.getTaskLog(getActiveAgentTaskId(clusterTaskDO, hostname), hostname);
+        if (ValidateUtils.isNull(stdoutLogResult) || stdoutLogResult.failed()) {
            return Result.buildFrom(ResultStatus.CALL_CLUSTER_TASK_AGENT_FAILED);
        }
-        return new Result<>(stdoutLog);
+        return new Result<>(stdoutLogResult.getData().getStdout());
    }

    @Override
@@ -205,24 +205,33 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
            return Result.buildFrom(ResultStatus.TASK_NOT_EXIST);
        }

+        Result<ClusterTaskStateEnum> statusEnumResult = abstractAgent.getTaskExecuteState(getActiveAgentTaskId(clusterTaskDO));
+        if (ValidateUtils.isNull(statusEnumResult) || statusEnumResult.failed()) {
+            return new Result<>(statusEnumResult.getCode(), statusEnumResult.getMessage());
+        }
+
        return new Result<>(new ClusterTaskStatus(
                clusterTaskDO.getId(),
                clusterTaskDO.getClusterId(),
                inRollback(clusterTaskDO),
-                abstractAgent.getTaskState(getActiveAgentTaskId(clusterTaskDO)),
+                statusEnumResult.getData(),
                getTaskSubStatus(clusterTaskDO)
        ));
    }

    @Override
    public ClusterTaskStateEnum getTaskState(Long agentTaskId) {
-        return abstractAgent.getTaskState(agentTaskId);
+        Result<ClusterTaskStateEnum> statusEnumResult = abstractAgent.getTaskExecuteState(agentTaskId);
+        if (ValidateUtils.isNull(statusEnumResult) || statusEnumResult.failed()) {
+            return null;
+        }
+        return statusEnumResult.getData();
    }

    private List<ClusterTaskSubStatus> getTaskSubStatus(ClusterTaskDO clusterTaskDO) {
        Map<String, ClusterTaskSubStateEnum> statusMap = this.getClusterTaskSubState(clusterTaskDO);
        if (ValidateUtils.isNull(statusMap)) {
-            return null;
+            return Collections.emptyList();
        }
        List<String> pauseList = ListUtils.string2StrList(clusterTaskDO.getPauseHostList());

@@ -242,20 +251,22 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
    }

    private Map<String, ClusterTaskSubStateEnum> getClusterTaskSubState(ClusterTaskDO clusterTaskDO) {
-        Map<String, ClusterTaskSubStateEnum> statusMap = abstractAgent.getTaskResult(clusterTaskDO.getAgentTaskId());
-        if (ValidateUtils.isNull(statusMap)) {
+        Result<Map<String, ClusterTaskSubStateEnum>> statusMapResult = abstractAgent.getTaskResult(clusterTaskDO.getAgentTaskId());
+        if (ValidateUtils.isNull(statusMapResult) || statusMapResult.failed()) {
            return null;
        }
+        Map<String, ClusterTaskSubStateEnum> statusMap = statusMapResult.getData();
        if (!inRollback(clusterTaskDO)) {
            return statusMap;
        }

-        Map<String, ClusterTaskSubStateEnum> rollbackStatusMap =
+        Result<Map<String, ClusterTaskSubStateEnum>> rollbackStatusMapResult =
                abstractAgent.getTaskResult(clusterTaskDO.getAgentRollbackTaskId());
-        if (ValidateUtils.isNull(rollbackStatusMap)) {
+        if (ValidateUtils.isNull(rollbackStatusMapResult) || rollbackStatusMapResult.failed()) {
            return null;
        }
-        statusMap.putAll(rollbackStatusMap);
+
+        statusMap.putAll(rollbackStatusMapResult.getData());
        return statusMap;
    }

@@ -276,7 +287,7 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
        } catch (Exception e) {
            LOGGER.error("get all cluster task failed.");
        }
-        return null;
+        return Collections.emptyList();
    }

    @Override
@@ -302,9 +313,6 @@ public class ClusterTaskServiceImpl implements ClusterTaskService {
    }

    private boolean inRollback(ClusterTaskDO clusterTaskDO) {
-        if (ClusterTaskConstant.INVALID_AGENT_TASK_ID.equals(clusterTaskDO.getAgentRollbackTaskId())) {
-            return false;
-        }
-        return true;
+        return !ClusterTaskConstant.INVALID_AGENT_TASK_ID.equals(clusterTaskDO.getAgentRollbackTaskId());
    }
}
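The thread running through this refactor: every AbstractAgent call now returns a Result wrapper instead of a bare value or null, and each call site applies the same guard before unwrapping. The calling convention, distilled from the diff (the Result methods shown are the ones the diff itself uses):

    Result<Long> createResult = abstractAgent.createTask(taskData);
    if (ValidateUtils.isNull(createResult) || createResult.failed()) {
        // Agent unreachable, or the call itself reported failure.
        return Result.buildFrom(ResultStatus.CALL_CLUSTER_TASK_AGENT_FAILED);
    }
    Long agentTaskId = createResult.getData();  // safe to unwrap only after the guard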
@@ -4,12 +4,12 @@ import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.KafkaFileDTO;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import com.xiaojukeji.kafka.manager.common.utils.CopyUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.KafkaFileDao;
-import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
+import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -5,6 +5,8 @@ import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.monitor.common.entry.*;
import com.xiaojukeji.kafka.manager.monitor.component.n9e.entry.*;
import com.xiaojukeji.kafka.manager.monitor.component.n9e.entry.bizenum.CategoryEnum;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

import java.util.*;

@@ -13,6 +15,8 @@
 * @date 20/8/26
 */
public class N9eConverter {
+    private static final Logger LOGGER = LoggerFactory.getLogger(N9eConverter.class);
+
    public static List<N9eMetricSinkPoint> convert2N9eMetricSinkPointList(String nid, List<MetricSinkPoint> pointList) {
        if (pointList == null || pointList.isEmpty()) {
            return new ArrayList<>();
@@ -98,8 +102,8 @@

        n9eStrategy.setNotify_user(new ArrayList<>());
        n9eStrategy.setCallback(strategyAction.getCallback());
-        n9eStrategy.setEnable_stime("00:00");
-        n9eStrategy.setEnable_etime("23:59");
+        n9eStrategy.setEnable_stime(String.format("%02d:00", ListUtils.string2IntList(strategy.getPeriodHoursOfDay()).stream().distinct().min((e1, e2) -> e1.compareTo(e2)).get()));
+        n9eStrategy.setEnable_etime(String.format("%02d:59", ListUtils.string2IntList(strategy.getPeriodHoursOfDay()).stream().distinct().max((e1, e2) -> e1.compareTo(e2)).get()));
        n9eStrategy.setEnable_days_of_week(ListUtils.string2IntList(strategy.getPeriodDaysOfWeek()));

        n9eStrategy.setNeed_upgrade(0);
@@ -120,6 +124,15 @@
        return strategyList;
    }

+    private static Integer getEnableHour(String enableTime) {
+        try {
+            return Integer.valueOf(enableTime.split(":")[0]);
+        } catch (Exception e) {
+            LOGGER.warn("class=N9eConverter||method=getEnableHour||enableTime={}||errMsg={}", enableTime, e.getMessage());
+        }
+        return null;
+    }
+
    public static Strategy convert2Strategy(N9eStrategy n9eStrategy, Map<String, NotifyGroup> notifyGroupMap) {
        if (n9eStrategy == null) {
            return null;
@@ -137,7 +150,16 @@
        strategy.setId(n9eStrategy.getId().longValue());
        strategy.setName(n9eStrategy.getName());
        strategy.setPriority(n9eStrategy.getPriority());
-        strategy.setPeriodHoursOfDay("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23");
+
+        List<Integer> hourList = new ArrayList<>();
+        Integer startHour = N9eConverter.getEnableHour(n9eStrategy.getEnable_stime());
+        Integer endHour = N9eConverter.getEnableHour(n9eStrategy.getEnable_etime());
+        if (!(ValidateUtils.isNullOrLessThanZero(startHour) || ValidateUtils.isNullOrLessThanZero(endHour) || endHour < startHour)) {
+            for (Integer hour = startHour; hour <= endHour; ++hour) {
+                hourList.add(hour);
+            }
+        }
+        strategy.setPeriodHoursOfDay(ListUtils.intList2String(hourList));
        strategy.setPeriodDaysOfWeek(ListUtils.intList2String(n9eStrategy.getEnable_days_of_week()));

        List<StrategyExpression> strategyExpressionList = new ArrayList<>();
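The two N9eConverter changes are inverses: writing a strategy collapses periodHoursOfDay into an enable window, and reading a strategy expands the window back into an hour list instead of hard-coding 0-23. A worked example with illustrative values (fragment; requires java.util.Arrays and java.util.List):

    // Writing: periodHoursOfDay "2,3,4" -> enable_stime "02:00", enable_etime "04:59"
    List<Integer> hours = Arrays.asList(2, 3, 4);
    String stime = String.format("%02d:00", hours.stream().min(Integer::compareTo).get()); // "02:00"
    String etime = String.format("%02d:59", hours.stream().max(Integer::compareTo).get()); // "04:59"

    // Reading: "02:00".."04:59" -> [2, 3, 4]; a malformed time makes getEnableHour()
    // return null, and the hour list stays empty rather than silently becoming 0-23.
    int startHour = Integer.parseInt(stime.split(":")[0]); // 2
    int endHour = Integer.parseInt(etime.split(":")[0]);   // 4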
@@ -1,14 +1,7 @@
package com.xiaojukeji.kafka.manager.notify;

-import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import com.xiaojukeji.kafka.manager.common.events.OrderApplyEvent;
-import com.xiaojukeji.kafka.manager.notify.common.NotifyConstant;
-import com.xiaojukeji.kafka.manager.notify.notifyer.AbstractNotifyService;
-import com.xiaojukeji.kafka.manager.notify.common.OrderNotifyTemplate;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
@@ -19,27 +12,10 @@ import org.springframework.stereotype.Service;
 */
@Service("orderApplyNotifyService")
public class OrderApplyNotifyService implements ApplicationListener<OrderApplyEvent> {
-    @Autowired
-    private AbstractNotifyService notifyService;
-
-    @Value("${notify.order.detail-url}")
-    private String orderDetailUrl;
-
    @Async
    @Override
    public void onApplicationEvent(OrderApplyEvent orderApplyEvent) {
-        OrderDO orderDO = orderApplyEvent.getOrderDO();
-        String detailUrl = String.format(orderDetailUrl, orderDO.getId(), orderApplyEvent.getIdc());
-        for (Account account : NotifyConstant.accountList) {
-            notifyService.sendMsg(account.getUsername(),
-                    OrderNotifyTemplate.getNotify2OrderHandlerMessage(
-                            account.getChineseName(),
-                            orderDO.getApplicant(),
-                            orderDO.getTitle(),
-                            detailUrl
-                    )
-            );
-        }
+        // todo: order notification
    }
}
@@ -1,18 +0,0 @@
package com.xiaojukeji.kafka.manager.notify.common;

import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;

import java.util.Arrays;
import java.util.List;

/**
 * @author zengqiao
 * @date 20/8/27
 */
public class NotifyConstant {

    public static final List<Account> accountList = Arrays.asList(
            new Account("xuzhengxi", "徐正熙", "", AccountRoleEnum.OP)
    );
}
@@ -125,7 +125,7 @@ public class SyncTopic2DB extends AbstractScheduledTask<EmptyEntry> {
|
||||
|
||||
if (ValidateUtils.isNull(syncTopic2DBConfig.isAddAuthority()) || !syncTopic2DBConfig.isAddAuthority()) {
|
||||
// 不增加权限信息, 则直接忽略
|
||||
return;
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO 当前添加 Topic 和 添加 Authority 是非事务的, 中间出现异常之后, 会导致数据错误, 后续还需要优化一下
|
||||
|
||||
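The one-word fix above matters more than it looks: the check runs inside the loop that syncs topics, so `return` abandoned every remaining topic as soon as one topic skipped authority handling, while `continue` skips only that topic. Schematically (the loop shape and helper names here are hypothetical):

    for (String topicName : notInDbTopicNameList) {  // hypothetical loop from the sync task
        saveTopicToDb(topicName);
        if (!syncTopic2DBConfig.isAddAuthority()) {
            continue;  // old code returned here, silently dropping all later topics
        }
        saveAuthorityToDb(topicName);
    }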
@@ -1,15 +1,17 @@
|
||||
package com.xiaojukeji.kafka.manager.task.schedule.metadata;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaClientPool;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
@@ -25,24 +27,63 @@ public class FlushClusterMetadata {
|
||||
|
||||
@Scheduled(cron="0/30 * * * * ?")
|
||||
public void flush() {
|
||||
List<ClusterDO> doList = clusterService.list();
|
||||
Map<Long, ClusterDO> dbClusterMap = clusterService.list().stream().collect(Collectors.toMap(ClusterDO::getId, Function.identity(), (key1, key2) -> key2));
|
||||
|
||||
Set<Long> newClusterIdSet = new HashSet<>();
|
||||
Set<Long> oldClusterIdSet = physicalClusterMetadataManager.getClusterIdSet();
|
||||
for (ClusterDO clusterDO: doList) {
|
||||
newClusterIdSet.add(clusterDO.getId());
|
||||
Map<Long, ClusterDO> cacheClusterMap = PhysicalClusterMetadataManager.getClusterMap();
|
||||
|
||||
// 添加集群
|
||||
physicalClusterMetadataManager.addNew(clusterDO);
|
||||
}
|
||||
// 新增的集群
|
||||
for (ClusterDO clusterDO: dbClusterMap.values()) {
|
||||
if (cacheClusterMap.containsKey(clusterDO.getId())) {
|
||||
// 已经存在
|
||||
continue;
|
||||
}
|
||||
add(clusterDO);
|
||||
}
|
||||
|
||||
for (Long clusterId: oldClusterIdSet) {
|
||||
if (newClusterIdSet.contains(clusterId)) {
|
||||
continue;
|
||||
}
|
||||
// 移除的集群
|
||||
for (ClusterDO clusterDO: cacheClusterMap.values()) {
|
||||
if (dbClusterMap.containsKey(clusterDO.getId())) {
|
||||
// 已经存在
|
||||
continue;
|
||||
}
|
||||
remove(clusterDO.getId());
|
||||
}
|
||||
|
||||
// 移除集群
|
||||
physicalClusterMetadataManager.remove(clusterId);
|
||||
}
|
||||
// 被修改配置的集群
|
||||
for (ClusterDO dbClusterDO: dbClusterMap.values()) {
|
||||
ClusterDO cacheClusterDO = cacheClusterMap.get(dbClusterDO.getId());
|
||||
if (ValidateUtils.anyNull(cacheClusterDO) || dbClusterDO.equals(cacheClusterDO)) {
|
||||
// 不存在 || 相等
|
||||
continue;
|
||||
}
|
||||
modifyConfig(dbClusterDO);
|
||||
}
|
||||
}
|
||||
|
||||
private void add(ClusterDO clusterDO) {
|
||||
if (ValidateUtils.anyNull(clusterDO)) {
|
||||
return;
|
||||
}
|
||||
physicalClusterMetadataManager.addNew(clusterDO);
|
||||
}
|
||||
|
||||
private void modifyConfig(ClusterDO clusterDO) {
|
||||
if (ValidateUtils.anyNull(clusterDO)) {
|
||||
return;
|
||||
}
|
||||
PhysicalClusterMetadataManager.updateClusterMap(clusterDO);
|
||||
KafkaClientPool.closeKafkaConsumerPool(clusterDO.getId());
|
||||
}
|
||||
|
||||
private void remove(Long clusterId) {
|
||||
if (ValidateUtils.anyNull(clusterId)) {
|
||||
return;
|
||||
}
|
||||
// 移除缓存信息
|
||||
physicalClusterMetadataManager.remove(clusterId);
|
||||
|
||||
// 清除客户端池子
|
||||
KafkaClientPool.closeKafkaConsumerPool(clusterId);
|
||||
}
|
||||
|
||||
}
|
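The rewritten flush() is a three-pass reconcile between the DB (desired state) and the in-memory cache (current state): add what exists only in the DB, remove what exists only in the cache, and re-apply config for entries present in both but unequal. The pattern in general form (names simplified; a sketch, not the project's API):

    // Generic reconcile sketch over two maps keyed by cluster id.
    void reconcile(Map<Long, ClusterDO> db, Map<Long, ClusterDO> cache) {
        for (ClusterDO c : db.values()) {
            if (!cache.containsKey(c.getId())) {
                add(c);                                   // in DB, not in cache
            } else if (!c.equals(cache.get(c.getId()))) {
                modifyConfig(c);                          // in both, but changed
            }
        }
        for (ClusterDO c : cache.values()) {
            if (!db.containsKey(c.getId())) {
                remove(c.getId());                        // in cache, not in DB
            }
        }
    }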
@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.web.api;

+import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
@@ -14,9 +15,9 @@ import springfox.documentation.annotations.ApiIgnore;
 * @date 20/6/18
 */
@ApiIgnore
-@Api(description = "web应用探活接口(REST)")
+@Api(tags = "web应用探活接口(REST)")
@RestController
-@RequestMapping("api/")
+@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX)
public class HealthController {

    @ApiIgnore

@@ -50,7 +50,7 @@ public class GatewayHeartbeatController {
            doList = JsonUtils.parseTopicConnections(clusterId, jsonObject, System.currentTimeMillis());
        } catch (Exception e) {
            LOGGER.error("class=GatewayHeartbeatController||method=receiveTopicConnections||clusterId={}||brokerId={}||msg=parse data failed||exception={}", clusterId, brokerId, e.getMessage());
-            return Result.buildFailure("fail");
+            return Result.buildGatewayFailure("fail");
        }

        topicConnectionService.batchAdd(doList);
@@ -31,7 +31,6 @@ import java.util.Map;
@RestController
@RequestMapping(ApiPrefix.GATEWAY_API_V1_PREFIX)
public class GatewayServiceDiscoveryController {

-    private final static Logger LOGGER = LoggerFactory.getLogger(GatewayHeartbeatController.class);

    @Autowired
@@ -65,7 +64,7 @@ public class GatewayServiceDiscoveryController {
        KafkaBootstrapServerConfig config =
                gatewayConfigService.getKafkaBootstrapServersConfig(Long.MIN_VALUE);
        if (ValidateUtils.isNull(config) || ValidateUtils.isNull(config.getClusterIdBootstrapServersMap())) {
-            return Result.buildFailure("call init kafka bootstrap servers failed");
+            return Result.buildGatewayFailure("call init kafka bootstrap servers failed");
        }
        if (ValidateUtils.isEmptyMap(config.getClusterIdBootstrapServersMap())) {
            return Result.buildSuc();
@@ -81,7 +80,7 @@ public class GatewayServiceDiscoveryController {
        KafkaBootstrapServerConfig config =
                gatewayConfigService.getKafkaBootstrapServersConfig(versionNumber);
        if (ValidateUtils.isNull(config) || ValidateUtils.isNull(config.getClusterIdBootstrapServersMap())) {
-            return Result.buildFailure("call update kafka bootstrap servers failed");
+            return Result.buildGatewayFailure("call update kafka bootstrap servers failed");
        }
        if (ValidateUtils.isEmptyMap(config.getClusterIdBootstrapServersMap())) {
            return Result.buildSuc();
@@ -99,7 +98,7 @@ public class GatewayServiceDiscoveryController {
    public Result<String> getMaxRequestNum(@RequestParam("versionNumber") long versionNumber) {
        RequestQueueConfig config = gatewayConfigService.getRequestQueueConfig(versionNumber);
        if (ValidateUtils.isNull(config)) {
-            return Result.buildFailure("call get request queue size config failed");
+            return Result.buildGatewayFailure("call get request queue size config failed");
        }
        if (ValidateUtils.isNull(config.getMaxRequestQueueSize())) {
            return Result.buildSuc();
@@ -119,7 +118,7 @@ public class GatewayServiceDiscoveryController {
    public Result<String> getAppIdRate(@RequestParam("versionNumber") long versionNumber) {
        AppRateConfig config = gatewayConfigService.getAppRateConfig(versionNumber);
        if (ValidateUtils.isNull(config)) {
-            return Result.buildFailure("call get app rate config failed");
+            return Result.buildGatewayFailure("call get app rate config failed");
        }
        if (ValidateUtils.isNull(config.getAppRateLimit())) {
            return Result.buildSuc();
@@ -139,7 +138,7 @@ public class GatewayServiceDiscoveryController {
    public Result getIpRate(@RequestParam("versionNumber") long versionNumber) {
        IpRateConfig config = gatewayConfigService.getIpRateConfig(versionNumber);
        if (ValidateUtils.isNull(config)) {
-            return Result.buildFailure("call get ip rate config failed");
+            return Result.buildGatewayFailure("call get ip rate config failed");
        }
        if (ValidateUtils.isNull(config.getIpRateLimit())) {
            return Result.buildSuc();
@@ -160,7 +159,7 @@ public class GatewayServiceDiscoveryController {
        SpRateConfig config =
                gatewayConfigService.getSpRateConfig(versionNumber);
        if (ValidateUtils.isNull(config) || ValidateUtils.isNull(config.getSpRateMap())) {
-            return Result.buildFailure("call update kafka bootstrap servers failed");
+            return Result.buildGatewayFailure("call update kafka bootstrap servers failed");
        }
        if (ValidateUtils.isEmptyMap(config.getSpRateMap())) {
            return Result.buildSuc();
@@ -9,7 +9,6 @@ import com.xiaojukeji.kafka.manager.common.entity.vo.common.AccountSummaryVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
-import com.xiaojukeji.kafka.manager.web.api.versionone.gateway.GatewayHeartbeatController;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.slf4j.Logger;
@@ -62,4 +61,4 @@ public class NormalAccountController {
        AccountRoleEnum accountRoleEnum = accountService.getAccountRoleFromCache(username);
        return new Result<>(new AccountRoleVO(username, accountRoleEnum.getRole()));
    }
-}
+}
@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.web.api.versionone.op;

import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
+import com.xiaojukeji.kafka.manager.common.entity.dto.op.ControllerPreferredCandidateDTO;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.ClusterDTO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
@@ -13,6 +14,7 @@ import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;


/**
 * @author zengqiao
 * @date 20/4/23
@@ -25,48 +27,56 @@ public class OpClusterController {
    private ClusterService clusterService;

    @ApiOperation(value = "接入集群")
-    @RequestMapping(value = "clusters", method = RequestMethod.POST)
+    @PostMapping(value = "clusters")
    @ResponseBody
    public Result addNew(@RequestBody ClusterDTO dto) {
        if (ValidateUtils.isNull(dto) || !dto.legal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return Result.buildFrom(
-                clusterService.addNew(
-                        ClusterModelConverter.convert2ClusterDO(dto),
-                        SpringTool.getUserName()
-                )
+                clusterService.addNew(ClusterModelConverter.convert2ClusterDO(dto), SpringTool.getUserName())
        );
    }

    @ApiOperation(value = "删除集群")
-    @RequestMapping(value = "clusters", method = RequestMethod.DELETE)
+    @DeleteMapping(value = "clusters")
    @ResponseBody
    public Result delete(@RequestParam(value = "clusterId") Long clusterId) {
        return Result.buildFrom(clusterService.deleteById(clusterId, SpringTool.getUserName()));
    }

    @ApiOperation(value = "修改集群信息")
-    @RequestMapping(value = "clusters", method = RequestMethod.PUT)
+    @PutMapping(value = "clusters")
    @ResponseBody
    public Result modify(@RequestBody ClusterDTO reqObj) {
        if (ValidateUtils.isNull(reqObj) || !reqObj.legal() || ValidateUtils.isNull(reqObj.getClusterId())) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
-        ResultStatus rs = clusterService.updateById(
-                ClusterModelConverter.convert2ClusterDO(reqObj),
-                SpringTool.getUserName()
+        return Result.buildFrom(
+                clusterService.updateById(ClusterModelConverter.convert2ClusterDO(reqObj), SpringTool.getUserName())
        );
-        return Result.buildFrom(rs);
    }

    @ApiOperation(value = "开启|关闭集群监控")
-    @RequestMapping(value = "clusters/{clusterId}/monitor", method = RequestMethod.PUT)
+    @PutMapping(value = "clusters/{clusterId}/monitor")
    @ResponseBody
-    public Result modifyStatus(@PathVariable Long clusterId,
-                               @RequestParam("status") Integer status) {
+    public Result modifyStatus(@PathVariable Long clusterId, @RequestParam("status") Integer status) {
        return Result.buildFrom(
                clusterService.modifyStatus(clusterId, status, SpringTool.getUserName())
        );
    }

+    @ApiOperation(value = "增加Controller优先候选的Broker", notes = "滴滴内部引擎特性")
+    @PostMapping(value = "cluster-controller/preferred-candidates")
+    @ResponseBody
+    public Result addControllerPreferredCandidates(@RequestBody ControllerPreferredCandidateDTO dto) {
+        return clusterService.addControllerPreferredCandidates(dto.getClusterId(), dto.getBrokerIdList());
+    }
+
+    @ApiOperation(value = "删除Controller优先候选的Broker", notes = "滴滴内部引擎特性")
+    @DeleteMapping(value = "cluster-controller/preferred-candidates")
+    @ResponseBody
+    public Result deleteControllerPreferredCandidates(@RequestBody ControllerPreferredCandidateDTO dto) {
+        return clusterService.deleteControllerPreferredCandidates(dto.getClusterId(), dto.getBrokerIdList());
+    }
}
@@ -3,8 +3,11 @@ package com.xiaojukeji.kafka.manager.web.api.versionone.op;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionAddGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionDeleteGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionModifyGatewayConfigDTO;
+import com.xiaojukeji.kafka.manager.common.bizenum.gateway.GatewayConfigKeyEnum;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
+import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import com.xiaojukeji.kafka.manager.web.converters.GatewayModelConverter;
@@ -16,12 +19,20 @@ import org.springframework.web.bind.annotation.*;

@Api(tags = "OP-Gateway配置相关接口(REST)")
@RestController
@RequestMapping(ApiPrefix.API_V1_OP_PREFIX)
public class OpGatewayConfigController {
    @Autowired
    private GatewayConfigService gatewayConfigService;

+    @ApiOperation(value = "Gateway配置类型", notes = "")
+    @GetMapping(value = "gateway-configs/type-enums")
+    @ResponseBody
+    public Result getClusterModesEnum() {
+        return new Result<>(JsonUtils.toJson(GatewayConfigKeyEnum.class));
+    }
+
    @ApiOperation(value = "创建Gateway配置", notes = "")
-    @RequestMapping(value = "gateway-configs", method = RequestMethod.POST)
+    @PostMapping(value = "gateway-configs")
    @ResponseBody
    public Result createGatewayConfig(@RequestBody OrderExtensionAddGatewayConfigDTO dto) {
        if (ValidateUtils.isNull(dto) || !dto.legal()) {
@@ -31,7 +42,7 @@ public class OpGatewayConfigController {
    }

    @ApiOperation(value = "修改Gateway配置", notes = "")
-    @RequestMapping(value = "gateway-configs", method = RequestMethod.PUT)
+    @PutMapping(value = "gateway-configs")
    @ResponseBody
    public Result modifyGatewayConfig(@RequestBody OrderExtensionModifyGatewayConfigDTO dto) {
        if (ValidateUtils.isNull(dto) || !dto.legal()) {
@@ -41,7 +52,7 @@ public class OpGatewayConfigController {
    }

    @ApiOperation(value = "删除Gateway配置", notes = "")
-    @RequestMapping(value = "gateway-configs", method = RequestMethod.DELETE)
+    @DeleteMapping(value = "gateway-configs")
    @ResponseBody
    public Result deleteGatewayConfig(@RequestBody OrderExtensionDeleteGatewayConfigDTO dto) {
        if (ValidateUtils.isNull(dto) || !dto.legal()) {
@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;

+import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.GatewayConfigVO;
@@ -15,12 +16,13 @@ import java.util.List;

@Api(tags = "RD-Gateway配置相关接口(REST)")
@RestController
@RequestMapping(ApiPrefix.API_V1_RD_PREFIX)
public class RdGatewayConfigController {
    @Autowired
    private GatewayConfigService gatewayConfigService;

    @ApiOperation(value = "Gateway相关配置信息", notes = "")
-    @RequestMapping(value = "gateway-configs", method = RequestMethod.GET)
+    @GetMapping(value = "gateway-configs")
    @ResponseBody
    public Result<List<GatewayConfigVO>> getGatewayConfigs() {
        List<GatewayConfigDO> doList = gatewayConfigService.list();
@@ -1,17 +1,17 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;

import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum;
-import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.KafkaFileDTO;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.KafkaFileVO;
-import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
-import com.xiaojukeji.kafka.manager.kcm.component.storage.common.StorageEnum;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
-import com.xiaojukeji.kafka.manager.service.service.ClusterService;
-import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
+import com.xiaojukeji.kafka.manager.kcm.component.storage.common.StorageEnum;
+import com.xiaojukeji.kafka.manager.service.service.ClusterService;
+import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.web.converters.KafkaFileConverter;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
|
||||
@@ -24,14 +24,13 @@ import java.util.List;
|
||||
@RestController
|
||||
@RequestMapping(ApiPrefix.API_V1_RD_PREFIX)
|
||||
public class RdOperateRecordController {
|
||||
|
||||
private static final int MAX_RECORD_COUNT = 200;
|
||||
|
||||
@Autowired
|
||||
private OperateRecordService operateRecordService;
|
||||
|
||||
@ApiOperation(value = "查询操作记录", notes = "")
|
||||
@RequestMapping(value = "operate-record", method = RequestMethod.POST)
|
||||
@PostMapping(value = "operate-record")
|
||||
@ResponseBody
|
||||
public Result<List<OperateRecordVO>> geOperateRecords(@RequestBody OperateRecordDTO dto) {
|
||||
if (ValidateUtils.isNull(dto) || !dto.legal()) {
|
||||
|
||||
@@ -7,7 +7,6 @@ import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
|
||||
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.openapi.common.vo.ThirdPartBrokerOverviewVO;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.BrokerService;
|
||||
import io.swagger.annotations.Api;
|
||||
@@ -52,4 +51,4 @@ public class ThirdPartClusterController {
|
||||
|
||||
return new Result<>(underReplicated.equals(0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -67,6 +67,7 @@ public class GatewayModelConverter {
            vo.setName(configDO.getName());
            vo.setValue(configDO.getValue());
+            vo.setVersion(configDO.getVersion());
            vo.setDescription(configDO.getDescription());
            vo.setCreateTime(configDO.getCreateTime());
            vo.setModifyTime(configDO.getModifyTime());
            voList.add(vo);
@@ -76,18 +77,20 @@ public class GatewayModelConverter {

    public static GatewayConfigDO convert2GatewayConfigDO(OrderExtensionAddGatewayConfigDTO configDTO) {
        GatewayConfigDO configDO = new GatewayConfigDO();
-        configDO.setType(configDO.getType());
-        configDO.setName(configDO.getName());
-        configDO.setValue(configDO.getValue());
+        configDO.setType(configDTO.getType());
+        configDO.setName(configDTO.getName());
+        configDO.setValue(configDTO.getValue());
+        configDO.setDescription(ValidateUtils.isNull(configDTO.getDescription()) ? "" : configDTO.getDescription());
        return configDO;
    }

    public static GatewayConfigDO convert2GatewayConfigDO(OrderExtensionModifyGatewayConfigDTO configDTO) {
        GatewayConfigDO configDO = new GatewayConfigDO();
-        configDO.setId(configDO.getId());
-        configDO.setType(configDO.getType());
-        configDO.setName(configDO.getName());
-        configDO.setValue(configDO.getValue());
+        configDO.setId(configDTO.getId());
+        configDO.setType(configDTO.getType());
+        configDO.setName(configDTO.getName());
+        configDO.setValue(configDTO.getValue());
+        configDO.setDescription(ValidateUtils.isNull(configDTO.getDescription()) ? "" : configDTO.getDescription());
        return configDO;
    }
}
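The converter fix above is worth dwelling on: the original methods copied a freshly constructed configDO's own null fields back onto itself, so every gateway config created or modified through a work order arrived empty. Reduced to its essence:

    GatewayConfigDO configDO = new GatewayConfigDO();
    configDO.setType(configDO.getType());   // bug: reads the (still-null) field it is about to set
    configDO.setType(configDTO.getType());  // fix: read from the incoming DTO instead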
@@ -1,8 +1,13 @@
package com.xiaojukeji.kafka.manager.web.inteceptor;

import com.xiaojukeji.kafka.manager.account.LoginService;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.method.HandlerMethod;
import org.springframework.web.servlet.HandlerInterceptor;

import javax.servlet.http.HttpServletRequest;
@@ -15,6 +20,8 @@ import javax.servlet.http.HttpServletResponse;
 */
@Component
public class PermissionInterceptor implements HandlerInterceptor {
+    private static final Logger LOGGER = LoggerFactory.getLogger(PermissionInterceptor.class);
+
    @Autowired
    private LoginService loginService;

@@ -28,6 +35,31 @@ public class PermissionInterceptor implements HandlerInterceptor {
    public boolean preHandle(HttpServletRequest request,
                             HttpServletResponse response,
                             Object handler) throws Exception {
-        return loginService.checkLogin(request, response);
+
+        String classRequestMappingValue = null;
+        try {
+            classRequestMappingValue = getClassRequestMappingValue(handler);
+        } catch (Exception e) {
+            LOGGER.error("class=PermissionInterceptor||method=preHandle||uri={}||msg=parse class request-mapping failed", request.getRequestURI(), e);
+        }
+        return loginService.checkLogin(request, response, classRequestMappingValue);
    }

+    private String getClassRequestMappingValue(Object handler) {
+        RequestMapping classRM = null;
+        if (handler instanceof HandlerMethod) {
+            HandlerMethod hm = (HandlerMethod) handler;
+            classRM = hm.getMethod().getDeclaringClass().getAnnotation(RequestMapping.class);
+        } else if (handler instanceof org.springframework.web.servlet.mvc.Controller) {
+            org.springframework.web.servlet.mvc.Controller hm = (org.springframework.web.servlet.mvc.Controller) handler;
+            Class<? extends org.springframework.web.servlet.mvc.Controller> hmClass = hm.getClass();
+            classRM = hmClass.getAnnotation(RequestMapping.class);
+        } else {
+            classRM = handler.getClass().getAnnotation(RequestMapping.class);
+        }
+        if (ValidateUtils.isNull(classRM) || classRM.value().length < 0) {
+            return null;
+        }
+        return classRM.value()[0];
+    }
}
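One quirk survives in the new interceptor code: `classRM.value().length < 0` can never be true, since array lengths are non-negative, so a class annotated with an empty @RequestMapping value would fall through to `classRM.value()[0]` and throw. Presumably `== 0` was intended; a hedged correction, not what the commit contains:

    // Hypothetical tightened guard: also reject an empty value() array.
    if (ValidateUtils.isNull(classRM) || classRM.value().length == 0) {
        return null;
    }
    return classRM.value()[0];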
@@ -119,7 +119,7 @@ public class WebMetricsInterceptor {
        ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        String uri = attributes.getRequest().getRequestURI();
        if (uri.contains(ApiPrefix.GATEWAY_API_V1_PREFIX)) {
-            return Result.buildFailure("api limited");
+            return Result.buildGatewayFailure("api limited");
        }
        return new Result<>(ResultStatus.OPERATION_FORBIDDEN);
    }
@@ -14,7 +14,7 @@ spring:
      jdbc-url: jdbc:mysql://127.0.0.1:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
      username: admin
      password: admin
-      driver-class-name: com.mysql.jdbc.Driver
+      driver-class-name: com.mysql.cj.jdbc.Driver
  main:
    allow-bean-definition-overriding: true

@@ -31,7 +31,7 @@ logging:
custom:
  idc: cn
  jmx:
-    max-conn: 10
+    max-conn: 10 # since v2.3 this setting no longer takes effect here
  store-metrics-task:
    community:
      broker-metrics-enabled: true
@@ -49,11 +49,22 @@ task:

account:
  ldap:
    enabled: false
+    url: ldap://127.0.0.1:389/
+    basedn: dc=tsign,dc=cn
+    factory: com.sun.jndi.ldap.LdapCtxFactory
+    filter: sAMAccountName
+    security:
+      authentication: simple
+      principal: cn=admin,dc=tsign,dc=cn
+      credentials: admin
+    auth-user-registration: true
+    auth-user-registration-role: normal

kcm:
  enabled: false
  s3:
-    endpoint: 127.0.0.1
+    endpoint: s3.didiyunapi.com
    access-key: 1234567890
    secret-key: 0987654321
    bucket: logi-kafka
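The driver-class-name change pairs with the mysql-connector-java bump in the pom.xml hunk below: Connector/J 8.x ships the driver as com.mysql.cj.jdbc.Driver and keeps the old com.mysql.jdbc.Driver name only as a deprecated alias that logs a warning at startup. A quick classpath sanity check (illustrative):

    try {
        Class.forName("com.mysql.cj.jdbc.Driver");  // present in Connector/J 8.x
    } catch (ClassNotFoundException e) {
        // Connector/J 8.x is not on the classpath
    }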
pom.xml
@@ -16,7 +16,7 @@
    </parent>

    <properties>
-        <kafka-manager.revision>2.2.0-SNAPSHOT</kafka-manager.revision>
+        <kafka-manager.revision>2.3.1-SNAPSHOT</kafka-manager.revision>
        <swagger2.version>2.7.0</swagger2.version>
        <swagger.version>1.5.13</swagger.version>

@@ -180,7 +180,7 @@
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
-            <version>5.1.41</version>
+            <version>8.0.11</version>
        </dependency>

        <dependency>