Merge branch 'didi-dev' into devLookUserName

This commit is contained in:
Hu.Yue
2022-01-18 14:09:05 +08:00
137 changed files with 2241 additions and 837 deletions

View File

@@ -5,9 +5,9 @@
**One-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
`LogiKM has drawn wide attention since it was open-sourced. To bring the project closer to the future direction of Apache Kafka, the team has decided, after careful deliberation, to rebrand it as Know Streaming around May 2022; the project name and logo will be updated at that time. Thank you for your continued support, and stay tuned!`
By reading this README you can learn about Didi Logi-KafkaManager's target users, product positioning, and more, and quickly try the full workflow of Kafka cluster metrics monitoring and operations management at the demo address.<br>If Didi Logi-KafkaManager is already used in your company's production environment and you want better official support and guidance, you can join the official communication platform through [`OCE certification`](http://obsuite.didiyun.com/open/openAuth).
By reading this README you can learn about Didi Logi-KafkaManager's target users, product positioning, and more, and quickly try the full workflow of Kafka cluster metrics monitoring and operations management at the demo address.
## 1 Product Overview
@@ -55,35 +55,56 @@
## 2 Documentation
### 2.1 Product Docs
- [Didi Logi-KafkaManager Installation Guide](docs/install_guide/install_guide_cn.md)
- [Didi Logi-KafkaManager Cluster Onboarding](docs/user_guide/add_cluster/add_cluster.md)
- [Didi Logi-KafkaManager User Guide](docs/user_guide/user_guide_cn.md)
- [Didi Logi-KafkaManager FAQ](docs/user_guide/faq.md)
- [Didi LogiKM Installation Guide](docs/install_guide/install_guide_cn.md)
- [Didi LogiKM Cluster Onboarding](docs/user_guide/add_cluster/add_cluster.md)
- [Didi LogiKM User Guide](docs/user_guide/user_guide_cn.md)
- [Didi LogiKM FAQ](docs/user_guide/faq.md)
### 2.2 Community Articles
- [The most complete Kafka knowledge map](https://www.szzdzhp.com/kafka/)
- [LogiKM beginner article series --石臻臻](https://www.szzdzhp.com/categories/LogIKM/)
- [Product introduction on the Didi Cloud website](https://www.didiyun.com/production/logi-KafkaManager.html)
- [Seven years in the making: the Didi Logi log service suite](https://mp.weixin.qq.com/s/-KQp-Qo3WKEOc9wIR2iFnw)
- [Didi Logi-KafkaManager: a one-stop Kafka monitoring and management platform](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ)
- [Didi Logi-KafkaManager: the road to open source](https://xie.infoq.cn/article/0223091a99e697412073c0d64)
- [Didi Logi-KafkaManager video tutorial series](https://mp.weixin.qq.com/s/9X7gH0tptHPtfjPPSdGO8g)
- [Didi Logi-KafkaManager video tutorial series](https://space.bilibili.com/442531657/channel/seriesdetail?sid=571649)
- [Kafka in practice (15): a study of Didi's open-source Kafka management platform Logi-KafkaManager --A叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244)
- [Kafka's soulmate: the Logi-KafkaManager article series --石臻](https://blog.csdn.net/u010634066/category_10977588.html)
## 3 Didi Logi Open-Source User Group
![image](https://user-images.githubusercontent.com/5287750/111266722-e531d800-8665-11eb-9242-3484da5a3099.png)
To join the WeChat group: follow the official account 云原生可观测性 and reply "Logi加群".
## 4 OCE Certification
OCE is a certification mechanism and communication platform built for production users of Didi Logi-KafkaManager. We give OCE companies better technical support, such as dedicated technical salons, one-on-one company exchanges, and a dedicated Q&A group. If your company runs Logi-KafkaManager in production, [come join us](http://obsuite.didiyun.com/open/openAuth).
To discuss Kafka, ES, and other middleware and big-data technologies with experienced engineers, join the WeChat group.
To join the WeChat group: add <font color=red>mike_zhangliang</font> or <font color=red>danke-xie</font> on WeChat with the note "Logi加群", or follow the official account 云原生可观测性 and reply "Logi加群".
## 4 Knowledge Planet
<img width="447" alt="image" src="https://user-images.githubusercontent.com/71620349/147314042-843a371a-48c0-4d9a-a65e-ca40236f3300.png">
<br>
<center>
✅ We are building the largest and most authoritative
</center>
<br>
<center>
<font color=red size=5><b>[Kafka Chinese Community]</b></font>
</center>
Here you can meet Kafka experts from major internet companies and 2,000+ Kafka enthusiasts, share knowledge, and stay on top of the latest industry news. We look forward to having you: https://z.didi.cn/5gSF9
<font color=red size=5>Every question gets an answer~ </font>
<font color=red size=5>Prizes for participating~ </font>
PS: When asking a question, please describe it completely in one message and include environment details (version in use, steps taken, error/warning messages, etc.) so the experts can answer quickly.
## 5 Project Members
### 5.1 Core Internal Members
`iceyuhui`, `liuyaguang`, `limengmonty`, `zhangliangmike`, `nullhuangyiming`, `zengqiao`, `eilenexuzhe`, `huangjiaweihjw`, `zhaoyinrui`, `marzkonglingxu`, `joysunchao`
`iceyuhui`, `liuyaguang`, `limengmonty`, `zhangliangmike`, `xiepeng`, `nullhuangyiming`, `zengqiao`, `eilenexuzhe`, `huangjiaweihjw`, `zhaoyinrui`, `marzkonglingxu`, `joysunchao`, `石臻臻`
### 5.2 External Contributors
@@ -93,4 +114,4 @@ OCE is a certification mechanism and communication platform for Didi Logi-KafkaManager production users
## 6 License
`kafka-manager` is distributed and used under the `Apache-2.0` license; see the [license file](./LICENSE) for more information
`LogiKM` is distributed and used under the `Apache-2.0` license; see the [license file](./LICENSE) for more information

View File

@@ -1,72 +0,0 @@
#!/bin/bash
workspace=$(cd $(dirname $0) && pwd -P)
cd $workspace
## constants
OUTPUT_DIR=./output
KM_VERSION=2.4.2
APP_NAME=kafka-manager
APP_DIR=${APP_NAME}-${KM_VERSION}
MYSQL_TABLE_SQL_FILE=./docs/install_guide/create_mysql_table.sql
CONFIG_FILE=./kafka-manager-web/src/main/resources/application.yml

## functions
function build() {
    # Build command
    mvn -U clean package -Dmaven.test.skip=true
    local sc=$?
    if [ $sc -ne 0 ];then
        ## Build failed; exit with a non-zero code
        echo "$APP_NAME build error"
        exit $sc
    else
        echo "$APP_NAME build ok"
    fi
}

function make_output() {
    # Create a fresh output directory
    rm -rf ${OUTPUT_DIR} &>/dev/null
    mkdir -p ${OUTPUT_DIR}/${APP_DIR} &>/dev/null
    # Populate the output directory
    (
        cp -rf ${MYSQL_TABLE_SQL_FILE} ${OUTPUT_DIR}/${APP_DIR} &&   # copy the SQL init script into output
        cp -rf ${CONFIG_FILE} ${OUTPUT_DIR}/${APP_DIR} &&            # copy application.yml into output
        # copy the application jar into output
        cp kafka-manager-web/target/kafka-manager-web-${KM_VERSION}-SNAPSHOT.jar ${OUTPUT_DIR}/${APP_DIR}/${APP_NAME}.jar
        echo -e "make output ok."
    ) || { echo -e "make output error"; exit 2; }   # exit non-zero if populating output fails
}

function make_package() {
    # Archive the output directory
    (
        cd ${OUTPUT_DIR} && tar cvzf ${APP_DIR}.tar.gz ${APP_DIR}
        echo -e "make package ok."
    ) || { echo -e "make package error"; exit 2; }   # exit non-zero if archiving fails
}

##########################################
## main
## Steps:
##   1. build
##   2. generate the output deployment directory
##   3. generate the tar.gz archive
##########################################
# 1. build
build
# 2. generate the output deployment directory
make_output
# 3. generate the tar.gz archive
make_package
# build finished
echo -e "build done"
exit 0

View File

@@ -19,6 +19,7 @@ ENV JAVA_OPTS="-verbose:gc \
RUN wget https://github.com/didi/Logi-KafkaManager/releases/download/v${VERSION}/kafka-manager-${VERSION}.tar.gz && \
tar xvf kafka-manager-${VERSION}.tar.gz && \
mv kafka-manager-${VERSION}/kafka-manager.jar /opt/app.jar && \
mv kafka-manager-${VERSION}/application.yml /opt/application.yml && \
rm -rf kafka-manager-${VERSION}*
EXPOSE 8080 9999

View File

@@ -55,7 +55,7 @@ data:
didi:
  app-topic-metrics-enabled: false
  topic-request-time-metrics-enabled: false
  topic-throttled-metrics: false
  topic-throttled-metrics-enabled: false
  save-days: 7
# Task-related switches
@@ -67,7 +67,16 @@ data:
# ldap settings
ldap:
  enabled: false
  authUserRegistration: false
  url: ldap://127.0.0.1:389/
  basedn: dc=tsign,dc=cn
  factory: com.sun.jndi.ldap.LdapCtxFactory
  filter: sAMAccountName
  security:
    authentication: simple
    principal: cn=admin,dc=tsign,dc=cn
    credentials: admin
  auth-user-registration: false
  auth-user-registration-role: normal
kcm:
  enabled: false

View File

@@ -0,0 +1,16 @@
#!/bin/bash
cd `dirname $0`/../target
target_dir=`pwd`

pid=`ps ax | grep -i 'kafka-manager' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'`
if [ -z "$pid" ] ; then
    echo "No kafka-manager running."
    exit 1;
fi

echo "The kafka-manager (${pid}) is running..."
kill ${pid}
echo "Send shutdown request to kafka-manager (${pid}) OK"

View File

@@ -0,0 +1,81 @@
error_exit ()
{
    echo "ERROR: $1 !!"
    exit 1
}

[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME

# Detect macOS; $darwin is used in the JAVA_HOME lookup below (this guard was
# missing from the script as shown and is added so the branch works as intended)
darwin=false
case "`uname`" in
    Darwin*) darwin=true;;
esac

if [ -z "$JAVA_HOME" ]; then
    if $darwin; then
        if [ -x '/usr/libexec/java_home' ] ; then
            export JAVA_HOME=`/usr/libexec/java_home`
        elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
            export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
        fi
    else
        JAVA_PATH=`dirname $(readlink -f $(which javac))`
        if [ "x$JAVA_PATH" != "x" ]; then
            export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
        fi
    fi
    if [ -z "$JAVA_HOME" ]; then
        error_exit "Please set the JAVA_HOME variable in your environment; we need 64-bit Java, preferably JDK 8 or later!"
    fi
fi

export WEB_SERVER="kafka-manager"
export JAVA_HOME
export JAVA="$JAVA_HOME/bin/java"
export BASE_DIR=`cd $(dirname $0)/..; pwd`
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/

#===========================================================================================
# JVM Configuration
#===========================================================================================
JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"

## Some GC flags were removed in newer JDK versions, so branch on the major version
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
    JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400"
else
    JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
    JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi

JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/${WEB_SERVER}.jar"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"

if [ ! -d "${BASE_DIR}/logs" ]; then
    mkdir ${BASE_DIR}/logs
fi

echo "$JAVA ${JAVA_OPT}"

# check the start.out log output file
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
    touch "${BASE_DIR}/logs/start.out"
fi

# start
echo -e "---- startup command ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 &
nohup $JAVA ${JAVA_OPT} >> ${BASE_DIR}/logs/start.out 2>&1 &
echo "${WEB_SERVER} is starting, you can check ${BASE_DIR}/logs/start.out"

View File

@@ -0,0 +1,29 @@
## kafka-manager configuration file; settings here override the built-in defaults
## The settings below are essentially the default application.yml shipped inside the jar.
## You can keep only the settings you change and delete the rest, e.g. configure just MySQL.
server:
  port: 8080
  tomcat:
    accept-count: 1000
    max-connections: 10000
    max-threads: 800
    min-spare-threads: 100

spring:
  application:
    name: kafkamanager
  version: @project.version@
  profiles:
    active: dev
  datasource:
    kafka-manager:
      jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
  main:
    allow-bean-definition-overriding: true

View File

@@ -0,0 +1,136 @@
## kafka-manager configuration file; settings here override the built-in defaults
## The settings below are essentially the default application.yml shipped inside the jar.
## You can keep only the settings you change and delete the rest, e.g. configure just MySQL.
server:
  port: 8080
  tomcat:
    accept-count: 1000
    max-connections: 10000
    max-threads: 800
    min-spare-threads: 100

spring:
  application:
    name: kafkamanager
  version: @project.version@
  profiles:
    active: dev
  datasource:
    kafka-manager:
      jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
  main:
    allow-bean-definition-overriding: true
  servlet:
    multipart:
      max-file-size: 100MB
      max-request-size: 100MB

logging:
  config: classpath:logback-spring.xml

custom:
  idc: cn
  store-metrics-task:
    community:
      topic-metrics-enabled: true
    didi: # metrics specific to Didi's internal Kafka build
      app-topic-metrics-enabled: false
      topic-request-time-metrics-enabled: false
      topic-throttled-metrics-enabled: false

# Task-related configuration
task:
  op:
    sync-topic-enabled: false # periodically sync Topics that are not yet persisted into the DB
    order-auto-exec: # switches for the automatic order-approval threads
      topic-enabled: false # auto-approval of Topic orders; false: off, true: on
      app-enabled: false # auto-approval of App orders; false: off, true: on
  metrics:
    collect: # metrics collection
      broker-metrics-enabled: true # collect Broker metrics
    sink: # metrics reporting
      cluster-metrics: # report cluster metrics
        sink-db-enabled: true # report to the DB
      broker-metrics: # report broker metrics
        sink-db-enabled: true # report to the DB
    delete: # metrics deletion
      delete-limit-size: 1000 # batch size per delete
      cluster-metrics-save-days: 14 # days to keep cluster metrics
      broker-metrics-save-days: 14 # days to keep Broker metrics
      topic-metrics-save-days: 7 # days to keep Topic metrics
      topic-request-time-metrics-save-days: 7 # days to keep Topic request-time metrics
      topic-throttled-metrics-save-days: 7 # days to keep Topic throttling metrics
      app-topic-metrics-save-days: 7 # days to keep App+Topic metrics

thread-pool:
  collect-metrics:
    thread-num: 256 # size of the metrics-collection thread pool
    queue-size: 5000 # queue size of the metrics-collection thread pool
  api-call:
    thread-num: 16 # size of the API thread pool
    queue-size: 5000 # queue size of the API thread pool

client-pool:
  kafka-consumer:
    min-idle-client-num: 24 # minimum number of idle clients
    max-idle-client-num: 24 # maximum number of idle clients
    max-total-client-num: 24 # maximum total number of clients
    borrow-timeout-unit-ms: 3000 # borrow timeout, in milliseconds

account:
  jump-login:
    gateway-api: false # gateway APIs
    third-part-api: false # third-party APIs
  ldap:
    enabled: false
    url: ldap://127.0.0.1:389/
    basedn: dc=tsign,dc=cn
    factory: com.sun.jndi.ldap.LdapCtxFactory
    filter: sAMAccountName
    security:
      authentication: simple
      principal: cn=admin,dc=tsign,dc=cn
      credentials: admin
    auth-user-registration: true
    auth-user-registration-role: normal

kcm: # cluster deployment; installs brokers only
  enabled: false # whether KCM is enabled
  s3: # S3 storage service
    endpoint: s3.didiyunapi.com
    access-key: 1234567890
    secret-key: 0987654321
    bucket: logi-kafka
  n9e: # Nightingale (N9E)
    base-url: http://127.0.0.1:8004 # address of the N9E job service
    user-token: 12345678 # the user's token
    timeout: 300 # per-host operation timeout
    account: root # account used for operations
    script-file: kcm_script.sh # the script is bundled in the kcm module of the source; no need to change this
    logikm-url: http://127.0.0.1:8080 # LogiKM address; kcm_script.sh calls LogiKM during deployment to check status

monitor:
  enabled: false
  n9e:
    nid: 2
    user-token: 1234567890
    mon:
      base-url: http://127.0.0.1:8000 # N9E v4 unified the default ports to 8000
    sink:
      base-url: http://127.0.0.1:8000 # N9E v4 unified the default ports to 8000
    rdb:
      base-url: http://127.0.0.1:8000 # N9E v4 unified the default ports to 8000

notify:
  kafka:
    cluster-id: 95
    topic-name: didi-kafka-notify
  order:
    detail-url: http://127.0.0.1

View File

@@ -0,0 +1,215 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="10 seconds">
<contextName>logback</contextName>
<property name="log.path" value="./logs" />
<!-- Colored logs -->
<!-- Converter classes required for colored logs -->
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
<conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
<conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
<!-- Colored log pattern -->
<property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<!-- Console output -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>info</level>
</filter>
<encoder>
<Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- File output -->
<!-- Time-based rolling output for DEBUG-level logs -->
<appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/log_debug.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- character set -->
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Log archiving -->
<fileNamePattern>${log.path}/log_debug_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days to keep log files -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only DEBUG-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>debug</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Time-based rolling output for INFO-level logs -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the active log file -->
<file>${log.path}/log_info.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily archive path and naming pattern -->
<fileNamePattern>${log.path}/log_info_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days to keep log files -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only INFO-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>info</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Time-based rolling output for WARN-level logs -->
<appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the active log file -->
<file>${log.path}/log_warn.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- character set -->
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/log_warn_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days to keep log files -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only WARN-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>warn</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Time-based rolling output for ERROR-level logs -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the active log file -->
<file>${log.path}/log_error.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- character set -->
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/log_error_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days to keep log files -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only ERROR-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Metrics collection log -->
<appender name="COLLECTOR_METRICS_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/metrics/collector_metrics.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/metrics/collector_metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>3</maxHistory>
</rollingPolicy>
</appender>
<!-- API metrics log -->
<appender name="API_METRICS_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/metrics/api_metrics.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/metrics/api_metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>3</maxHistory>
</rollingPolicy>
</appender>
<!-- Scheduled-task log -->
<appender name="SCHEDULED_TASK_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/metrics/scheduled_tasks.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/metrics/scheduled_tasks_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>5</maxHistory>
</rollingPolicy>
</appender>
<logger name="COLLECTOR_METRICS_LOGGER" level="DEBUG" additivity="false">
<appender-ref ref="COLLECTOR_METRICS_LOGGER"/>
</logger>
<logger name="API_METRICS_LOGGER" level="DEBUG" additivity="false">
<appender-ref ref="API_METRICS_LOGGER"/>
</logger>
<logger name="SCHEDULED_TASK_LOGGER" level="DEBUG" additivity="false">
<appender-ref ref="SCHEDULED_TASK_LOGGER"/>
</logger>
<logger name="org.apache.ibatis" level="INFO" additivity="false" />
<logger name="org.mybatis.spring" level="INFO" additivity="false" />
<logger name="com.github.miemiedev.mybatis.paginator" level="INFO" additivity="false" />
<root level="info">
<appender-ref ref="CONSOLE" />
<appender-ref ref="DEBUG_FILE" />
<appender-ref ref="INFO_FILE" />
<appender-ref ref="WARN_FILE" />
<appender-ref ref="ERROR_FILE" />
<!--<appender-ref ref="METRICS_LOG" />-->
</root>
<!-- Production environment: output to files -->
<!--<springProfile name="pro">-->
<!--<root level="info">-->
<!--<appender-ref ref="CONSOLE" />-->
<!--<appender-ref ref="DEBUG_FILE" />-->
<!--<appender-ref ref="INFO_FILE" />-->
<!--<appender-ref ref="ERROR_FILE" />-->
<!--<appender-ref ref="WARN_FILE" />-->
<!--</root>-->
<!--</springProfile>-->
</configuration>

distribution/pom.xml Normal file
View File

@@ -0,0 +1,64 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${kafka-manager.revision}</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>distribution</artifactId>
<name>distribution</name>
<packaging>pom</packaging>
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>kafka-manager-web</artifactId>
<version>${kafka-manager.revision}</version>
</dependency>
</dependencies>
<profiles>
<profile>
<id>release-kafka-manager</id>
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>kafka-manager-web</artifactId>
<version>${kafka-manager.revision}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<descriptors>
<descriptor>release-km.xml</descriptor>
</descriptors>
<tarLongFileMode>posix</tarLongFileMode>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>install</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
<finalName>kafka-manager</finalName>
</build>
</profile>
</profiles>
</project>

distribution/readme.md Normal file
View File

@@ -0,0 +1,22 @@
## Notes
### 1. Create the MySQL database
> conf/create_mysql_table.sql
### 2. Modify the configuration file
> conf/application.yml.example
> Copy application.yml.example and rename the copy to application.yml,
> keep it in the same directory (conf/), and change the settings to your own.
> Settings here take precedence over the defaults inside the jar.
>
### 3. Start/stop kafka-manager
> sh bin/startup.sh to start
>
> sh shutdown.sh to stop
>
### 4. Upgrading the jar
> When upgrading, check `upgrade_config.md` for the history of configuration changes.
>

distribution/release-km.xml Executable file
View File

@@ -0,0 +1,51 @@
<?xml version="1.0" encoding="UTF-8"?>
<assembly>
<id>${project.version}</id>
<includeBaseDirectory>true</includeBaseDirectory>
<formats>
<format>dir</format>
<format>tar.gz</format>
<format>zip</format>
</formats>
<fileSets>
<fileSet>
<includes>
<include>conf/**</include>
</includes>
</fileSet>
<fileSet>
<includes>
<include>bin/*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
</fileSets>
<files>
<file>
<source>readme.md</source>
<destName>readme.md</destName>
</file>
<file>
<source>upgrade_config.md</source>
<destName>upgrade_config.md</destName>
</file>
<file>
<!-- Name and target directory of the built jar -->
<source>../kafka-manager-web/target/kafka-manager.jar</source>
<outputDirectory>target/</outputDirectory>
</file>
</files>
<moduleSets>
<moduleSet>
<useAllReactorProjects>true</useAllReactorProjects>
<includes>
<include>com.xiaojukeji.kafka:kafka-manager-web</include>
</includes>
</moduleSet>
</moduleSets>
</assembly>

View File

@@ -0,0 +1,42 @@
## Version-Upgrade Configuration Changes
> This file is maintained starting from V2.2.0; any configuration change is recorded below, and versions without an entry had no changes.
> When upgrading from a much older version, run the SQL scripts of every intermediate version that had changes, in order.
**One-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
---
### 1. Upgrading to `V2.2.0`
#### 1. MySQL changes
`2.2.0` adds one column to each of the `cluster` and `logical_cluster` tables, so run the SQL below to add them.
```sql
# Add the jmx_properties column to cluster; it stores JMX authentication and related configuration
ALTER TABLE `cluster` ADD COLUMN `jmx_properties` TEXT NULL COMMENT 'JMX配置' AFTER `security_properties`;
# Add the identification column to logical_cluster, copy the existing name values into it, and add a unique key.
# From now on, name remains the cluster's display name, while identification is the cluster identifier and may
# only contain letters, digits, and underscores. When data is reported to the monitoring system, the cluster
# identifier is now taken from identification; previously the name column was used.
ALTER TABLE `logical_cluster` ADD COLUMN `identification` VARCHAR(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识' AFTER `name`;
UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0;
ALTER TABLE `logical_cluster` ADD INDEX `uniq_identification` (`identification` ASC);
```
### Upgrading to `2.3.0`
#### 1. MySQL changes
`2.3.0` adds a description column to the `gateway_config` table, so run the SQL below to add it.
```sql
ALTER TABLE `gateway_config`
ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`;
```

Binary file not shown. (image added, 785 KiB)

Binary file not shown. (image added, 2.5 MiB)

View File

@@ -136,7 +136,8 @@ EXPIRED_TOPIC_CONFIG
Config value:
```json
{
"minExpiredDay": 30, # show only topics expired longer than this
"minExpiredDay": 30, # show only topics expired longer than this,
"filterRegex": ".*XXX\\s+", # ignore topics whose names match this regex
"ignoreClusterIdList": [ # clusters to ignore
50
]

View File

@@ -0,0 +1,53 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**One-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
---
# How to Report Additional Metrics to the Monitoring System
## 0. Background
LogiKM, a one-stop Apache Kafka cluster metrics monitoring and operations platform, currently reports metrics such as consumer lag and Topic traffic to the monitoring system. There, users can configure alert rules on these metrics and thereby monitor whether their own clients are behaving normally.
So how do we add a new metric? Say we want to monitor Broker traffic, Broker liveness, or the number of cluster Controllers.
Before going into detail: Kafka monitoring data essentially lives in three places, namely the Brokers, JMX, and ZooKeeper. LogiKM already knows how to fetch data from all three, so building additional metrics on top of LogiKM is generally quite convenient.
Below we take the already-implemented Topic traffic metric as an example and look at how LogiKM collects and reports a Topic metric.
---
## 1. Locate the metric
From what we know about Kafka, the Topic traffic metric is stored in JMX, so that is where we fetch it. If you are not sure where the metric you need lives, join the Kafka Chinese Community we maintain (QR code in the README) and discuss it there.
---
## 2. Collect the metric
The details of collecting the Topic traffic metric are shown in the figure, followed by a small code sketch.
![Topic traffic metric collection](./assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg)
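As a rough sketch of what such a JMX read looks like in code (illustrative only; `TopicTrafficProbe` is not a LogiKM class, though the MBean name is Kafka's standard `BrokerTopicMetrics`):
```java
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Minimal sketch: read a topic's BytesInPerSec one-minute rate from a broker's JMX endpoint.
public class TopicTrafficProbe {
    public static double bytesInOneMinuteRate(String host, int jmxPort, String topic) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
                String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, jmxPort));
        try (JMXConnector connector = JMXConnectorFactory.connect(url, null)) {
            MBeanServerConnection conn = connector.getMBeanServerConnection();
            ObjectName mbean = new ObjectName(
                    "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=" + topic);
            return (Double) conn.getAttribute(mbean, "OneMinuteRate");
        }
    }
}
```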
---
## 3. Report the metric
With the Topic traffic metric collected in the previous step, all that remains is to report it to the monitoring system in whatever format that system requires.
LogiKM has a monitor module for exactly this, as shown below:
![Metric reporting](./assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png)
## 4. Further reading
For details on monitoring-system integration, see:
[Integrating a monitoring system](./monitor_system_integrate_with_self.md)
[Integration example: Nightingale (N9E)](./monitor_system_integrate_with_n9e.md)

View File

@@ -51,13 +51,16 @@ custom:
    didi:
      app-topic-metrics-enabled: false # metric embedded by Didi's Kafka build; community Kafka lacks it, so off by default
      topic-request-time-metrics-enabled: false # metric embedded by Didi's Kafka build; community Kafka lacks it, so off by default
      topic-throttled-metrics: false # metric embedded by Didi's Kafka build; community Kafka lacks it, so off by default
      topic-throttled-metrics-enabled: false # metric embedded by Didi's Kafka build; community Kafka lacks it, so off by default
      save-days: 7 # days the metrics are kept in the DB; -1 keeps them forever, 7 keeps the last 7 days
# Task-related switches
task:
  op:
    sync-topic-enabled: false # periodically sync Topics that are not yet persisted into the DB
    order-auto-exec: # switches for the automatic order-approval threads
      topic-enabled: false # auto-approval of Topic orders; false: off, true: on
      app-enabled: false # auto-approval of App orders; false: off, true: on
account: # LDAP-related settings; community support here is still immature, so you can ignore it for now (contributions welcome)
  ldap:

View File

@@ -31,17 +31,23 @@
**2. Package the source code**
After downloading the code, enter the `Logi-KafkaManager` root directory and run `sh build.sh`; when it finishes, a jar is generated under `output/kafka-manager-xxx`.
After downloading the code, enter the `Logi-KafkaManager` root directory and run `mvn -Prelease-kafka-manager -Dmaven.test.skip=true clean install -U`.
When it finishes, `distribution/target` contains a `kafka-manager-*.tar.gz`
and a `kafka-manager-*.zip` file; either archive will do.
An already-extracted copy of the same directory is produced alongside them.
On `windows`, you probably cannot run `sh build.sh`; run `mvn install` directly instead, which produces a kafka-manager-web-xxx.jar under `kafka-manager-web/target`.
Once you have the jar, continue with the steps below.
---
## 3. MySQL DB initialization
## 3. Extract the package
After extraction, the directory contains a MySQL init file at `kafka-manager/conf/create_mysql_table.sql`.
Initialize the DB first.
Run the SQL commands in [create_mysql_table.sql](create_mysql_table.sql) to create the required MySQL database and tables; the default database name is `logi_kafka_manager`.
## 4. MySQL DB initialization
Run the SQL commands in [create_mysql_table.sql](../../distribution/conf/create_mysql_table.sql) to create the required MySQL database and tables; the default database name is `logi_kafka_manager`.
```
# Example:
@@ -50,15 +56,38 @@ mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
---
## 4. Start
## 5. Modify the configuration
Copy `conf/application.yml.example` to a file named `application.yml` in the same directory (conf/application.yml),
then edit the settings; anything you leave unchanged falls back to the defaults.
At the very least, point the MySQL settings at your own instance.
```
# application.yml is the configuration file; at minimum, changing only the MySQL-related settings is enough to start
nohup java -jar kafka-manager.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
```
## 6. Start/Stop
The extracted package contains start and stop scripts:
`kafka-manager/bin/shutdown.sh`
`kafka-manager/bin/startup.sh`
### 5. Usage
Run sh startup.sh to start
Run sh shutdown.sh to stop
### 6. Usage
For a local start, open `http://localhost:8080` and log in with the default account and password (`admin/admin`). For more, see the [kafka-manager User Guide](../user_guide/user_guide_cn.md)
### 7. Upgrading
When upgrading between versions, see the [kafka-manager upgrade guide](../../distribution/upgrade_config.md)
It is also included in the downloaded package (V2.5 and later) at kafka-manager/upgrade_config.md
### 8. Running in an IDE
> If you want to contribute, or want to start the app from an IDE:
> first run `mvn -Dmaven.test.skip=true clean install -U `
>
> You can then comment out the `kafka-manager-console` module in [pom.xml](../../pom.xml);
> otherwise every install re-packages the front end (`kafka-manager-console`) into `kafka-manager-web`.
>
> After that, simply run the main method of
> com.xiaojukeji.kafka.manager.web.MainApplication in the `kafka-manager-web` module from your IDE.

View File

@@ -21,15 +21,12 @@
<java_target_version>1.8</java_target_version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
<spring-version>5.1.3.RELEASE</spring-version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>
<version>${spring-version}</version>
</dependency>
<!-- http -->
@@ -109,5 +106,11 @@
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>compile</scope>
</dependency>
</dependencies>
</project>

View File

@@ -20,12 +20,6 @@ public class ApiPrefix {
// open
public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/";
// APIs exposed to OP; the clusters these APIs operate on must all be physical clusters
public static final String API_V1_THIRD_PART_OP_PREFIX = API_V1_THIRD_PART_PREFIX + "op/";
// APIs exposed to Normal users; the clusters these APIs operate on must all be logical clusters
public static final String API_V1_THIRD_PART_NORMAL_PREFIX = API_V1_THIRD_PART_PREFIX + "normal/";
// gateway
public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX;

View File

@@ -9,7 +9,7 @@ public class Constant {
public static final Integer MAX_AVG_BYTES_DURATION = 10;
public static final Integer BATCH_INSERT_SIZE = 50;
public static final Integer BATCH_INSERT_SIZE = 30;
public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000;

View File

@@ -17,6 +17,10 @@ public class KafkaConstant {
public static final String RETENTION_MS_KEY = "retention.ms";
public static final String EXTERNAL_KEY = "EXTERNAL";
public static final String INTERNAL_KEY = "INTERNAL";
private KafkaConstant() {
}
}

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.common;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
@Data
@NoArgsConstructor
@AllArgsConstructor
public class IpPortData implements Serializable {
private static final long serialVersionUID = -428897032994630685L;
private String ip;
private String port;
}

View File

@@ -10,6 +10,8 @@ import java.util.List;
public class TopicExpiredConfig {
private Integer minExpiredDay = 30;
private String filterRegex = "";
private List<Long> ignoreClusterIdList = new ArrayList<>();
public Integer getMinExpiredDay() {
@@ -28,10 +30,19 @@ public class TopicExpiredConfig {
this.ignoreClusterIdList = ignoreClusterIdList;
}
public String getFilterRegex() {
return filterRegex;
}
public void setFilterRegex(String filterRegex) {
this.filterRegex = filterRegex;
}
@Override
public String toString() {
return "TopicExpiredConfig{" +
"minExpiredDay=" + minExpiredDay +
", filterRegex='" + filterRegex + '\'' +
", ignoreClusterIdList=" + ignoreClusterIdList +
'}';
}

View File

@@ -25,6 +25,8 @@ public class MineTopicSummary {
private Integer access;
private String description;
public Long getLogicalClusterId() {
return logicalClusterId;
}
@@ -105,6 +107,14 @@ public class MineTopicSummary {
this.access = access;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "MineTopicSummary{" +

View File

@@ -27,8 +27,11 @@ public class OrderVO {
@ApiModelProperty(value = "工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消")
private Integer status;
@ApiModelProperty(value = "申请/审核时间")
private Date gmtTime;
@ApiModelProperty(value = "申请时间")
private Date gmtCreate;
@ApiModelProperty(value = "审核时间")
private Date gmtHandle;
public Long getId() {
return id;
@@ -70,12 +73,20 @@ public class OrderVO {
this.status = status;
}
public Date getGmtTime() {
return gmtTime;
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtTime(Date gmtTime) {
this.gmtTime = gmtTime;
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtHandle() {
return gmtHandle;
}
public void setGmtHandle(Date gmtHandle) {
this.gmtHandle = gmtHandle;
}
public String getApplicant() {
@@ -95,7 +106,7 @@ public class OrderVO {
", applicant='" + applicant + '\'' +
", description='" + description + '\'' +
", status=" + status +
", gmtTime=" + gmtTime +
", gmtTime=" + gmtCreate +
'}';
}
}

View File

@@ -36,6 +36,9 @@ public class TopicMineVO {
@ApiModelProperty(value = "状态, 0:无权限, 1:可消费 2:可发送 3:可消费发送 4:可管理")
private Integer access;
@ApiModelProperty(value = "备注")
private String description;
public Long getClusterId() {
return clusterId;
}
@@ -108,6 +111,14 @@ public class TopicMineVO {
this.access = access;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "TopicMineVO{" +

View File

@@ -0,0 +1,20 @@
package com.xiaojukeji.kafka.manager.common.events;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
import lombok.Getter;
import org.springframework.context.ApplicationEvent;
/**
* Region creation event
* @author zengqiao
* @date 22/01/1
*/
@Getter
public class RegionCreatedEvent extends ApplicationEvent {
private final RegionDO regionDO;
public RegionCreatedEvent(Object source, RegionDO regionDO) {
super(source);
this.regionDO = regionDO;
}
}

View File

@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.common.events.metrics;
import org.springframework.context.ApplicationEvent;
/**
* @author zengqiao
* @date 22/01/17
*/
public class BaseMetricsCollectedEvent extends ApplicationEvent {
/**
* Physical cluster ID
*/
protected final Long physicalClusterId;
/**
* Collection time; set according to business needs, e.g. the task start time or the task end time
*/
protected final Long collectTime;
public BaseMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime) {
super(source);
this.physicalClusterId = physicalClusterId;
this.collectTime = collectTime;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public Long getCollectTime() {
return collectTime;
}
}

View File

@@ -0,0 +1,22 @@
package com.xiaojukeji.kafka.manager.common.events.metrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import java.util.List;
/**
* @author zengqiao
* @date 20/8/31
*/
public class BatchBrokerMetricsCollectedEvent extends BaseMetricsCollectedEvent {
private final List<BrokerMetrics> metricsList;
public BatchBrokerMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime, List<BrokerMetrics> metricsList) {
super(source, physicalClusterId, collectTime);
this.metricsList = metricsList;
}
public List<BrokerMetrics> getMetricsList() {
return metricsList;
}
}
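To make the role of these event classes concrete, here is a minimal sketch (the publisher and listener class names are illustrative, not LogiKM's actual components) of publishing and consuming the batch event through Spring's standard event mechanism:
```java
import java.util.List;

import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;

// Illustrative only: a collector publishes the batch event, a sink consumes it.
@Component
public class BrokerMetricsCollectorDemo {
    private final ApplicationEventPublisher publisher;

    public BrokerMetricsCollectorDemo(ApplicationEventPublisher publisher) {
        this.publisher = publisher;
    }

    public void finishBatch(Long physicalClusterId, List<BrokerMetrics> metricsList) {
        // collectTime here is the moment the batch finished collecting
        publisher.publishEvent(new BatchBrokerMetricsCollectedEvent(
                this, physicalClusterId, System.currentTimeMillis(), metricsList));
    }
}

@Component
class BrokerMetricsDbSinkDemo {
    @EventListener
    public void handle(BatchBrokerMetricsCollectedEvent event) {
        // e.g. persist event.getMetricsList() for event.getPhysicalClusterId()
    }
}
```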

View File

@@ -0,0 +1,75 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class BackoffUtils {
private BackoffUtils() {
}
/**
* Events that currently require backoff
* <backoff event key, backoff end time (ms)>
*/
private static final Map<String, Long> NEED_BACK_OFF_EVENT_MAP = new ConcurrentHashMap<>();
public static void backoff(long timeUnitMs) {
if (timeUnitMs <= 0) {
return;
}
try {
Thread.sleep(timeUnitMs);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
// ignore
}
}
/**
* Record a backoff
* @param backoffEventKey backoff event key
* @param backoffTimeUnitMs backoff duration (ms)
*/
public static void putNeedBackoffEvent(String backoffEventKey, Long backoffTimeUnitMs) {
if (backoffEventKey == null || backoffTimeUnitMs == null || backoffTimeUnitMs <= 0) {
return;
}
NEED_BACK_OFF_EVENT_MAP.put(backoffEventKey, backoffTimeUnitMs + System.currentTimeMillis());
}
/**
* Remove a backoff
* @param backoffEventKey backoff event key
*/
public static void removeNeedBackoffEvent(String backoffEventKey) {
NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey);
}
/**
* Check whether backoff is currently required
* @param backoffEventKey backoff event key
* @return
*/
public static boolean isNeedBackoff(String backoffEventKey) {
Long backoffEventEndTimeUnitMs = NEED_BACK_OFF_EVENT_MAP.get(backoffEventKey);
if (backoffEventEndTimeUnitMs == null) {
return false;
}
if (backoffEventEndTimeUnitMs > System.currentTimeMillis()) {
return true;
}
// the backoff has expired; remove it
try {
NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey, backoffEventEndTimeUnitMs);
} catch (Exception e) {
// an NPE is possible here if the key no longer exists; any exception can safely be ignored
}
return false;
}
}
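A minimal usage sketch of BackoffUtils (the caller class and `tryConnect()` are hypothetical stand-ins for any expensive operation that should not be retried during the backoff window):
```java
import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils;

// Illustrative caller: after a failure, record a backoff so later calls skip the retry.
public class ConnectWithBackoffDemo {
    private static final String KEY = "CONNECT_FAILED_BACKOFF_EVENT_DEMO";

    public static void connectIfAllowed() {
        if (BackoffUtils.isNeedBackoff(KEY)) {
            return; // still backing off; skip this round
        }
        if (tryConnect()) {
            BackoffUtils.removeNeedBackoffEvent(KEY); // success clears any backoff
        } else {
            BackoffUtils.putNeedBackoffEvent(KEY, 10 * 60 * 1000L); // back off 10 minutes
        }
    }

    private static boolean tryConnect() {
        return false; // stub for the real, expensive connect attempt
    }
}
```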

View File

@@ -1,7 +1,7 @@
package com.xiaojukeji.kafka.manager.common.utils.factory;
import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.apache.commons.pool2.BasePooledObjectFactory;
import org.apache.commons.pool2.PooledObject;
@@ -16,7 +16,7 @@ import java.util.Properties;
* @author zengqiao
* @date 20/8/24
*/
public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer> {
public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer<String, String>> {
private ClusterDO clusterDO;
public KafkaConsumerFactory(ClusterDO clusterDO) {
@@ -25,17 +25,17 @@ public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer>
@Override
public KafkaConsumer create() {
return new KafkaConsumer(createKafkaConsumerProperties(clusterDO));
return new KafkaConsumer<String, String>(createKafkaConsumerProperties(clusterDO));
}
@Override
public PooledObject<KafkaConsumer> wrap(KafkaConsumer obj) {
return new DefaultPooledObject<KafkaConsumer>(obj);
public PooledObject<KafkaConsumer<String, String>> wrap(KafkaConsumer<String, String> obj) {
return new DefaultPooledObject<>(obj);
}
@Override
public void destroyObject(final PooledObject<KafkaConsumer> p) throws Exception {
KafkaConsumer kafkaConsumer = p.getObject();
public void destroyObject(final PooledObject<KafkaConsumer<String, String>> p) throws Exception {
KafkaConsumer<String, String> kafkaConsumer = p.getObject();
if (ValidateUtils.isNull(kafkaConsumer)) {
return;
}
@@ -57,7 +57,7 @@ public class KafkaConsumerFactory extends BasePooledObjectFactory<KafkaConsumer>
if (ValidateUtils.isBlank(clusterDO.getSecurityProperties())) {
return properties;
}
properties.putAll(JSONObject.parseObject(clusterDO.getSecurityProperties(), Properties.class));
properties.putAll(JsonUtils.stringToObj(clusterDO.getSecurityProperties(), Properties.class));
return properties;
}
}
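For context on how such a factory is typically wired up, here is a minimal sketch using commons-pool2 directly (the demo class is illustrative; the pool numbers mirror the `client-pool.kafka-consumer` settings in application.yml above):
```java
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Illustrative: pool the KafkaConsumer<String, String> instances produced by KafkaConsumerFactory.
public class ConsumerPoolDemo {
    public static void consumeOnce(ClusterDO clusterDO) throws Exception {
        GenericObjectPool<KafkaConsumer<String, String>> pool =
                new GenericObjectPool<>(new KafkaConsumerFactory(clusterDO));
        pool.setMaxTotal(24); // mirrors max-total-client-num
        pool.setMaxIdle(24);  // mirrors max-idle-client-num
        pool.setMinIdle(24);  // mirrors min-idle-client-num

        KafkaConsumer<String, String> consumer = pool.borrowObject(3000); // borrow-timeout-unit-ms
        try {
            // ... use the consumer ...
        } finally {
            pool.returnObject(consumer);
        }
    }
}
```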

View File

@@ -1,5 +1,10 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import lombok.Data;
import lombok.ToString;
@Data
@ToString
public class JmxConfig {
/**
* Maximum number of connections per host
@@ -21,45 +26,8 @@ public class JmxConfig {
*/
private Boolean openSSL;
public Integer getMaxConn() {
return maxConn;
}
public void setMaxConn(Integer maxConn) {
this.maxConn = maxConn;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public Boolean isOpenSSL() {
return openSSL;
}
public void setOpenSSL(Boolean openSSL) {
this.openSSL = openSSL;
}
@Override
public String toString() {
return "JmxConfig{" +
"maxConn=" + maxConn +
", username='" + username + '\'' +
", password='" + password + '\'' +
", openSSL=" + openSSL +
'}';
}
/**
* Connection-retry backoff duration (ms)
*/
private Long retryConnectBackoffTimeUnitMs;
}

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -13,11 +14,11 @@ import javax.naming.Context;
import javax.rmi.ssl.SslRMIClientSocketFactory;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
/**
* JMXConnector包装类
@@ -25,19 +26,27 @@ import java.util.concurrent.atomic.AtomicInteger;
* @date 2015/11/9.
*/
public class JmxConnectorWrap {
private final static Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class);
private static final Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class);
private String host;
private final Long physicalClusterId;
private int port;
private final Integer brokerId;
private final String host;
private final int port;
private JMXConnector jmxConnector;
private AtomicInteger atomicInteger;
private final AtomicInteger atomicInteger;
private JmxConfig jmxConfig;
public JmxConnectorWrap(String host, int port, JmxConfig jmxConfig) {
private final ReentrantLock modifyJMXConnectorLock = new ReentrantLock();
public JmxConnectorWrap(Long physicalClusterId, Integer brokerId, String host, int port, JmxConfig jmxConfig) {
this.physicalClusterId = physicalClusterId;
this.brokerId = brokerId;
this.host = host;
this.port = port;
this.jmxConfig = jmxConfig;
@@ -45,7 +54,12 @@ public class JmxConnectorWrap {
this.jmxConfig = new JmxConfig();
}
if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) {
this.jmxConfig.setMaxConn(1);
// default to 20
this.jmxConfig.setMaxConn(20);
}
if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getRetryConnectBackoffTimeUnitMs())) {
// default backoff of 10 minutes
this.jmxConfig.setRetryConnectBackoffTimeUnitMs(10 * 60 * 1000L);
}
this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn());
}
@@ -57,17 +71,40 @@ public class JmxConnectorWrap {
if (port == -1) {
return false;
}
return createJmxConnector();
return safeCreateJmxConnector();
}
public synchronized void close() {
public void close() {
this.closeJmxConnect();
}
public void closeJmxConnect() {
if (jmxConnector == null) {
return;
}
try {
modifyJMXConnectorLock.lock();
// remove any backoff event that was recorded
BackoffUtils.removeNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId));
jmxConnector.close();
} catch (IOException e) {
LOGGER.warn("close JmxConnector exception, host:{} port:{}.", host, port, e);
} catch (Exception e) {
LOGGER.error("close JmxConnector exception, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e);
} finally {
jmxConnector = null;
modifyJMXConnectorLock.unlock();
}
}
private boolean safeCreateJmxConnector() {
try {
modifyJMXConnectorLock.lock();
return createJmxConnector();
} finally {
modifyJMXConnectorLock.unlock();
}
}
@@ -75,6 +112,12 @@ public class JmxConnectorWrap {
if (jmxConnector != null) {
return true;
}
if (BackoffUtils.isNeedBackoff(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId))) {
// a backoff is in effect, so skip creating a connection this time
return false;
}
String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
try {
Map<String, Object> environment = new HashMap<String, Object>();
@@ -82,7 +125,9 @@ public class JmxConnectorWrap {
// fixed by riyuetianmu
environment.put(JMXConnector.CREDENTIALS, new String[]{this.jmxConfig.getUsername(), this.jmxConfig.getPassword()});
}
if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
if (jmxConfig.getOpenSSL() != null && this.jmxConfig.getOpenSSL()) {
// enable SSL
environment.put(Context.SECURITY_PROTOCOL, "ssl");
SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory();
environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory);
@@ -90,13 +135,17 @@ public class JmxConnectorWrap {
}
jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment);
LOGGER.info("JMX connect success, host:{} port:{}.", host, port);
LOGGER.info("connect JMX success, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port);
return true;
} catch (MalformedURLException e) {
LOGGER.error("JMX url exception, host:{} port:{} jmxUrl:{}", host, port, jmxUrl, e);
LOGGER.error("connect JMX failed, JMX url exception, physicalClusterId:{} brokerId:{} host:{} port:{} jmxUrl:{}.", physicalClusterId, brokerId, host, port, jmxUrl, e);
} catch (Exception e) {
LOGGER.error("JMX connect exception, host:{} port:{}.", host, port, e);
LOGGER.error("connect JMX failed, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e);
}
// record a connection backoff
BackoffUtils.putNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId), this.jmxConfig.getRetryConnectBackoffTimeUnitMs());
return false;
}
@@ -110,6 +159,11 @@ public class JmxConnectorWrap {
acquire();
MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
return mBeanServerConnection.getAttribute(name, attribute);
} catch (IOException ioe) {
// reset the connection on an IO error
this.closeJmxConnect();
throw ioe;
} finally {
atomicInteger.incrementAndGet();
}
@@ -125,6 +179,11 @@ public class JmxConnectorWrap {
acquire();
MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
return mBeanServerConnection.getAttributes(name, attributes);
} catch (IOException ioe) {
// reset the connection on an IO error
this.closeJmxConnect();
throw ioe;
} finally {
atomicInteger.incrementAndGet();
}
@@ -137,6 +196,11 @@ public class JmxConnectorWrap {
acquire();
MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
return mBeanServerConnection.queryNames(name, query);
} catch (IOException ioe) {
// reset the connection on an IO error
this.closeJmxConnect();
throw ioe;
} finally {
atomicInteger.incrementAndGet();
}
@@ -146,19 +210,21 @@ public class JmxConnectorWrap {
long now = System.currentTimeMillis();
while (true) {
try {
if (System.currentTimeMillis() - now > 60000) {
break;
}
int num = atomicInteger.get();
if (num <= 0) {
Thread.sleep(2);
continue;
BackoffUtils.backoff(2);
}
if (atomicInteger.compareAndSet(num, num - 1)) {
if (atomicInteger.compareAndSet(num, num - 1) || System.currentTimeMillis() - now > 6000) {
break;
}
} catch (Exception e) {
// ignore
}
}
}
private static String buildConnectJmxFailedBackoffEventKey(Long physicalClusterId, Integer brokerId) {
return "CONNECT_JMX_FAILED_BACK_OFF_EVENT_PHY_" + physicalClusterId + "_BROKER_" + brokerId;
}
}

View File

@@ -1,6 +1,17 @@
package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.entity.ao.common.IpPortData;
import com.xiaojukeji.kafka.manager.common.utils.NumberUtils;
import lombok.Data;
import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* @author zengqiao
@@ -10,7 +21,7 @@ import java.util.List;
* Znode structure:
* {
* "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"},
* "endpoints":["SASL_PLAINTEXT://10.179.162.202:9093"],
* "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093"],
* "jmx_port":9999,
* "host":null,
* "timestamp":"1546632983233",
@@ -18,22 +29,48 @@ import java.util.List;
* "version":4,
* "rack": "CY"
* }
*
* {
* "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT","PLAINTEXT":"PLAINTEXT"},
* "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093","PLAINTEXT://127.0.0.1:9092"],
* "jmx_port":8099,
* "host":"127.0.0.1",
* "timestamp":"1628833925822",
* "port":9092,
* "version":4
* }
*
* {
* "listener_security_protocol_map":{"EXTERNAL":"SASL_PLAINTEXT","INTERNAL":"SASL_PLAINTEXT"},
* "endpoints":["EXTERNAL://127.0.0.1:7092","INTERNAL://127.0.0.1:7093"],
* "jmx_port":8099,
* "host":null,
* "timestamp":"1627289710439",
* "port":-1,
* "version":4
* }
*
*/
public class BrokerMetadata implements Cloneable {
@Data
@JsonIgnoreProperties(ignoreUnknown = true)
public class BrokerMetadata implements Serializable {
private static final long serialVersionUID = 3918113492423375809L;
private long clusterId;
private int brokerId;
private List<String> endpoints;
// <EXTERNAL|INTERNAL, <ip, port>>
private Map<String, IpPortData> endpointMap;
private String host;
private int port;
/*
* This is the exact field name stored in ZK; do not rename it
*/
private int jmx_port;
@JsonProperty("jmx_port")
private int jmxPort;
private String version;
@@ -41,91 +78,54 @@ public class BrokerMetadata implements Cloneable {
private String rack;
public long getClusterId() {
return clusterId;
@JsonIgnore
public String getExternalHost() {
if (!endpointMap.containsKey(KafkaConstant.EXTERNAL_KEY)) {
return null;
}
return endpointMap.get(KafkaConstant.EXTERNAL_KEY).getIp();
}
public void setClusterId(long clusterId) {
this.clusterId = clusterId;
@JsonIgnore
public String getInternalHost() {
if (!endpointMap.containsKey(KafkaConstant.INTERNAL_KEY)) {
return null;
}
return endpointMap.get(KafkaConstant.INTERNAL_KEY).getIp();
}
public int getBrokerId() {
return brokerId;
}
public static void parseAndUpdateBrokerMetadata(BrokerMetadata brokerMetadata) {
brokerMetadata.setEndpointMap(new HashMap<>());
public void setBrokerId(int brokerId) {
this.brokerId = brokerId;
}
if (brokerMetadata.getEndpoints().isEmpty()) {
return;
}
public List<String> getEndpoints() {
return endpoints;
}
// example EXTERNAL://10.179.162.202:7092
for (String endpoint: brokerMetadata.getEndpoints()) {
int idx1 = endpoint.indexOf("://");
int idx2 = endpoint.lastIndexOf(":");
if (idx1 == -1 || idx2 == -1 || idx1 == idx2) {
continue;
}
public void setEndpoints(List<String> endpoints) {
this.endpoints = endpoints;
}
String brokerHost = endpoint.substring(idx1 + "://".length(), idx2);
String brokerPort = endpoint.substring(idx2 + 1);
public String getHost() {
return host;
}
brokerMetadata.getEndpointMap().put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort));
public void setHost(String host) {
this.host = host;
}
if (KafkaConstant.EXTERNAL_KEY.equals(endpoint.substring(0, idx1))) {
// prefer the EXTERNAL address for display
brokerMetadata.setHost(brokerHost);
brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort));
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public int getJmxPort() {
return jmx_port;
}
public void setJmxPort(int jmxPort) {
this.jmx_port = jmxPort;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public long getTimestamp() {
return timestamp;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
public String getRack() {
return rack;
}
public void setRack(String rack) {
this.rack = rack;
}
@Override
public String toString() {
return "BrokerMetadata{" +
"clusterId=" + clusterId +
", brokerId=" + brokerId +
", endpoints=" + endpoints +
", host='" + host + '\'' +
", port=" + port +
", jmxPort=" + jmx_port +
", version='" + version + '\'' +
", timestamp=" + timestamp +
", rack='" + rack + '\'' +
'}';
if (null == brokerMetadata.getHost()) {
brokerMetadata.setHost(brokerHost);
brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort));
}
}
}
}
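A small worked example of the parsing above (illustrative demo class; it relies only on the Lombok-generated setters and the static method shown in this diff), feeding the EXTERNAL/INTERNAL znode sample through `parseAndUpdateBrokerMetadata`:
```java
import java.util.Arrays;

import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;

// Illustrative: parse the EXTERNAL/INTERNAL endpoints example from the znode docs.
public class BrokerMetadataParseDemo {
    public static void main(String[] args) {
        BrokerMetadata bm = new BrokerMetadata();
        bm.setEndpoints(Arrays.asList("EXTERNAL://127.0.0.1:7092", "INTERNAL://127.0.0.1:7093"));
        BrokerMetadata.parseAndUpdateBrokerMetadata(bm);

        // endpointMap now holds {EXTERNAL=(127.0.0.1, 7092), INTERNAL=(127.0.0.1, 7093)}
        System.out.println(bm.getExternalHost());              // 127.0.0.1
        System.out.println(bm.getHost() + ":" + bm.getPort()); // 127.0.0.1:7092 (EXTERNAL preferred)
    }
}
```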

View File

@@ -1,9 +1,10 @@
{
"name": "logi-kafka",
"version": "2.4.3",
"version": "2.5.0",
"description": "",
"scripts": {
"start": "webpack-dev-server",
"prestart": "npm install --save-dev webpack-dev-server",
"start": "webpack serve",
"daily-build": "cross-env NODE_ENV=production webpack",
"pre-build": "cross-env NODE_ENV=production webpack",
"prod-build": "cross-env NODE_ENV=production webpack",
@@ -13,18 +14,19 @@
"license": "ISC",
"devDependencies": {
"@hot-loader/react-dom": "^16.8.6",
"@types/echarts": "^4.4.1",
"@types/events": "^3.0.0",
"@types/lodash.debounce": "^4.0.6",
"@types/react": "^16.8.8",
"@types/react-dom": "^16.8.2",
"@types/react-router-dom": "^4.3.1",
"@types/spark-md5": "^3.0.2",
"@webpack-cli/serve": "^1.6.0",
"antd": "^3.26.15",
"clean-webpack-plugin": "^3.0.0",
"clipboard": "^2.0.8",
"cross-env": "^7.0.2",
"css-loader": "^2.1.0",
"echarts": "^4.5.0",
"echarts": "^5.2.1",
"file-loader": "^5.0.2",
"html-webpack-plugin": "^3.2.0",
"increase-memory-limit": "^1.0.7",
@@ -50,11 +52,10 @@
"typescript": "^3.3.3333",
"url-loader": "^4.1.1",
"webpack": "^4.29.6",
"webpack-cli": "^3.2.3",
"webpack-dev-server": "^3.2.1",
"webpack-cli": "^4.9.1",
"xlsx": "^0.16.1"
},
"dependencies": {
"format-to-json": "^1.0.4"
}
}
}

Binary file not shown. (image added, 36 KiB)

View File

Binary image changed (before: 125 KiB, after: 125 KiB)

View File

@@ -1,14 +1,29 @@
import * as React from 'react';
import { Spin, notification } from 'component/antd';
import echarts, { EChartOption } from 'echarts/lib/echarts';
import * as echarts from 'echarts/core';
// import the bar chart
import 'echarts/lib/chart/bar';
import { BarChart } from 'echarts/charts';
// import the tooltip and title components
import 'echarts/lib/component/tooltip';
import 'echarts/lib/component/title';
import 'echarts/lib/component/legend';
import {
TitleComponent,
TooltipComponent,
LegendComponent,
GridComponent,
} from 'echarts/components';
import { CanvasRenderer } from 'echarts/renderers';
import { EChartsOption } from 'echarts';
// register the required components
echarts.use([
TitleComponent,
LegendComponent,
TooltipComponent,
BarChart,
GridComponent,
CanvasRenderer,
]);
interface IChartProps {
getChartData: any;
@@ -38,7 +53,7 @@ export class BarChartComponet extends React.Component<IChartProps> {
this.chart.resize();
}
public isHasData = (data: EChartOption) => {
public isHasData = (data: any) => {
const noData = !(data.series && data.series.length);
this.setState({ noData });
return !noData;
@@ -54,7 +69,7 @@ export class BarChartComponet extends React.Component<IChartProps> {
const chartOptions = getChartData();
if ((typeof chartOptions.then) === 'function') {
return chartOptions.then((data: EChartOption) => {
return chartOptions.then((data: EChartsOption) => {
this.setState({ loading: false });
if (this.isHasData(data)) {

View File

@@ -3,16 +3,34 @@ import { DatePicker, notification, Spin } from 'component/antd';
import moment, { Moment } from 'moment';
import { timeStampStr } from 'constants/strategy';
import { disabledDate } from 'lib/utils';
import echarts from 'echarts';
import * as echarts from 'echarts/core';
// import the bar and line charts
import 'echarts/lib/chart/bar';
import 'echarts/lib/chart/line';
// import the bar and line charts
import { BarChart, LineChart } from 'echarts/charts';
// import the tooltip and title components
import 'echarts/lib/component/tooltip';
import 'echarts/lib/component/title';
import 'echarts/lib/component/legend';
import {
TitleComponent,
TooltipComponent,
LegendComponent,
GridComponent,
MarkLineComponent,
DatasetComponent,
} from 'echarts/components';
import { CanvasRenderer } from 'echarts/renderers';
// register the required components
echarts.use([
TitleComponent,
LegendComponent,
TooltipComponent,
GridComponent,
BarChart,
LineChart,
CanvasRenderer,
DatasetComponent,
MarkLineComponent,
]);
import './index.less';
const { RangePicker } = DatePicker;
@@ -60,6 +78,23 @@ export class ChartWithDatePicker extends React.Component<IChartProps> {
public changeChartOptions(options: any) {
const noData = options.series.length ? false : true;
this.setState({ noData });
options.tooltip.formatter = (params: any) => {
let res =
'<div style=\'margin-bottom:5px;padding:0 12px;width:100%;height:24px;line-height:24px;border-radius:3px;\'><p>' +
params[0].data.time +
' </p></div>';
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < params.length; i++) {
res += `<div key=${params[i].seriesName} style="color: #fff;padding:0 12px;line-height: 24px">
<span style="display:inline-block;margin-right:5px;border-radius:50%;width:10px;height:10px;background-color:${[
params[i].color,
]};"></span>
${params[i].seriesName}
${params[i].data[params[i].seriesName]}
</div>`;
}
return res;
};
this.chart.setOption(options, true);
}
@@ -79,7 +114,7 @@ export class ChartWithDatePicker extends React.Component<IChartProps> {
public render() {
const { customerNode } = this.props;
return (
<div className="status-box" style={{minWidth: '930px'}}>
<div className="status-box" style={{ minWidth: '930px' }}>
<div className="status-graph">
<div className="k-toolbar">
{customerNode}


@@ -1,13 +1,27 @@
import * as React from 'react';
import { Spin } from 'component/antd';
import echarts from 'echarts/lib/echarts';
// Import the pie chart
import 'echarts/lib/chart/pie';
// Import the tooltip and title components
import 'echarts/lib/component/tooltip';
import 'echarts/lib/component/title';
import 'echarts/lib/component/legend';
import * as echarts from 'echarts/core';
// Import the pie chart
import { PieChart } from 'echarts/charts';
// Import the tooltip and title components
import {
TitleComponent,
TooltipComponent,
LegendComponent,
GridComponent,
} from 'echarts/components';
import { CanvasRenderer } from 'echarts/renderers';
// Register the required components
echarts.use([
PieChart,
TitleComponent,
LegendComponent,
TooltipComponent,
GridComponent,
CanvasRenderer,
]);
interface IPieProps {
getChartData: any;
}


@@ -1,25 +1,45 @@
import React from 'react';
import echarts, { EChartOption } from 'echarts/lib/echarts';
import 'echarts/lib/chart/pie';
import 'echarts/lib/chart/line';
import 'echarts/lib/component/legend';
import 'echarts/lib/component/tooltip';
import 'echarts/lib/component/title';
import 'echarts/lib/component/axis';
import * as echarts from 'echarts/core';
import './index.less';
// Import the pie and line charts
import { PieChart, LineChart } from 'echarts/charts';
// Import the tooltip and title components
import {
TitleComponent,
TooltipComponent,
LegendComponent,
GridComponent,
ToolboxComponent,
DatasetComponent,
} from 'echarts/components';
import { CanvasRenderer } from 'echarts/renderers';
// Register the required components
echarts.use([
PieChart,
LineChart,
ToolboxComponent,
TitleComponent,
LegendComponent,
TooltipComponent,
GridComponent,
DatasetComponent,
CanvasRenderer,
]);
export interface IEchartsProps {
width?: number;
height?: number;
options?: EChartOption;
options?: any;
}
export const hasData = (options: EChartOption) => {
export const hasData = (options: any) => {
if (options && options.series && options.series.length) return true;
return false;
};
export default class LineChart extends React.Component<IEchartsProps> {
export default class LineCharts extends React.Component<IEchartsProps> {
public id = null as HTMLDivElement;
public myChart = null as echarts.ECharts;
@@ -27,7 +47,7 @@ export default class LineChart extends React.Component<IEchartsProps> {
public componentDidMount() {
const { options } = this.props;
this.myChart = echarts.init(this.id);
this.myChart.setOption(options);
this.myChart.setOption(options, true);
window.addEventListener('resize', this.resize);
}
@@ -41,7 +61,7 @@ export default class LineChart extends React.Component<IEchartsProps> {
public refresh = () => {
const { options } = this.props;
this.myChart.setOption(options);
this.myChart.setOption(options, true);
}
public resize = () => {
@@ -50,6 +70,6 @@ export default class LineChart extends React.Component<IEchartsProps> {
public render() {
const { height, width } = this.props;
return <div ref={id => this.id = id} style={{width: `${width}px`, height: `${height}px`}} />;
return <div ref={id => this.id = id} style={{ width: `${width}px`, height: `${height}px` }} />;
}
}


@@ -1,4 +1,3 @@
import { EChartOption } from 'echarts/lib/echarts';
import moment from 'moment';
import { ICurve } from 'container/common-curve/config';
import { adminMonitor } from 'store/admin-monitor';
@@ -124,7 +123,7 @@ export interface ICurveType {
type: curveType;
title: string;
curves: ICurve[];
parser: (option: ICurve, data: any[]) => EChartOption;
parser: (option: ICurve, data: any[]) => any;
}
export const byteTypeCurves: ICurveType[] = [


@@ -1,5 +1,5 @@
import moment from 'moment';
import { EChartOption } from 'echarts';
import { EChartsOption } from 'echarts';
import { ICurve, ILineData, baseLineLegend, baseLineGrid, baseAxisStyle, noAxis, UNIT_HEIGHT } from 'container/common-curve/config';
import { IClusterMetrics, ISeriesOption } from 'types/base-type';
import { timeFormat } from 'constants/strategy';
@@ -48,20 +48,20 @@ export const getBaseOptions = (option: ICurve, data: ILineData[]) => {
return Number(i.value);
}),
}],
} as EChartOption;
} as EChartsOption;
};
export const parseLine = (option: ICurve, data: ILineData[]): EChartOption => {
export const parseLine = (option: ICurve, data: ILineData[]): EChartsOption => {
return Object.assign({}, getBaseOptions(option, data), {
legend: {
...baseLineLegend,
bottom: '0',
align: 'auto',
},
}) as EChartOption;
}) as EChartsOption;
};
export const parseBrokerMetricOption = (option: ICurve, data: IClusterMetrics[]): EChartOption => {
export const parseBrokerMetricOption = (option: ICurve, data: IClusterMetrics[]): EChartsOption => {
let name;
let series: ISeriesOption[];
data = data || [];


@@ -6,7 +6,7 @@ import { alarm } from 'store/alarm';
import { observer } from 'mobx-react';
import { handlePageBack } from 'lib/utils';
import LineChart, { hasData } from 'component/chart/line-chart';
import { EChartOption } from 'echarts';
import { EChartsOption } from 'echarts';
import { timeFormat } from 'constants/strategy';
import Url from 'lib/url-parser';
import moment = require('moment');
@@ -40,7 +40,7 @@ export class HistoryDetail extends React.Component {
return <div className="no-data-info" style={{ ...style }} key="loading"><Spin /></div>;
}
public renderEchart = (options: EChartOption, loading = false) => {
public renderEchart = (options: EChartsOption, loading = false) => {
const data = hasData(options);
if (loading) return this.renderLoading(400);
if (!data) return this.renderNoData(400);
@@ -51,7 +51,7 @@ export class HistoryDetail extends React.Component {
}
public renderHistoricalTraffic(metric: IMonitorMetric) {
const option = this.getChartOption() as EChartOption;
const option = this.getChartOption() as EChartsOption;
return (
<>


@@ -1,5 +1,4 @@
import { EChartOption } from 'echarts/lib/echarts';
import moment from 'moment';
import { EChartsOption } from 'echarts';
export interface ILineData {
value: number;
@@ -9,7 +8,7 @@ export interface ICurve {
title?: string;
path: string;
colors: string[];
parser?: (option: ICurve, data: ILineData) => EChartOption;
parser?: (option: ICurve, data: ILineData) => EChartsOption;
message?: string;
unit?: string;
api?: any;
@@ -69,13 +68,13 @@ export const noAxis = {
},
};
export const getHight = (options: EChartOption) => {
let grid = options ? options.grid as EChartOption.Grid : null;
export const getHight = (options: any) => {
let grid = options ? options.grid : null;
if (!options || !grid) grid = baseLineGrid;
return Number(grid.height) + getLegendHight(options) + Number(grid.top) + LEGEND_PADDING + UNIT_HEIGHT;
};
export const getLegendHight = (options: EChartOption) => {
export const getLegendHight = (options: any) => {
if (!options) return 0;
if (options.legend.show === false) return 0;
const legendHight = options.legend.textStyle.lineHeight + defaultLegendPadding;


@@ -1,4 +1,4 @@
import { EChartOption } from 'echarts';
import { EChartsOption } from 'echarts';
import { observer } from 'mobx-react';
import React from 'react';
import { curveInfo } from 'store/curve-info';
@@ -10,7 +10,7 @@ import LineChart, { hasData } from 'component/chart/line-chart';
export interface ICommonCurveProps {
options: ICurve;
parser?: (option: ICurve, data: any[]) => EChartOption;
parser?: (option: ICurve, data: any[]) => any;
}
@observer
@@ -41,7 +41,7 @@ export class CommonCurve extends React.Component<ICommonCurveProps> {
fullScreen.show(this.renderCurve(options, loading, true));
}
public renderOpBtns = (options: EChartOption, expand = false) => {
public renderOpBtns = (options: EChartsOption, expand = false) => {
const data = hasData(options);
return (
<div className="charts-op" key="op">
@@ -85,7 +85,7 @@ export class CommonCurve extends React.Component<ICommonCurveProps> {
return <div className="no-data-info" style={{ ...style }} key="loading"><Spin /></div>;
}
public renderEchart = (options: EChartOption, loading = false) => {
public renderEchart = (options: EChartsOption, loading = false) => {
const height = getHight(options);
const data = hasData(options);
@@ -94,7 +94,7 @@ export class CommonCurve extends React.Component<ICommonCurveProps> {
return <LineChart height={height} options={options} key="chart" />;
}
public renderCurve = (options: EChartOption, loading: boolean, expand = false) => {
public renderCurve = (options: any, loading: boolean, expand = false) => {
const data = hasData(options);
return (
<div className="common-chart-wrapper" >


@@ -7,7 +7,7 @@ import { urlPrefix } from 'constants/left-menu';
import { region, IRegionIdcs } from 'store/region';
import logoUrl from '../../assets/image/kafka-logo.png';
import userIcon from '../../assets/image/normal.png';
import weChat from '../../assets/image/wechat.jpeg';
import weChat from '../../assets/image/weChat.png';
import { users } from 'store/users';
import { observer } from 'mobx-react';
import { Link } from 'react-router-dom';
@@ -60,8 +60,8 @@ export const Header = observer((props: IHeader) => {
});
};
const content = (
<div style={{ height: '250px', padding: '5px' }} className="kafka-avatar-img">
<img style={{ width: '190px', height: '246px' }} src={weChat} alt="" />
<div style={{ height: '200px', padding: '5px' }} className="kafka-avatar-img">
<img style={{ width: '190px', height: '190px' }} src={weChat} alt="" />
</div>
);
const helpCenter = (
@@ -144,8 +144,8 @@ export const Header = observer((props: IHeader) => {
<div className="kafka-header-container">
<div className="left-content">
<img className="kafka-header-icon" src={logoUrl} alt="" />
<span className="kafka-header-text">Kafka Manager</span>
<a className='kafka-header-version' href="https://github.com/didi/Logi-KafkaManager/releases" target='_blank'>v2.4.2</a>
<span className="kafka-header-text">LogiKM</span>
<a className='kafka-header-version' href="https://github.com/didi/Logi-KafkaManager/releases" target='_blank'>v2.5.0</a>
{/* Add the version hyperlink */}
</div>
<div className="mid-content">


@@ -115,11 +115,19 @@ export class OrderList extends SearchAndFilterContainer {
status,
{
title: '申请时间',
dataIndex: 'gmtTime',
key: 'gmtTime',
sorter: (a: IBaseOrder, b: IBaseOrder) => b.gmtTime - a.gmtTime,
render: (t: number) => moment(t).format(timeFormat),
}, {
dataIndex: 'gmtCreate',
key: 'gmtCreate',
sorter: (a: IBaseOrder, b: IBaseOrder) => b.gmtCreate - a.gmtCreate,
render: (t: number) => t ? moment(t).format(timeFormat) : '-',
},
{
title: '审批时间',
dataIndex: 'gmtHandle',
key: 'gmtHandle',
sorter: (a: IBaseOrder, b: IBaseOrder) => b.gmtHandle - a.gmtHandle,
render: (t: number) => t ? moment(t).format(timeFormat) : '-',
},
{
title: '操作',
key: 'operation',
dataIndex: 'operation',


@@ -1,12 +1,15 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=2">
<title>KafkaManager</title>
<title>LogiKM</title>
</head>
<body>
<div id="root"></div>
<div id="modal"></div>
</body>
</html>


@@ -1,6 +1,6 @@
import { observable, action } from 'mobx';
import moment = require('moment');
import { EChartOption } from 'echarts/lib/echarts';
import { EChartsOption } from 'echarts';
import { ICurve } from 'container/common-curve/config';
import { curveKeys, PERIOD_RADIO_MAP } from 'container/admin/data-curve/config';
import { timeFormat } from 'constants/strategy';
@@ -13,7 +13,7 @@ class CurveInfo {
public timeRange: [moment.Moment, moment.Moment] = PERIOD_RADIO_MAP.get(this.periodKey).dateRange;
@observable
public curveData: { [key: string]: EChartOption } = {};
public curveData: { [key: string]: EChartsOption } = {};
@observable
public curveLoading: { [key: string]: boolean } = {};
@@ -25,7 +25,7 @@ class CurveInfo {
public currentOperator: string;
@action.bound
public setCurveData(key: curveKeys | string, data: EChartOption) {
public setCurveData(key: curveKeys | string, data: EChartsOption) {
this.curveData[key] = data;
}
@@ -59,7 +59,7 @@ class CurveInfo {
public getCommonCurveData = (
options: ICurve,
parser: (option: ICurve, data: any[]) => EChartOption,
parser: (option: ICurve, data: any[]) => EChartsOption,
reload?: boolean) => {
const { path } = options;
this.setCurveData(path, null);


@@ -122,11 +122,11 @@ module.exports = {
},
},
devServer: {
contentBase: outPath,
// contentBase: outPath,
host: '127.0.0.1',
port: 1025,
hot: true,
disableHostCheck: true,
// disableHostCheck: true,
historyApiFallback: true,
proxy: {
'/api/v1/': {


@@ -24,7 +24,6 @@
<java_target_version>1.8</java_target_version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
<spring-version>5.1.3.RELEASE</spring-version>
</properties>
<dependencies>
@@ -38,12 +37,10 @@
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>
<version>${spring-version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring-version}</version>
</dependency>
<!-- javax -->


@@ -17,6 +17,9 @@ public class ConsumerMetadataCache {
private static final Map<Long, ConsumerMetadata> CG_METADATA_IN_BK_MAP = new ConcurrentHashMap<>();
private ConsumerMetadataCache() {
}
public static void putConsumerMetadataInZK(Long clusterId, ConsumerMetadata consumerMetadata) {
if (clusterId == null || consumerMetadata == null) {
return;


@@ -1,7 +1,7 @@
package com.xiaojukeji.kafka.manager.service.cache;
import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.factory.KafkaConsumerFactory;
import kafka.admin.AdminClient;
@@ -14,6 +14,8 @@ import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.util.Map;
import java.util.Properties;
@@ -25,20 +27,36 @@ import java.util.concurrent.locks.ReentrantLock;
* @author zengqiao
* @date 19/12/24
*/
@Service
public class KafkaClientPool {
private final static Logger LOGGER = LoggerFactory.getLogger(KafkaClientPool.class);
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaClientPool.class);
@Value(value = "${client-pool.kafka-consumer.min-idle-client-num:24}")
private Integer kafkaConsumerMinIdleClientNum;
@Value(value = "${client-pool.kafka-consumer.max-idle-client-num:24}")
private Integer kafkaConsumerMaxIdleClientNum;
@Value(value = "${client-pool.kafka-consumer.max-total-client-num:24}")
private Integer kafkaConsumerMaxTotalClientNum;
@Value(value = "${client-pool.kafka-consumer.borrow-timeout-unit-ms:3000}")
private Integer kafkaConsumerBorrowTimeoutUnitMs;
/**
* AdminClient
*/
private static Map<Long, AdminClient> AdminClientMap = new ConcurrentHashMap<>();
private static final Map<Long, AdminClient> ADMIN_CLIENT_MAP = new ConcurrentHashMap<>();
private static Map<Long, KafkaProducer<String, String>> KAFKA_PRODUCER_MAP = new ConcurrentHashMap<>();
private static final Map<Long, KafkaProducer<String, String>> KAFKA_PRODUCER_MAP = new ConcurrentHashMap<>();
private static Map<Long, GenericObjectPool<KafkaConsumer>> KAFKA_CONSUMER_POOL = new ConcurrentHashMap<>();
private static final Map<Long, GenericObjectPool<KafkaConsumer<String, String>>> KAFKA_CONSUMER_POOL = new ConcurrentHashMap<>();
private static ReentrantLock lock = new ReentrantLock();
private KafkaClientPool() {
}
private static void initKafkaProducerMap(Long clusterId) {
ClusterDO clusterDO = PhysicalClusterMetadataManager.getClusterFromCache(clusterId);
if (clusterDO == null) {
@@ -55,7 +73,7 @@ public class KafkaClientPool {
properties.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
properties.setProperty(ProducerConfig.LINGER_MS_CONFIG, "10");
properties.setProperty(ProducerConfig.RETRIES_CONFIG, "3");
KAFKA_PRODUCER_MAP.put(clusterId, new KafkaProducer<String, String>(properties));
KAFKA_PRODUCER_MAP.put(clusterId, new KafkaProducer<>(properties));
} catch (Exception e) {
LOGGER.error("create kafka producer failed, clusterDO:{}.", clusterDO, e);
} finally {
@@ -77,25 +95,22 @@ public class KafkaClientPool {
if (ValidateUtils.isNull(kafkaProducer)) {
return false;
}
kafkaProducer.send(new ProducerRecord<String, String>(topicName, data));
kafkaProducer.send(new ProducerRecord<>(topicName, data));
return true;
}
private static void initKafkaConsumerPool(ClusterDO clusterDO) {
private void initKafkaConsumerPool(ClusterDO clusterDO) {
lock.lock();
try {
GenericObjectPool<KafkaConsumer> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId());
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId());
if (objectPool != null) {
return;
}
GenericObjectPoolConfig config = new GenericObjectPoolConfig();
config.setMaxIdle(24);
config.setMinIdle(24);
config.setMaxTotal(24);
KAFKA_CONSUMER_POOL.put(
clusterDO.getId(),
new GenericObjectPool<KafkaConsumer>(new KafkaConsumerFactory(clusterDO), config)
);
GenericObjectPoolConfig<KafkaConsumer<String, String>> config = new GenericObjectPoolConfig<>();
config.setMaxIdle(kafkaConsumerMaxIdleClientNum);
config.setMinIdle(kafkaConsumerMinIdleClientNum);
config.setMaxTotal(kafkaConsumerMaxTotalClientNum);
KAFKA_CONSUMER_POOL.put(clusterDO.getId(), new GenericObjectPool<>(new KafkaConsumerFactory(clusterDO), config));
} catch (Exception e) {
LOGGER.error("create kafka consumer pool failed, clusterDO:{}.", clusterDO, e);
} finally {
@@ -106,7 +121,7 @@ public class KafkaClientPool {
public static void closeKafkaConsumerPool(Long clusterId) {
lock.lock();
try {
GenericObjectPool<KafkaConsumer> objectPool = KAFKA_CONSUMER_POOL.remove(clusterId);
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.remove(clusterId);
if (objectPool == null) {
return;
}
@@ -118,11 +133,11 @@ public class KafkaClientPool {
}
}
public static KafkaConsumer borrowKafkaConsumerClient(ClusterDO clusterDO) {
public KafkaConsumer<String, String> borrowKafkaConsumerClient(ClusterDO clusterDO) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
}
GenericObjectPool<KafkaConsumer> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId());
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId());
if (ValidateUtils.isNull(objectPool)) {
initKafkaConsumerPool(clusterDO);
objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId());
@@ -132,18 +147,18 @@ public class KafkaClientPool {
}
try {
return objectPool.borrowObject(3000);
return objectPool.borrowObject(kafkaConsumerBorrowTimeoutUnitMs);
} catch (Exception e) {
LOGGER.error("borrow kafka consumer client failed, clusterDO:{}.", clusterDO, e);
}
return null;
}
public static void returnKafkaConsumerClient(Long physicalClusterId, KafkaConsumer kafkaConsumer) {
public static void returnKafkaConsumerClient(Long physicalClusterId, KafkaConsumer<String, String> kafkaConsumer) {
if (ValidateUtils.isNull(physicalClusterId) || ValidateUtils.isNull(kafkaConsumer)) {
return;
}
GenericObjectPool<KafkaConsumer> objectPool = KAFKA_CONSUMER_POOL.get(physicalClusterId);
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.get(physicalClusterId);
if (ValidateUtils.isNull(objectPool)) {
return;
}
@@ -155,7 +170,7 @@ public class KafkaClientPool {
}
public static AdminClient getAdminClient(Long clusterId) {
AdminClient adminClient = AdminClientMap.get(clusterId);
AdminClient adminClient = ADMIN_CLIENT_MAP.get(clusterId);
if (adminClient != null) {
return adminClient;
}
@@ -166,26 +181,26 @@ public class KafkaClientPool {
Properties properties = createProperties(clusterDO, false);
lock.lock();
try {
adminClient = AdminClientMap.get(clusterId);
adminClient = ADMIN_CLIENT_MAP.get(clusterId);
if (adminClient != null) {
return adminClient;
}
AdminClientMap.put(clusterId, AdminClient.create(properties));
ADMIN_CLIENT_MAP.put(clusterId, AdminClient.create(properties));
} catch (Exception e) {
LOGGER.error("create kafka admin client failed, clusterId:{}.", clusterId, e);
} finally {
lock.unlock();
}
return AdminClientMap.get(clusterId);
return ADMIN_CLIENT_MAP.get(clusterId);
}
public static void closeAdminClient(ClusterDO cluster) {
if (AdminClientMap.containsKey(cluster.getId())) {
AdminClientMap.get(cluster.getId()).close();
if (ADMIN_CLIENT_MAP.containsKey(cluster.getId())) {
ADMIN_CLIENT_MAP.get(cluster.getId()).close();
}
}
public static Properties createProperties(ClusterDO clusterDO, Boolean serialize) {
public static Properties createProperties(ClusterDO clusterDO, boolean serialize) {
Properties properties = new Properties();
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterDO.getBootstrapServers());
if (serialize) {
@@ -198,8 +213,7 @@ public class KafkaClientPool {
if (ValidateUtils.isBlank(clusterDO.getSecurityProperties())) {
return properties;
}
Properties securityProperties = JSONObject.parseObject(clusterDO.getSecurityProperties(), Properties.class);
properties.putAll(securityProperties);
properties.putAll(JsonUtils.stringToObj(clusterDO.getSecurityProperties(), Properties.class));
return properties;
}
}
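
To make the new pool contract concrete, here is a minimal caller sketch, assuming the project's `ClusterDO` and the `KafkaClientPool` methods shown above; `endOffsets` is a standard `KafkaConsumer` API. The borrow must always be paired with a return, otherwise the underlying `GenericObjectPool` leaks capacity:

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class EndOffsetFetcher {
    private final KafkaClientPool kafkaClientPool; // injected by Spring in real code

    public EndOffsetFetcher(KafkaClientPool kafkaClientPool) {
        this.kafkaClientPool = kafkaClientPool;
    }

    public Map<TopicPartition, Long> fetchEndOffsets(ClusterDO clusterDO,
                                                     List<TopicPartition> partitions) {
        KafkaConsumer<String, String> consumer =
                kafkaClientPool.borrowKafkaConsumerClient(clusterDO);
        if (consumer == null) {
            return Collections.emptyMap();
        }
        try {
            return consumer.endOffsets(partitions);
        } finally {
            // Always return the client so the pool does not leak capacity.
            KafkaClientPool.returnKafkaConsumerClient(clusterDO.getId(), consumer);
        }
    }
}
```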


@@ -14,7 +14,10 @@ public class KafkaMetricsCache {
/**
* <clusterId, Metrics List>
*/
private static Map<Long, Map<String, TopicMetrics>> TopicMetricsMap = new ConcurrentHashMap<>();
private static final Map<Long, Map<String, TopicMetrics>> TOPIC_METRICS_MAP = new ConcurrentHashMap<>();
private KafkaMetricsCache() {
}
public static void putTopicMetricsToCache(Long clusterId, List<TopicMetrics> dataList) {
if (clusterId == null || dataList == null) {
@@ -24,22 +27,22 @@ public class KafkaMetricsCache {
for (TopicMetrics topicMetrics : dataList) {
subMetricsMap.put(topicMetrics.getTopicName(), topicMetrics);
}
TopicMetricsMap.put(clusterId, subMetricsMap);
TOPIC_METRICS_MAP.put(clusterId, subMetricsMap);
}
public static Map<String, TopicMetrics> getTopicMetricsFromCache(Long clusterId) {
return TopicMetricsMap.getOrDefault(clusterId, Collections.emptyMap());
return TOPIC_METRICS_MAP.getOrDefault(clusterId, Collections.emptyMap());
}
public static Map<Long, Map<String, TopicMetrics>> getAllTopicMetricsFromCache() {
return TopicMetricsMap;
return TOPIC_METRICS_MAP;
}
public static TopicMetrics getTopicMetricsFromCache(Long clusterId, String topicName) {
if (clusterId == null || topicName == null) {
return null;
}
Map<String, TopicMetrics> subMap = TopicMetricsMap.getOrDefault(clusterId, Collections.emptyMap());
Map<String, TopicMetrics> subMap = TOPIC_METRICS_MAP.getOrDefault(clusterId, Collections.emptyMap());
return subMap.get(topicName);
}
}


@@ -160,7 +160,7 @@ public class LogicalClusterMetadataManager {
public void flush() {
List<LogicalClusterDO> logicalClusterDOList = logicalClusterService.listAll();
if (ValidateUtils.isNull(logicalClusterDOList)) {
logicalClusterDOList = Collections.EMPTY_LIST;
logicalClusterDOList = Collections.emptyList();
}
Set<Long> inDbLogicalClusterIds = logicalClusterDOList.stream()
.map(LogicalClusterDO::getId)
@@ -208,7 +208,8 @@ public class LogicalClusterMetadataManager {
// Compute the logical-cluster -> topic-name mapping
Set<String> topicNameSet = PhysicalClusterMetadataManager.getBrokerTopicNum(
logicalClusterDO.getClusterId(),
brokerIdSet);
brokerIdSet
);
LOGICAL_CLUSTER_ID_TOPIC_NAME_MAP.put(logicalClusterDO.getId(), topicNameSet);
// Compute the topic-name -> logical-cluster mapping


@@ -39,7 +39,7 @@ import java.util.concurrent.ConcurrentHashMap;
*/
@Service
public class PhysicalClusterMetadataManager {
private final static Logger LOGGER = LoggerFactory.getLogger(PhysicalClusterMetadataManager.class);
private static final Logger LOGGER = LoggerFactory.getLogger(PhysicalClusterMetadataManager.class);
@Autowired
private ControllerDao controllerDao;
@@ -50,22 +50,25 @@ public class PhysicalClusterMetadataManager {
@Autowired
private ClusterService clusterService;
private final static Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();
@Autowired
private ThreadPool threadPool;
private final static Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
private static final Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();
private final static Map<Long, ZkConfigImpl> ZK_CONFIG_MAP = new ConcurrentHashMap<>();
private static final Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
private final static Map<Long, Map<String, TopicMetadata>> TOPIC_METADATA_MAP = new ConcurrentHashMap<>();
private static final Map<Long, ZkConfigImpl> ZK_CONFIG_MAP = new ConcurrentHashMap<>();
private final static Map<Long, Map<String, Properties>> TOPIC_PROPERTIES_MAP = new ConcurrentHashMap<>();
private static final Map<Long, Map<String, TopicMetadata>> TOPIC_METADATA_MAP = new ConcurrentHashMap<>();
private final static Map<Long, Map<Integer, BrokerMetadata>> BROKER_METADATA_MAP = new ConcurrentHashMap<>();
private static final Map<Long, Map<String, Properties>> TOPIC_PROPERTIES_MAP = new ConcurrentHashMap<>();
private static final Map<Long, Map<Integer, BrokerMetadata>> BROKER_METADATA_MAP = new ConcurrentHashMap<>();
/**
* JMX connections, established lazily
*/
private final static Map<Long, Map<Integer, JmxConnectorWrap>> JMX_CONNECTOR_MAP = new ConcurrentHashMap<>();
private static final Map<Long, Map<Integer, JmxConnectorWrap>> JMX_CONNECTOR_MAP = new ConcurrentHashMap<>();
/**
* Kafka broker versions, fetched lazily
@@ -125,7 +128,7 @@ public class PhysicalClusterMetadataManager {
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);
// Add topic monitoring
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig);
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, threadPool);
topicListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener);
@@ -314,7 +317,7 @@ public class PhysicalClusterMetadataManager {
metadataMap.put(brokerId, brokerMetadata);
Map<Integer, JmxConnectorWrap> jmxMap = JMX_CONNECTOR_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig));
jmxMap.put(brokerId, new JmxConnectorWrap(clusterId, brokerId, brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig));
JMX_CONNECTOR_MAP.put(clusterId, jmxMap);
Map<Integer, KafkaVersion> versionMap = KAFKA_VERSION_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
@@ -398,7 +401,7 @@ public class PhysicalClusterMetadataManager {
KafkaBrokerRoleEnum roleEnum) {
BrokerMetadata brokerMetadata =
PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
if (ValidateUtils.isNull(brokerMetadata)) {
if (brokerMetadata == null) {
return;
}
String hostname = brokerMetadata.getHost().replace(KafkaConstant.BROKER_HOST_NAME_SUFFIX, "");
@@ -438,7 +441,7 @@ public class PhysicalClusterMetadataManager {
KafkaBrokerRoleEnum roleEnum) {
BrokerMetadata brokerMetadata =
PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
if (ValidateUtils.isNull(brokerMetadata)) {
if (brokerMetadata == null) {
return;
}
@@ -539,9 +542,12 @@ public class PhysicalClusterMetadataManager {
}
public static Set<String> getBrokerTopicNum(Long clusterId, Set<Integer> brokerIdSet) {
Set<String> topicNameSet = new HashSet<>();
Map<String, TopicMetadata> metadataMap = TOPIC_METADATA_MAP.get(clusterId);
if (metadataMap == null) {
return new HashSet<>();
}
Set<String> topicNameSet = new HashSet<>();
for (String topicName: metadataMap.keySet()) {
try {
TopicMetadata tm = metadataMap.get(topicName);


@@ -1,37 +1,63 @@
package com.xiaojukeji.kafka.manager.service.cache;
import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.util.concurrent.*;
import javax.annotation.PostConstruct;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* @author zengqiao
* @date 20/8/24
*/
@Service
public class ThreadPool {
private static final ExecutorService COLLECT_METRICS_THREAD_POOL = new ThreadPoolExecutor(
256,
256,
120L,
TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
new DefaultThreadFactory("Collect-Metrics-Thread")
);
private static final ExecutorService API_CALL_THREAD_POOL = new ThreadPoolExecutor(
16,
16,
120L,
TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
new DefaultThreadFactory("Api-Call-Thread")
);
@Value(value = "${thread-pool.collect-metrics.thread-num:256}")
private Integer collectMetricsThreadNum;
public static void submitCollectMetricsTask(Runnable collectMetricsTask) {
COLLECT_METRICS_THREAD_POOL.submit(collectMetricsTask);
@Value(value = "${thread-pool.collect-metrics.queue-size:10000}")
private Integer collectMetricsQueueSize;
@Value(value = "${thread-pool.api-call.thread-num:16}")
private Integer apiCallThreadNum;
@Value(value = "${thread-pool.api-call.queue-size:10000}")
private Integer apiCallQueueSize;
private ThreadPoolExecutor collectMetricsThreadPool;
private ThreadPoolExecutor apiCallThreadPool;
@PostConstruct
public void init() {
collectMetricsThreadPool = new ThreadPoolExecutor(
collectMetricsThreadNum,
collectMetricsThreadNum,
120L,
TimeUnit.SECONDS,
new LinkedBlockingQueue<>(collectMetricsQueueSize),
new DefaultThreadFactory("TaskThreadPool")
);
apiCallThreadPool = new ThreadPoolExecutor(
apiCallThreadNum,
apiCallThreadNum,
120L,
TimeUnit.SECONDS,
new LinkedBlockingQueue<>(apiCallQueueSize),
new DefaultThreadFactory("ApiThreadPool")
);
}
public static void submitApiCallTask(Runnable apiCallTask) {
API_CALL_THREAD_POOL.submit(apiCallTask);
public void submitCollectMetricsTask(Long clusterId, Runnable collectMetricsTask) {
collectMetricsThreadPool.submit(collectMetricsTask);
}
public void submitApiCallTask(Long clusterId, Runnable apiCallTask) {
apiCallThreadPool.submit(apiCallTask);
}
}
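
A brief usage sketch (the surrounding service is hypothetical) mirroring the call sites updated elsewhere in this diff; pool sizes and queue bounds now come from properties such as `thread-pool.collect-metrics.thread-num`, with the fallbacks shown in the `@Value` defaults above:

```java
import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class MetricsCollector {
    @Autowired
    private ThreadPool threadPool; // the Spring-managed pool defined above

    public void collectAsync(Long clusterId, List<Integer> brokerIds) {
        for (Integer brokerId : brokerIds) {
            // Each task runs on the bounded collect-metrics executor.
            threadPool.submitCollectMetricsTask(clusterId,
                    () -> collectOneBroker(clusterId, brokerId));
        }
    }

    private void collectOneBroker(Long clusterId, Integer brokerId) {
        // placeholder for the actual JMX collection logic
    }
}
```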


@@ -13,4 +13,12 @@ public interface TopicExpiredService {
List<TopicExpiredData> getExpiredTopicDataList(String username);
ResultStatus retainExpiredTopic(Long physicalClusterId, String topicName, Integer retainDays);
/**
* Delete by topic name
* @param clusterId cluster id
* @param topicName topic name
* @return int
*/
int deleteByTopicName(Long clusterId, String topicName);
}


@@ -185,7 +185,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(gatewayConfigDO.getType());
Long version = 1L;
for (GatewayConfigDO elem: gatewayConfigDOList) {
if (elem.getVersion() > version) {
if (elem.getVersion() >= version) {
// Whenever an existing version is >= the candidate, bump to existing + 1
version = elem.getVersion() + 1L;
}
}
@@ -204,6 +205,7 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
@Override
public Result deleteById(Long id) {
try {
// TODO: deletion must not just remove the row; the version also needs to be bumped
if (gatewayConfigDao.deleteById(id) > 0) {
return Result.buildSuc();
}
@@ -232,7 +234,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(newGatewayConfigDO.getType());
Long version = 1L;
for (GatewayConfigDO elem: gatewayConfigDOList) {
if (elem.getVersion() > version) {
if (elem.getVersion() >= version) {
// Whenever an existing version is >= the candidate, bump to existing + 1
version = elem.getVersion() + 1L;
}
}
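
The switch from `>` to `>=` matters: with `>` and a single existing config already at version 1, the loop left the candidate at 1, so the version never advanced. A compact equivalent of the fixed computation, as a sketch:

```java
import java.util.List;

public final class VersionUtil {
    // Equivalent to the fixed loop: next version = max(existing versions) + 1,
    // i.e. strictly greater than every stored version.
    public static long nextVersion(List<GatewayConfigDO> existing) {
        return existing.stream()
                .mapToLong(GatewayConfigDO::getVersion)
                .max()
                .orElse(0L) + 1L;
    }
}
```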


@@ -43,6 +43,9 @@ public class AdminServiceImpl implements AdminService {
@Autowired
private TopicManagerService topicManagerService;
@Autowired
private TopicExpiredService topicExpiredService;
@Autowired
private TopicService topicService;
@@ -143,6 +146,7 @@ public class AdminServiceImpl implements AdminService {
// 3. Delete the topic from the database
topicManagerService.deleteByTopicName(clusterDO.getId(), topicName);
topicExpiredService.deleteByTopicName(clusterDO.getId(), topicName);
// 4. Delete the authority records from the database
authorityService.deleteAuthorityByTopic(clusterDO.getId(), topicName);


@@ -61,6 +61,9 @@ public class BrokerServiceImpl implements BrokerService {
@Autowired
private PhysicalClusterMetadataManager physicalClusterMetadataManager;
@Autowired
private ThreadPool threadPool;
@Override
public ClusterBrokerStatus getClusterBrokerStatus(Long clusterId) {
// Replica sync status
@@ -201,7 +204,7 @@ public class BrokerServiceImpl implements BrokerService {
return getBrokerMetricsFromJmx(clusterId, brokerId, metricsCode);
}
});
ThreadPool.submitApiCallTask(taskList[i]);
threadPool.submitApiCallTask(clusterId, taskList[i]);
}
List<BrokerMetrics> metricsList = new ArrayList<>(brokerIdSet.size());
for (int i = 0; i < brokerIdList.size(); i++) {


@@ -19,6 +19,8 @@ import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -210,7 +212,7 @@ public class ClusterServiceImpl implements ClusterService {
ZooKeeper zk = null;
try {
zk = new ZooKeeper(zookeeper, 1000, null);
zk = new ZooKeeper(zookeeper, 1000, watchedEvent -> LOGGER.info(" receive event : " + watchedEvent.getType().name()));
for (int i = 0; i < 15; ++i) {
if (zk.getState().isConnected()) {
// Only the connected state proves the address is valid
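
Condensed, the validation pattern looks roughly like the sketch below (the poll interval is assumed; the lambda replaces the former `null` watcher, so connection events are now logged rather than silently dropped):

```java
import org.apache.zookeeper.ZooKeeper;

public final class ZkAddressValidator {
    public static boolean isValid(String zookeeperAddress) throws Exception {
        ZooKeeper zk = new ZooKeeper(zookeeperAddress, 1000,
                watchedEvent -> System.out.println("receive event: " + watchedEvent.getType().name()));
        try {
            for (int i = 0; i < 15; ++i) {
                if (zk.getState().isConnected()) {
                    return true;   // only a CONNECTED state proves the address works
                }
                Thread.sleep(100); // assumed poll interval
            }
            return false;
        } finally {
            zk.close();
        }
    }
}
```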


@@ -22,6 +22,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.regex.Pattern;
/**
* @author zengqiao
@@ -240,9 +241,11 @@ public class ExpertServiceImpl implements ExpertService {
return new ArrayList<>();
}
// Collect the expired topics that meet the criteria
List<TopicExpiredDO> filteredExpiredTopicList = new ArrayList<>();
for (TopicExpiredDO elem: expiredTopicList) {
if (config.getIgnoreClusterIdList().contains(elem.getClusterId())) {
// Skip ignored clusters, and use the filter regex to drop topic names that should not be treated as expired
if (config.getIgnoreClusterIdList().contains(elem.getClusterId()) || Pattern.matches(config.getFilterRegex(), elem.getTopicName())) {
continue;
}
filteredExpiredTopicList.add(elem);
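
For illustration, a runnable sketch of the regex filter (the pattern value is hypothetical; `config.getFilterRegex()` supplies the real one). Note that `Pattern.matches` recompiles the pattern on every call, so precompiling it outside the loop would be the usual optimization for large topic lists:

```java
import java.util.regex.Pattern;

public class FilterRegexDemo {
    public static void main(String[] args) {
        String filterRegex = "^(test|tmp)_.*"; // hypothetical config value
        // Topics whose names match are skipped by the expired-topic scan:
        System.out.println(Pattern.matches(filterRegex, "test_orders")); // true  -> skipped
        System.out.println(Pattern.matches(filterRegex, "orders"));      // false -> kept
    }
}
```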


@@ -39,6 +39,9 @@ public class JmxServiceImpl implements JmxService {
@Autowired
private PhysicalClusterMetadataManager physicalClusterMetadataManager;
@Autowired
private ThreadPool threadPool;
@Override
public BrokerMetrics getBrokerMetrics(Long clusterId, Integer brokerId, Integer metricsCode) {
if (clusterId == null || brokerId == null || metricsCode == null) {
@@ -98,7 +101,7 @@ public class JmxServiceImpl implements JmxService {
);
}
});
ThreadPool.submitCollectMetricsTask(taskList[i]);
threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
List<TopicMetrics> metricsList = new ArrayList<>();
@@ -303,7 +306,7 @@ public class JmxServiceImpl implements JmxService {
return metricsList;
}
});
ThreadPool.submitCollectMetricsTask(taskList[i]);
threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
Map<String, TopicMetrics> metricsMap = new HashMap<>();


@@ -2,6 +2,8 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
import com.xiaojukeji.kafka.manager.common.events.RegionCreatedEvent;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.dao.RegionDao;
@@ -59,6 +61,8 @@ public class RegionServiceImpl implements RegionService {
return ResultStatus.BROKER_NOT_EXIST;
}
if (regionDao.insert(regionDO) > 0) {
// Publish the region-created event
SpringTool.publish(new RegionCreatedEvent(this, regionDO));
return ResultStatus.SUCCESS;
}
} catch (DuplicateKeyException e) {
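
`SpringTool.publish` presumably wraps Spring's `ApplicationEventPublisher`; a hypothetical subscriber for the new event could look like this (the diff only shows the publishing side):

```java
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

@Component
public class RegionCreatedListener {
    // Hypothetical subscriber: runs after RegionServiceImpl publishes the event.
    @EventListener
    public void onRegionCreated(RegionCreatedEvent event) {
        // e.g. refresh caches or capacity planning that depend on region layout
    }
}
```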


@@ -75,4 +75,14 @@ public class TopicExpiredServiceImpl implements TopicExpiredService {
}
return ResultStatus.MYSQL_ERROR;
}
@Override
public int deleteByTopicName(Long clusterId, String topicName) {
try {
return topicExpiredDao.deleteByName(clusterId, topicName);
} catch (Exception e) {
LOGGER.error("delete topic failed, clusterId:{} topicName:{}", clusterId, topicName, e);
}
return 0;
}
}


@@ -210,7 +210,7 @@ public class TopicManagerServiceImpl implements TopicManagerService {
}
}
// Add traffic info
// Add traffic and description info
Map<Long, Map<String, TopicMetrics>> metricMap = KafkaMetricsCache.getAllTopicMetricsFromCache();
for (MineTopicSummary mineTopicSummary : summaryList) {
TopicMetrics topicMetrics = getTopicMetricsFromCacheOrJmx(
@@ -219,6 +219,10 @@ public class TopicManagerServiceImpl implements TopicManagerService {
metricMap);
mineTopicSummary.setBytesIn(topicMetrics.getSpecifiedMetrics("BytesInPerSecOneMinuteRate"));
mineTopicSummary.setBytesOut(topicMetrics.getSpecifiedMetrics("BytesOutPerSecOneMinuteRate"));
// Add the topic description
TopicDO topicDO = topicDao.getByTopicName(mineTopicSummary.getPhysicalClusterId(), mineTopicSummary.getTopicName());
mineTopicSummary.setDescription(topicDO.getDescription());
}
return summaryList;
}


@@ -87,6 +87,9 @@ public class TopicServiceImpl implements TopicService {
@Autowired
private AbstractHealthScoreStrategy healthScoreStrategy;
@Autowired
private KafkaClientPool kafkaClientPool;
@Override
public List<TopicMetricsDO> getTopicMetricsFromDB(Long clusterId, String topicName, Date startTime, Date endTime) {
try {
@@ -340,7 +343,7 @@ public class TopicServiceImpl implements TopicService {
Map<TopicPartition, Long> topicPartitionLongMap = new HashMap<>();
KafkaConsumer kafkaConsumer = null;
try {
kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO);
kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO);
if ((offsetPosEnum.getCode() & OffsetPosEnum.END.getCode()) > 0) {
topicPartitionLongMap = kafkaConsumer.endOffsets(topicPartitionList);
} else if ((offsetPosEnum.getCode() & OffsetPosEnum.BEGINNING.getCode()) > 0) {
@@ -541,7 +544,7 @@ public class TopicServiceImpl implements TopicService {
List<PartitionOffsetDTO> partitionOffsetDTOList = new ArrayList<>();
try {
kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO);
kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO);
Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch);
if (offsetAndTimestampMap == null) {
return new ArrayList<>();


@@ -45,6 +45,9 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy {
@Autowired
private JmxService jmxService;
@Autowired
private ThreadPool threadPool;
@Override
public Integer calBrokerHealthScore(Long clusterId, Integer brokerId) {
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
@@ -125,7 +128,7 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy {
return calBrokerHealthScore(clusterId, brokerId);
}
});
ThreadPool.submitApiCallTask(taskList[i]);
threadPool.submitApiCallTask(clusterId, taskList[i]);
}
Integer topicHealthScore = HEALTH_SCORE_HEALTHY;


@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.service.utils;
import lombok.Data;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
@@ -8,38 +9,18 @@ import org.springframework.stereotype.Service;
* @author zengqiao
* @date 20/4/26
*/
@Data
@Service("configUtils")
public class ConfigUtils {
@Value(value = "${custom.idc}")
private ConfigUtils() {
}
@Value(value = "${custom.idc:cn}")
private String idc;
@Value(value = "${spring.profiles.active}")
@Value(value = "${spring.profiles.active:dev}")
private String kafkaManagerEnv;
@Value(value = "${custom.store-metrics-task.save-days}")
private Long maxMetricsSaveDays;
public String getIdc() {
return idc;
}
public void setIdc(String idc) {
this.idc = idc;
}
public String getKafkaManagerEnv() {
return kafkaManagerEnv;
}
public void setKafkaManagerEnv(String kafkaManagerEnv) {
this.kafkaManagerEnv = kafkaManagerEnv;
}
public Long getMaxMetricsSaveDays() {
return maxMetricsSaveDays;
}
public void setMaxMetricsSaveDays(Long maxMetricsSaveDays) {
this.maxMetricsSaveDays = maxMetricsSaveDays;
}
@Value(value = "${spring.application.version:unknown}")
private String applicationVersion;
}
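
Lombok's `@Data` generates the getters and setters this diff deletes (plus `equals`, `hashCode` and `toString`), so existing callers compile unchanged. A hypothetical demonstration:

```java
import org.springframework.context.ApplicationContext;

public class ConfigUtilsDemo {
    public static String describe(ApplicationContext ctx) {
        ConfigUtils configUtils = ctx.getBean(ConfigUtils.class);
        configUtils.setKafkaManagerEnv("test");  // setter generated by @Data
        return configUtils.getIdc()              // getter generated by @Data
                + "/" + configUtils.getApplicationVersion();
    }
}
```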


@@ -74,15 +74,10 @@ public class BrokerStateListener implements StateChangeListener {
BrokerMetadata brokerMetadata = null;
try {
brokerMetadata = zkConfig.get(ZkPathUtil.getBrokerIdNodePath(brokerId), BrokerMetadata.class);
if (!brokerMetadata.getEndpoints().isEmpty()) {
String endpoint = brokerMetadata.getEndpoints().get(0);
int idx = endpoint.indexOf("://");
endpoint = endpoint.substring(idx + "://".length());
idx = endpoint.indexOf(":");
brokerMetadata.setHost(endpoint.substring(0, idx));
brokerMetadata.setPort(Integer.parseInt(endpoint.substring(idx + 1)));
}
// Parse and update the broker metadata being stored
BrokerMetadata.parseAndUpdateBrokerMetadata(brokerMetadata);
brokerMetadata.setClusterId(clusterId);
brokerMetadata.setBrokerId(brokerId);
PhysicalClusterMetadataManager.putBrokerMetadata(clusterId, brokerId, brokerMetadata, jmxConfig);
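
The inline parsing above was centralized into `BrokerMetadata.parseAndUpdateBrokerMetadata`, whose body is not shown in this diff; the logic it replaces reduces to splitting a listener endpoint into host and port, roughly:

```java
public final class EndpointParsingDemo {
    public static void main(String[] args) {
        String endpoint = "PLAINTEXT://broker-1.example.com:9092"; // hypothetical endpoint
        String hostPort = endpoint.substring(endpoint.indexOf("://") + "://".length());
        int sep = hostPort.lastIndexOf(':');
        String host = hostPort.substring(0, sep);                  // broker-1.example.com
        int port = Integer.parseInt(hostPort.substring(sep + 1));  // 9092
        System.out.println(host + ":" + port);
    }
}
```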


@@ -19,13 +19,13 @@ import org.springframework.dao.DuplicateKeyException;
* @date 20/5/14
*/
public class ControllerStateListener implements StateChangeListener {
private final static Logger LOGGER = LoggerFactory.getLogger(ControllerStateListener.class);
private static final Logger LOGGER = LoggerFactory.getLogger(ControllerStateListener.class);
private Long clusterId;
private final Long clusterId;
private ZkConfigImpl zkConfig;
private final ZkConfigImpl zkConfig;
private ControllerDao controllerDao;
private final ControllerDao controllerDao;
public ControllerStateListener(Long clusterId, ZkConfigImpl zkConfig, ControllerDao controllerDao) {
this.clusterId = clusterId;
@@ -35,8 +35,11 @@ public class ControllerStateListener implements StateChangeListener {
@Override
public void init() {
if (!checkNodeExist()) {
LOGGER.warn("kafka-controller data not exist, clusterId:{}.", clusterId);
return;
}
processControllerChange();
return;
}
@Override
@@ -49,12 +52,21 @@ public class ControllerStateListener implements StateChangeListener {
break;
}
} catch (Exception e) {
LOGGER.error("process controller state change failed, clusterId:{} state:{} path:{}.",
clusterId, state, path, e);
LOGGER.error("process controller state change failed, clusterId:{} state:{} path:{}.", clusterId, state, path, e);
}
}
private void processControllerChange(){
private boolean checkNodeExist() {
try {
return zkConfig.checkPathExists(ZkPathUtil.CONTROLLER_ROOT_NODE);
} catch (Exception e) {
LOGGER.error("init kafka-controller data failed, clusterId:{}.", clusterId, e);
}
return false;
}
private void processControllerChange() {
LOGGER.warn("init controllerData or controller change, clusterId:{}.", clusterId);
ControllerData controllerData = null;
try {


@@ -10,6 +10,7 @@ import com.xiaojukeji.kafka.manager.service.cache.ThreadPool;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.HashSet;
import java.util.List;
@@ -28,9 +29,12 @@ public class TopicStateListener implements StateChangeListener {
private ZkConfigImpl zkConfig;
public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) {
private ThreadPool threadPool;
public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig, ThreadPool threadPool) {
this.clusterId = clusterId;
this.zkConfig = zkConfig;
this.threadPool = threadPool;
}
@Override
@@ -47,7 +51,7 @@ public class TopicStateListener implements StateChangeListener {
return null;
}
});
ThreadPool.submitCollectMetricsTask(taskList[i]);
threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
} catch (Exception e) {
LOGGER.error("init topics metadata failed, clusterId:{}.", clusterId, e);


@@ -20,5 +20,5 @@ public interface BrokerMetricsDao {
*/
List<BrokerMetricsDO> getBrokerMetrics(Long clusterId, Integer brokerId, Date startTime, Date endTime);
int deleteBeforeTime(Date endTime);
int deleteBeforeTime(Date endTime, Integer limitSize);
}


@@ -10,5 +10,5 @@ public interface ClusterMetricsDao {
List<ClusterMetricsDO> getClusterMetrics(long clusterId, Date startTime, Date endTime);
int deleteBeforeTime(Date endTime);
int deleteBeforeTime(Date endTime, Integer limitSize);
}


@@ -30,5 +30,5 @@ public interface TopicAppMetricsDao {
* @param endTime
* @return
*/
int deleteBeforeTime(Date endTime);
int deleteBeforeTime(Date endTime, Integer limitSize);
}


@@ -17,4 +17,6 @@ public interface TopicExpiredDao {
int replace(TopicExpiredDO expiredDO);
TopicExpiredDO getByTopic(Long clusterId, String topicName);
int deleteByName(Long clusterId, String topicName);
}


@@ -22,5 +22,5 @@ public interface TopicMetricsDao {
List<TopicMetricsDO> getLatestTopicMetrics(Long clusterId, Date afterTime);
int deleteBeforeTime(Date endTime);
int deleteBeforeTime(Date endTime, Integer limitSize);
}


@@ -33,9 +33,7 @@ public interface TopicRequestMetricsDao {
* @param endTime
* @return
*/
int deleteBeforeTime(Date endTime);
int deleteBeforeId(Long id);
int deleteBeforeTime(Date endTime, Integer limitSize);
List<TopicMetricsDO> getById(Long startId, Long endId);
}


@@ -32,5 +32,5 @@ public interface TopicThrottledMetricsDao {
List<TopicThrottledMetricsDO> getLatestTopicThrottledMetrics(Long clusterId, Date afterTime);
int deleteBeforeTime(Date endTime);
int deleteBeforeTime(Date endTime, Integer limitSize);
}


@@ -37,7 +37,10 @@ public class BrokerMetricsImpl implements BrokerMetricsDao {
}
@Override
public int deleteBeforeTime(Date endTime) {
return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", endTime);
public int deleteBeforeTime(Date endTime, Integer limitSize) {
Map<String, Object> params = new HashMap<>(2);
params.put("endTime", endTime);
params.put("limitSize", limitSize);
return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", params);
}
}
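
All the metrics DAOs now take the batch size as a parameter instead of hard-coding it in the SQL. A sketch of how a cleanup task can drive it (the scheduler wiring is assumed), looping so each individual DELETE stays small and holds row locks only briefly:

```java
import java.util.Date;

public class MetricsCleaner {
    private static final int BATCH_SIZE = 1000; // assumed batch size

    private final BrokerMetricsDao brokerMetricsDao; // DAO shown above

    public MetricsCleaner(BrokerMetricsDao brokerMetricsDao) {
        this.brokerMetricsDao = brokerMetricsDao;
    }

    public void cleanBefore(Date endTime) {
        int deleted;
        do {
            deleted = brokerMetricsDao.deleteBeforeTime(endTime, BATCH_SIZE);
        } while (deleted == BATCH_SIZE); // a short batch means we have caught up
    }
}
```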


@@ -27,7 +27,7 @@ public class ClusterMetricsDaoImpl implements ClusterMetricsDao {
@Override
public List<ClusterMetricsDO> getClusterMetrics(long clusterId, Date startTime, Date endTime) {
Map<String, Object> map = new HashMap<String, Object>(3);
Map<String, Object> map = new HashMap<>(3);
map.put("clusterId", clusterId);
map.put("startTime", startTime);
map.put("endTime", endTime);
@@ -35,7 +35,10 @@ public class ClusterMetricsDaoImpl implements ClusterMetricsDao {
}
@Override
public int deleteBeforeTime(Date endTime) {
return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", endTime);
public int deleteBeforeTime(Date endTime, Integer limitSize) {
Map<String, Object> params = new HashMap<>(2);
params.put("endTime", endTime);
params.put("limitSize", limitSize);
return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", params);
}
}


@@ -46,7 +46,10 @@ public class TopicAppMetricsDaoImpl implements TopicAppMetricsDao {
}
@Override
public int deleteBeforeTime(Date endTime) {
return sqlSession.delete("TopicAppMetricsDao.deleteBeforeTime", endTime);
public int deleteBeforeTime(Date endTime, Integer limitSize) {
Map<String, Object> params = new HashMap<>(2);
params.put("endTime", endTime);
params.put("limitSize", limitSize);
return sqlSession.delete("TopicAppMetricsDao.deleteBeforeTime", params);
}
}


@@ -50,4 +50,12 @@ public class TopicExpiredDaoImpl implements TopicExpiredDao {
params.put("topicName", topicName);
return sqlSession.selectOne("TopicExpiredDao.getByTopic", params);
}
@Override
public int deleteByName(Long clusterId, String topicName) {
Map<String, Object> params = new HashMap<>(2);
params.put("clusterId", clusterId);
params.put("topicName", topicName);
return sqlSession.delete("TopicExpiredDao.deleteByName", params);
}
}


@@ -60,7 +60,10 @@ public class TopicMetricsDaoImpl implements TopicMetricsDao {
}
@Override
public int deleteBeforeTime(Date endTime) {
return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", endTime);
public int deleteBeforeTime(Date endTime, Integer limitSize) {
Map<String, Object> params = new HashMap<>(2);
params.put("endTime", endTime);
params.put("limitSize", limitSize);
return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", params);
}
}


@@ -45,13 +45,11 @@ public class TopicRequestMetricsDaoImpl implements TopicRequestMetricsDao {
}
@Override
public int deleteBeforeTime(Date endTime) {
return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeTime", endTime);
}
@Override
public int deleteBeforeId(Long id) {
return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeId", id);
public int deleteBeforeTime(Date endTime, Integer limitSize) {
Map<String, Object> params = new HashMap<>();
params.put("endTime", endTime);
params.put("limitSize", limitSize);
return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeTime", params);
}
@Override


@@ -75,7 +75,10 @@ public class TopicThrottledMetricsDaoImpl implements TopicThrottledMetricsDao {
}
@Override
public int deleteBeforeTime(Date endTime) {
return sqlSession.delete("TopicThrottledMetricsDao.deleteBeforeTime", endTime);
public int deleteBeforeTime(Date endTime, Integer limitSize) {
Map<String, Object> params = new HashMap<>(2);
params.put("endTime", endTime);
params.put("limitSize", limitSize);
return sqlSession.delete("TopicThrottledMetricsDao.deleteBeforeTime", params);
}
}


@@ -29,9 +29,9 @@
]]>
</select>
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<delete id="deleteBeforeTime" parameterType="java.util.Map">
<![CDATA[
DELETE FROM broker_metrics WHERE gmt_create < #{endTime} LIMIT 1000
DELETE FROM broker_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
]]>
</delete>
</mapper>


@@ -27,9 +27,9 @@
</foreach>
</insert>
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<delete id="deleteBeforeTime" parameterType="java.util.Map">
<![CDATA[
DELETE FROM cluster_metrics WHERE gmt_create < #{endTime} LIMIT 200
DELETE FROM cluster_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
]]>
</delete>
</mapper>


@@ -30,9 +30,9 @@
]]>
</select>
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<delete id="deleteBeforeTime" parameterType="java.util.Map">
<![CDATA[
DELETE FROM topic_app_metrics WHERE gmt_create < #{endTime} LIMIT 3000
DELETE FROM topic_app_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
]]>
</delete>
</mapper>


@@ -36,4 +36,8 @@
<select id="getByTopic" parameterType="java.util.Map" resultMap="TopicExpiredMap">
SELECT * FROM topic_expired WHERE cluster_id = #{clusterId} AND topic_name = #{topicName}
</select>
<delete id="deleteByName" parameterType="java.util.Map">
DELETE FROM topic_expired WHERE cluster_id=#{clusterId} AND topic_name=#{topicName}
</delete>
</mapper>


@@ -25,6 +25,7 @@
WHERE cluster_id = #{clusterId}
AND topic_name = #{topicName}
AND gmt_create BETWEEN #{startTime} AND #{endTime}
ORDER BY gmt_create
]]>
</select>
@@ -32,12 +33,13 @@
<![CDATA[
SELECT * FROM topic_metrics
WHERE cluster_id = #{clusterId} AND #{afterTime} <= gmt_create
ORDER BY gmt_create
]]>
</select>
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<delete id="deleteBeforeTime" parameterType="java.util.Map">
<![CDATA[
DELETE FROM topic_metrics WHERE gmt_create < #{endTime} LIMIT 3000
DELETE FROM topic_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
]]>
</delete>
</mapper>


@@ -34,15 +34,9 @@
ORDER BY gmt_create ASC
</select>
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<delete id="deleteBeforeTime" parameterType="java.util.Map">
<![CDATA[
DELETE FROM topic_request_time_metrics WHERE gmt_create < #{endTime} LIMIT 2000
]]>
</delete>
<delete id="deleteBeforeId" parameterType="java.lang.Long">
<![CDATA[
DELETE FROM topic_request_time_metrics WHERE id < #{id} LIMIT 20000
DELETE FROM topic_request_time_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
]]>
</delete>


@@ -54,9 +54,9 @@
AND gmt_create > #{afterTime}
</select>
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<delete id="deleteBeforeTime" parameterType="java.util.Map">
<![CDATA[
DELETE FROM topic_throttled_metrics WHERE gmt_create < #{endTime} LIMIT 3000
DELETE FROM topic_throttled_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
]]>
</delete>
</mapper>


@@ -32,16 +32,16 @@ public class BaseSessionSignOn extends AbstractSingleSignOn {
private LdapAuthentication ldapAuthentication;
// Whether LDAP authentication is enabled
@Value(value = "${account.ldap.enabled:}")
@Value(value = "${account.ldap.enabled:false}")
private Boolean accountLdapEnabled;
// Default role for LDAP auto-registration; note it is normally a low-privilege role
@Value(value = "${account.ldap.auth-user-registration-role:}")
@Value(value = "${account.ldap.auth-user-registration-role:normal}")
private String authUserRegistrationRole;
// Whether LDAP auto-registration is enabled
@Value(value = "${account.ldap.auth-user-registration:}")
private boolean authUserRegistration;
@Value(value = "${account.ldap.auth-user-registration:false}")
private Boolean authUserRegistration;
@Override
public Result<String> loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto) {
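
For context, `@Value("${key:}")` resolves an absent property to an empty string, which cannot populate a primitive `boolean` and is error-prone for wrapper types; the explicit defaults above make startup deterministic. A standalone illustration (the class is hypothetical, the property names follow the diff):

```java
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class LdapSettings {
    @Value("${account.ldap.enabled:false}")
    private Boolean accountLdapEnabled;          // missing property -> false

    @Value("${account.ldap.auth-user-registration-role:normal}")
    private String authUserRegistrationRole;     // missing property -> "normal"

    @Value("${account.ldap.auth-user-registration:false}")
    private Boolean authUserRegistration;        // missing property -> false
}
```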

Some files were not shown because too many files have changed in this diff.