diff --git a/README.md b/README.md index ad88eada..885a9355 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,9 @@ **一站式`Apache Kafka`集群指标监控与运维管控平台** +`LogiKM开源至今备受关注,考虑到开源项目应该更贴合Apache Kafka未来发展方向,经项目组慎重考虑,预计22年5月份将其品牌升级成Know Streaming,届时项目名称和Logo也将统一更新,感谢大家一如既往的支持,敬请期待!` - -阅读本README文档,您可以了解到滴滴Logi-KafkaManager的用户群体、产品定位等信息,并通过体验地址,快速体验Kafka集群指标监控与运维管控的全流程。
若滴滴Logi-KafkaManager已在贵司的生产环境进行使用,并想要获得官方更好地支持和指导,可以通过[`OCE认证`](http://obsuite.didiyun.com/open/openAuth),加入官方交流平台。 +阅读本README文档,您可以了解到滴滴Logi-KafkaManager的用户群体、产品定位等信息,并通过体验地址,快速体验Kafka集群指标监控与运维管控的全流程。 ## 1 产品简介 @@ -55,35 +55,56 @@ ## 2 相关文档 ### 2.1 产品文档 -- [滴滴Logi-KafkaManager 安装手册](docs/install_guide/install_guide_cn.md) -- [滴滴Logi-KafkaManager 接入集群](docs/user_guide/add_cluster/add_cluster.md) -- [滴滴Logi-KafkaManager 用户使用手册](docs/user_guide/user_guide_cn.md) -- [滴滴Logi-KafkaManager FAQ](docs/user_guide/faq.md) +- [滴滴LogiKM 安装手册](docs/install_guide/install_guide_cn.md) +- [滴滴LogiKM 接入集群](docs/user_guide/add_cluster/add_cluster.md) +- [滴滴LogiKM 用户使用手册](docs/user_guide/user_guide_cn.md) +- [滴滴LogiKM FAQ](docs/user_guide/faq.md) ### 2.2 社区文章 +- [kafka最强最全知识图谱](https://www.szzdzhp.com/kafka/) +- [LogiKM新用户入门系列文章专栏 --石臻臻](https://www.szzdzhp.com/categories/LogIKM/) - [滴滴云官网产品介绍](https://www.didiyun.com/production/logi-KafkaManager.html) - [7年沉淀之作--滴滴Logi日志服务套件](https://mp.weixin.qq.com/s/-KQp-Qo3WKEOc9wIR2iFnw) - [滴滴Logi-KafkaManager 一站式Kafka监控与管控平台](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ) - [滴滴Logi-KafkaManager 开源之路](https://xie.infoq.cn/article/0223091a99e697412073c0d64) -- [滴滴Logi-KafkaManager 系列视频教程](https://mp.weixin.qq.com/s/9X7gH0tptHPtfjPPSdGO8g) +- [滴滴Logi-KafkaManager 系列视频教程](https://space.bilibili.com/442531657/channel/seriesdetail?sid=571649) - [kafka实践(十五):滴滴开源Kafka管控平台 Logi-KafkaManager研究--A叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244) -- [kafka的灵魂伴侣Logi-KafkaManager系列文章专栏 --石臻](https://blog.csdn.net/u010634066/category_10977588.html) + ## 3 滴滴Logi开源用户交流群 - ![image](https://user-images.githubusercontent.com/5287750/111266722-e531d800-8665-11eb-9242-3484da5a3099.png) -微信加群:关注公众号 云原生可观测性 回复 "Logi加群" -## 4 OCE认证 -OCE是一个认证机制和交流平台,为滴滴Logi-KafkaManager生产用户量身打造,我们会为OCE企业提供更好的技术支持,比如专属的技术沙龙、企业一对一的交流机会、专属的答疑群等,如果贵司Logi-KafkaManager上了生产,[快来加入吧](http://obsuite.didiyun.com/open/openAuth) +想跟各个大佬交流Kafka Es 等中间件/大数据相关技术请 加微信进群。 +微信加群:添加mike_zhangliangdanke-xie的微信号备注Logi加群或关注公众号 云原生可观测性 回复 "Logi加群" + +## 4 知识星球 + +image + +
+✅我们正在组建国内最大最权威的
+
+【Kafka中文社区】
+ +在这里你可以结交各大互联网Kafka大佬以及近2000+Kafka爱好者,一起实现知识共享,实时掌控最新行业资讯,期待您的加入中~https://z.didi.cn/5gSF9 + +有问必答~! + +互动有礼~! + +PS:提问请尽量把问题一次性描述清楚,并告知环境信息情况哦~!如使用版本、操作步骤、报错/警告信息等,方便大V们快速解答~ ## 5 项目成员 ### 5.1 内部核心人员 -`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`、`zhaoyinrui`、`marzkonglingxu`、`joysunchao` +`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`xiepeng`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`、`zhaoyinrui`、`marzkonglingxu`、`joysunchao`、`石臻臻` ### 5.2 外部贡献者 @@ -93,4 +114,4 @@ OCE是一个认证机制和交流平台,为滴滴Logi-KafkaManager生产用户 ## 6 协议 -`kafka-manager`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE) +`LogiKM`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE) diff --git a/build.sh b/build.sh deleted file mode 100644 index b07c6623..00000000 --- a/build.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash -workspace=$(cd $(dirname $0) && pwd -P) -cd $workspace - -## constant -OUTPUT_DIR=./output -KM_VERSION=2.4.2 -APP_NAME=kafka-manager -APP_DIR=${APP_NAME}-${KM_VERSION} - -MYSQL_TABLE_SQL_FILE=./docs/install_guide/create_mysql_table.sql -CONFIG_FILE=./kafka-manager-web/src/main/resources/application.yml - -## function -function build() { - # 编译命令 - mvn -U clean package -Dmaven.test.skip=true - - local sc=$? - if [ $sc -ne 0 ];then - ## 编译失败, 退出码为 非0 - echo "$APP_NAME build error" - exit $sc - else - echo "$APP_NAME build ok" - fi -} - -function make_output() { - # 新建output目录 - rm -rf ${OUTPUT_DIR} &>/dev/null - mkdir -p ${OUTPUT_DIR}/${APP_DIR} &>/dev/null - - # 填充output目录, output内的内容 - ( - cp -rf ${MYSQL_TABLE_SQL_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 sql 初始化脚本 至output目录 - cp -rf ${CONFIG_FILE} ${OUTPUT_DIR}/${APP_DIR} && # 拷贝 application.yml 至output目录 - - # 拷贝程序包到output路径 - cp kafka-manager-web/target/kafka-manager-web-${KM_VERSION}-SNAPSHOT.jar ${OUTPUT_DIR}/${APP_DIR}/${APP_NAME}.jar - echo -e "make output ok." - ) || { echo -e "make output error"; exit 2; } # 填充output目录失败后, 退出码为 非0 -} - -function make_package() { - # 压缩output目录 - ( - cd ${OUTPUT_DIR} && tar cvzf ${APP_DIR}.tar.gz ${APP_DIR} - echo -e "make package ok." 
- ) || { echo -e "make package error"; exit 2; } # 压缩output目录失败后, 退出码为 非0 -} - -########################################## -## main -## 其中, -## 1.进行编译 -## 2.生成部署包output -## 3.生成tar.gz压缩包 -########################################## - -# 1.进行编译 -build - -# 2.生成部署包output -make_output - -# 3.生成tar.gz压缩包 -make_package - -# 编译成功 -echo -e "build done" -exit 0 \ No newline at end of file diff --git a/container/dockerfiles/Dockerfile b/container/dockerfiles/Dockerfile index fa1850e3..941351e8 100644 --- a/container/dockerfiles/Dockerfile +++ b/container/dockerfiles/Dockerfile @@ -19,6 +19,7 @@ ENV JAVA_OPTS="-verbose:gc \ RUN wget https://github.com/didi/Logi-KafkaManager/releases/download/v${VERSION}/kafka-manager-${VERSION}.tar.gz && \ tar xvf kafka-manager-${VERSION}.tar.gz && \ mv kafka-manager-${VERSION}/kafka-manager.jar /opt/app.jar && \ + mv kafka-manager-${VERSION}/application.yml /opt/application.yml && \ rm -rf kafka-manager-${VERSION}* EXPOSE 8080 9999 diff --git a/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.14.0.jar b/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.14.0.jar deleted file mode 100644 index 54b633bf..00000000 Binary files a/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.14.0.jar and /dev/null differ diff --git a/container/helm/templates/configmap.yaml b/container/helm/templates/configmap.yaml index b487f2bd..1428cf11 100644 --- a/container/helm/templates/configmap.yaml +++ b/container/helm/templates/configmap.yaml @@ -55,7 +55,7 @@ data: didi: app-topic-metrics-enabled: false topic-request-time-metrics-enabled: false - topic-throttled-metrics: false + topic-throttled-metrics-enabled: false save-days: 7 # 任务相关的开关 @@ -67,7 +67,16 @@ data: # ldap settings ldap: enabled: false - authUserRegistration: false + url: ldap://127.0.0.1:389/ + basedn: dc=tsign,dc=cn + factory: com.sun.jndi.ldap.LdapCtxFactory + filter: sAMAccountName + security: + authentication: simple + principal: cn=admin,dc=tsign,dc=cn + credentials: admin + auth-user-registration: false + auth-user-registration-role: normal kcm: enabled: false diff --git a/distribution/bin/shutdown.sh b/distribution/bin/shutdown.sh new file mode 100644 index 00000000..fdf2d01c --- /dev/null +++ b/distribution/bin/shutdown.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +cd `dirname $0`/../target +target_dir=`pwd` + +pid=`ps ax | grep -i 'kafka-manager' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'` +if [ -z "$pid" ] ; then + echo "No kafka-manager running." + exit -1; +fi + +echo "The kafka-manager (${pid}) is running..." + +kill ${pid} + +echo "Send shutdown request to kafka-manager (${pid}) OK" diff --git a/distribution/bin/startup.sh b/distribution/bin/startup.sh new file mode 100644 index 00000000..ead6cde9 --- /dev/null +++ b/distribution/bin/startup.sh @@ -0,0 +1,81 @@ +error_exit () +{ + echo "ERROR: $1 !!" + exit 1 +} + +[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java +[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java +[ ! 
-e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME + +if [ -z "$JAVA_HOME" ]; then + if $darwin; then + + if [ -x '/usr/libexec/java_home' ] ; then + export JAVA_HOME=`/usr/libexec/java_home` + + elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then + export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" + fi + else + JAVA_PATH=`dirname $(readlink -f $(which javac))` + if [ "x$JAVA_PATH" != "x" ]; then + export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null` + fi + fi + if [ -z "$JAVA_HOME" ]; then + error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!" + fi +fi + + + + +export WEB_SERVER="kafka-manager" +export JAVA_HOME +export JAVA="$JAVA_HOME/bin/java" +export BASE_DIR=`cd $(dirname $0)/..; pwd` +export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/ + + +#=========================================================================================== +# JVM Configuration +#=========================================================================================== + +JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m" +JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof" + +## jdk版本高的情况 有些 参数废弃了 +JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p') +if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then + JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400" +else + JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext" + JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M" + +fi + +JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/${WEB_SERVER}.jar" +JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}" +JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml" +JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288" + + + +if [ ! -d "${BASE_DIR}/logs" ]; then + mkdir ${BASE_DIR}/logs +fi + +echo "$JAVA ${JAVA_OPT}" + +# check the start.out log output file +if [ ! 
-f "${BASE_DIR}/logs/start.out" ]; then + touch "${BASE_DIR}/logs/start.out" +fi +# start +echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 & + + +nohup $JAVA ${JAVA_OPT} >> ${BASE_DIR}/logs/start.out 2>&1 & + +echo "${WEB_SERVER} is starting,you can check the ${BASE_DIR}/logs/start.out" diff --git a/distribution/conf/application.yml b/distribution/conf/application.yml new file mode 100644 index 00000000..a11cb737 --- /dev/null +++ b/distribution/conf/application.yml @@ -0,0 +1,29 @@ + +## kafka-manager的配置文件,该文件中的配置会覆盖默认配置 +## 下面的配置信息基本就是jar中的 application.yml默认配置了; +## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql + + +server: + port: 8080 + tomcat: + accept-count: 1000 + max-connections: 10000 + max-threads: 800 + min-spare-threads: 100 + +spring: + application: + name: kafkamanager + version: @project.version@ + profiles: + active: dev + datasource: + kafka-manager: + jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8 + username: root + password: 123456 + driver-class-name: com.mysql.cj.jdbc.Driver + main: + allow-bean-definition-overriding: true + diff --git a/distribution/conf/application.yml.example b/distribution/conf/application.yml.example new file mode 100644 index 00000000..f777ce31 --- /dev/null +++ b/distribution/conf/application.yml.example @@ -0,0 +1,136 @@ + +## kafka-manager的配置文件,该文件中的配置会覆盖默认配置 +## 下面的配置信息基本就是jar中的 application.yml默认配置了; +## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql + + +server: + port: 8080 + tomcat: + accept-count: 1000 + max-connections: 10000 + max-threads: 800 + min-spare-threads: 100 + +spring: + application: + name: kafkamanager + version: @project.version@ + profiles: + active: dev + datasource: + kafka-manager: + jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8 + username: root + password: 123456 + driver-class-name: com.mysql.cj.jdbc.Driver + main: + allow-bean-definition-overriding: true + + servlet: + multipart: + max-file-size: 100MB + max-request-size: 100MB + +logging: + config: classpath:logback-spring.xml + +custom: + idc: cn + store-metrics-task: + community: + topic-metrics-enabled: true + didi: # 滴滴Kafka特有的指标 + app-topic-metrics-enabled: false + topic-request-time-metrics-enabled: false + topic-throttled-metrics-enabled: false + +# 任务相关的配置 +task: + op: + sync-topic-enabled: false # 未落盘的Topic定期同步到DB中 + order-auto-exec: # 工单自动化审批线程的开关 + topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启 + app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启 + metrics: + collect: # 收集指标 + broker-metrics-enabled: true # 收集Broker指标 + sink: # 上报指标 + cluster-metrics: # 上报cluster指标 + sink-db-enabled: true # 上报到db + broker-metrics: # 上报broker指标 + sink-db-enabled: true # 上报到db + delete: # 删除指标 + delete-limit-size: 1000 # 单次删除的批大小 + cluster-metrics-save-days: 14 # 集群指标保存天数 + broker-metrics-save-days: 14 # Broker指标保存天数 + topic-metrics-save-days: 7 # Topic指标保存天数 + topic-request-time-metrics-save-days: 7 # Topic请求耗时指标保存天数 + topic-throttled-metrics-save-days: 7 # Topic限流指标保存天数 + app-topic-metrics-save-days: 7 # App+Topic指标保存天数 + +thread-pool: + collect-metrics: + thread-num: 256 # 收集指标线程池大小 + queue-size: 5000 # 收集指标线程池的queue大小 + api-call: + thread-num: 16 # api服务线程池大小 + queue-size: 5000 # api服务线程池的queue大小 + +client-pool: + kafka-consumer: + min-idle-client-num: 24 # 最小空闲客户端数 + max-idle-client-num: 24 # 最大空闲客户端数 + max-total-client-num: 24 # 最大客户端数 + borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒 + +account: + 
jump-login: + gateway-api: false # 网关接口 + third-part-api: false # 第三方接口 + ldap: + enabled: false + url: ldap://127.0.0.1:389/ + basedn: dc=tsign,dc=cn + factory: com.sun.jndi.ldap.LdapCtxFactory + filter: sAMAccountName + security: + authentication: simple + principal: cn=admin,dc=tsign,dc=cn + credentials: admin + auth-user-registration: true + auth-user-registration-role: normal + +kcm: # 集群安装部署,仅安装broker + enabled: false # 是否开启 + s3: # s3 存储服务 + endpoint: s3.didiyunapi.com + access-key: 1234567890 + secret-key: 0987654321 + bucket: logi-kafka + n9e: # 夜莺 + base-url: http://127.0.0.1:8004 # 夜莺job服务地址 + user-token: 12345678 # 用户的token + timeout: 300 # 当台操作的超时时间 + account: root # 操作时使用的账号 + script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改 + logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态 + +monitor: + enabled: false + n9e: + nid: 2 + user-token: 1234567890 + mon: + base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000 + sink: + base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000 + rdb: + base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000 + +notify: + kafka: + cluster-id: 95 + topic-name: didi-kafka-notify + order: + detail-url: http://127.0.0.1 diff --git a/docs/install_guide/create_mysql_table.sql b/distribution/conf/create_mysql_table.sql similarity index 100% rename from docs/install_guide/create_mysql_table.sql rename to distribution/conf/create_mysql_table.sql diff --git a/distribution/conf/logback-spring.xml b/distribution/conf/logback-spring.xml new file mode 100644 index 00000000..c1c16136 --- /dev/null +++ b/distribution/conf/logback-spring.xml @@ -0,0 +1,215 @@ + + + logback + + + + + + + + + + + + + + info + + + ${CONSOLE_LOG_PATTERN} + UTF-8 + + + + + + + + + ${log.path}/log_debug.log + + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + + + ${log.path}/log_debug_%d{yyyy-MM-dd}.%i.log + + 100MB + + + 7 + + + + debug + ACCEPT + DENY + + + + + + + ${log.path}/log_info.log + + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + + + ${log.path}/log_info_%d{yyyy-MM-dd}.%i.log + + 100MB + + + 7 + + + + info + ACCEPT + DENY + + + + + + + ${log.path}/log_warn.log + + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + + ${log.path}/log_warn_%d{yyyy-MM-dd}.%i.log + + 100MB + + + 7 + + + + warn + ACCEPT + DENY + + + + + + + + ${log.path}/log_error.log + + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + + ${log.path}/log_error_%d{yyyy-MM-dd}.%i.log + + 100MB + + + 7 + + + + ERROR + ACCEPT + DENY + + + + + + ${log.path}/metrics/collector_metrics.log + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + ${log.path}/metrics/collector_metrics_%d{yyyy-MM-dd}.%i.log + + 100MB + + 3 + + + + + + ${log.path}/metrics/api_metrics.log + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + ${log.path}/metrics/api_metrics_%d{yyyy-MM-dd}.%i.log + + 100MB + + 3 + + + + + + ${log.path}/metrics/scheduled_tasks.log + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + UTF-8 + + + ${log.path}/metrics/scheduled_tasks_%d{yyyy-MM-dd}.%i.log + + 100MB + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/distribution/pom.xml b/distribution/pom.xml new file mode 100644 index 00000000..6b61525c --- /dev/null +++ b/distribution/pom.xml @@ -0,0 +1,64 @@ + + + + + + kafka-manager + 
com.xiaojukeji.kafka + ${kafka-manager.revision} + + + 4.0.0 + + distribution + distribution + pom + + + + ${project.groupId} + kafka-manager-web + ${kafka-manager.revision} + + + + + + + release-kafka-manager + + + ${project.groupId} + kafka-manager-web + ${kafka-manager.revision} + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + release-km.xml + + posix + + + + make-assembly + install + + single + + + + + + kafka-manager + + + + diff --git a/distribution/readme.md b/distribution/readme.md new file mode 100644 index 00000000..9d40efa4 --- /dev/null +++ b/distribution/readme.md @@ -0,0 +1,22 @@ +## 说明 + +### 1.创建mysql数据库文件 +> conf/create_mysql_table.sql + +### 2. 修改配置文件 +> conf/application.yml.example +> 请将application.yml.example 复制一份改名为application.yml; +> 并放在同级目录下(conf/); 并修改成自己的配置 +> 这里的优先级比jar包内配置文件的默认值高; +> + +### 3.启动/关闭kafka-manager +> sh bin/startup.sh 启动 +> +> sh shutdown.sh 关闭 +> + + +### 4.升级jar包 +> 如果是升级, 可以看看文件 `upgrade_config.md` 的配置变更历史; +> \ No newline at end of file diff --git a/distribution/release-km.xml b/distribution/release-km.xml new file mode 100755 index 00000000..d1b0db1c --- /dev/null +++ b/distribution/release-km.xml @@ -0,0 +1,51 @@ + + + + ${project.version} + true + + dir + tar.gz + zip + + + + + conf/** + + + + + + bin/* + + 0755 + + + + + + + readme.md + readme.md + + + upgrade_config.md + upgrade_config.md + + + + ../kafka-manager-web/target/kafka-manager.jar + target/ + + + + + + true + + com.xiaojukeji.kafka:kafka-manager-web + + + + diff --git a/distribution/upgrade_config.md b/distribution/upgrade_config.md new file mode 100644 index 00000000..5f976042 --- /dev/null +++ b/distribution/upgrade_config.md @@ -0,0 +1,42 @@ + +## 版本升级配置变更 +> 本文件 从 V2.2.0 开始记录; 如果配置有变更则会填写到下文中; 如果没有,则表示无变更; +> 当您从一个很低的版本升级时候,应该依次执行中间有过变更的sql脚本 + + + +**一站式`Apache Kafka`集群指标监控与运维管控平台** + +--- + +### 1.升级至`V2.2.0`版本 + +#### 1.mysql变更 + +`2.2.0`版本在`cluster`表及`logical_cluster`各增加了一个字段,因此需要执行下面的sql进行字段的增加。 + +```sql +# 往cluster表中增加jmx_properties字段, 这个字段会用于存储jmx相关的认证以及配置信息 +ALTER TABLE `cluster` ADD COLUMN `jmx_properties` TEXT NULL COMMENT 'JMX配置' AFTER `security_properties`; + +# 往logical_cluster中增加identification字段, 同时数据和原先name数据相同, 最后增加一个唯一键. +# 此后, name字段还是表示集群名称, 而identification字段表示的是集群标识, 只能是字母数字及下划线组成, +# 数据上报到监控系统时, 集群这个标识采用的字段就是identification字段, 之前使用的是name字段. 
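+# 注意: 下面三条SQL存在先后依赖, 需按顺序执行: 先新增identification字段, 再用name回填存量数据, 最后添加索引
+# 回填完成后, 可用类似下面的查询抽查identification与name是否一致(仅为示例, 非升级必需):
+# SELECT id, name, identification FROM logical_cluster LIMIT 10;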
+ALTER TABLE `logical_cluster` ADD COLUMN `identification` VARCHAR(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识' AFTER `name`; + +UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0; + +ALTER TABLE `logical_cluster` ADD INDEX `uniq_identification` (`identification` ASC); +``` + +### 升级至`2.3.0`版本 + +#### 1.mysql变更 +`2.3.0`版本在`gateway_config`表增加了一个描述说明的字段,因此需要执行下面的sql进行字段的增加。 + +```sql +ALTER TABLE `gateway_config` +ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`; +``` + + diff --git a/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg b/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg new file mode 100644 index 00000000..c2613d0a Binary files /dev/null and b/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg differ diff --git a/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png b/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png new file mode 100644 index 00000000..ba27bc1c Binary files /dev/null and b/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png differ diff --git a/docs/dev_guide/dynamic_config_manager.md b/docs/dev_guide/dynamic_config_manager.md index 9e05839c..c3365138 100644 --- a/docs/dev_guide/dynamic_config_manager.md +++ b/docs/dev_guide/dynamic_config_manager.md @@ -136,7 +136,8 @@ EXPIRED_TOPIC_CONFIG 配置Value: ```json { - "minExpiredDay": 30, #过期时间大于此值才显示 + "minExpiredDay": 30, #过期时间大于此值才显示, + "filterRegex": ".*XXX\\s+", #忽略符合此正则规则的Topic "ignoreClusterIdList": [ # 忽略的集群 50 ] diff --git a/docs/dev_guide/如何增加上报监控系统指标.md b/docs/dev_guide/如何增加上报监控系统指标.md new file mode 100644 index 00000000..f1ed9260 --- /dev/null +++ b/docs/dev_guide/如何增加上报监控系统指标.md @@ -0,0 +1,53 @@ + +--- + +![kafka-manager-logo](../assets/images/common/logo_name.png) + +**一站式`Apache Kafka`集群指标监控与运维管控平台** + +--- + +# 如何增加上报监控系统指标? 
+ +## 0、前言 + +LogiKM是 **一站式`Apache Kafka`集群指标监控与运维管控平台** ,当前会将消费Lag,Topic流量等指标上报到监控系统中,从而方便用户在监控系统中对这些指标配置监控告警规则,进而达到监控自身客户端是否正常的目的。 + +那么,如果我们想增加一个新的监控指标,应该如何做呢,比如我们想监控Broker的流量,监控Broker的存活信息,监控集群Controller个数等等。 + +在具体介绍之前,我们大家都知道,Kafka监控相关的信息,基本都存储于Broker、Jmx以及ZK中。当前LogiKM也已经具备从这三个地方获取数据的基本能力,因此基于LogiKM我们再获取其他指标,总体上还是非常方便的。 + +这里我们就以已经获取到的Topic流量信息为例,看LogiKM如何实现Topic指标的获取并上报的。 + +--- + +## 1、确定指标位置 + +基于对Kafka的了解,我们知道Topic流量信息这个指标是存储于Jmx中的,因此我们需要从Jmx中获取。大家如果对于自己所需要获取的指标存储在何处不太清楚的,可以加入我们维护的Kafka中文社区(README中有二维码)中今天沟通交流。 + +--- + +## 2、指标获取 + +Topic流量指标的获取详细见图中说明。 + +![Topic流量指标采集说明](./assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg) + +--- + +## 3、指标上报 + +上一步我们已经采集到Topic流量指标了,下一步就是将该指标上报到监控系统,这块只需要按照监控系统要求的格式,将数据上报即可。 + +LogiKM中有一个monitor模块,具体的如下图所示: + +![指标上报](./assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png) + + +## 4、补充说明 + +监控系统对接的相关内容见: + +[监控系统集成](./monitor_system_integrate_with_self.md) + +[监控系统集成例子——集成夜莺](./monitor_system_integrate_with_n9e.md) diff --git a/docs/install_guide/config_description.md b/docs/install_guide/config_description.md index 8f8fc0be..04335e29 100644 --- a/docs/install_guide/config_description.md +++ b/docs/install_guide/config_description.md @@ -51,13 +51,16 @@ custom: didi: app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 - topic-throttled-metrics: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 + topic-throttled-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 save-days: 7 #指标在DB中保持的天数,-1表示永久保存,7表示保存近7天的数据 # 任务相关的开关 task: op: sync-topic-enabled: false # 未落盘的Topic定期同步到DB中 + order-auto-exec: # 工单自动化审批线程的开关 + topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启 + app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启 account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化 ldap: diff --git a/docs/install_guide/install_guide_cn.md b/docs/install_guide/install_guide_cn.md index 9a4a415b..0130bd55 100644 --- a/docs/install_guide/install_guide_cn.md +++ b/docs/install_guide/install_guide_cn.md @@ -31,17 +31,23 @@ **2、源代码进行打包** -下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`sh build.sh`命令即可,执行完成之后会在`output/kafka-manager-xxx`目录下面生成一个jar包。 +下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`mvn -Prelease-kafka-manager -Dmaven.test.skip=true clean install -U `命令即可, +执行完成之后会在`distribution/target`目录下面生成一个`kafka-manager-*.tar.gz`。 +和一个`kafka-manager-*.zip` 文件,随便任意一个压缩包都可以; +当然此时同级目录有一个已经解压好的文件夹; -对于`windows`环境的用户,估计执行不了`sh build.sh`命令,因此可以直接执行`mvn install`,然后在`kafka-manager-web/target`目录下生成一个kafka-manager-web-xxx.jar的包。 -获取到jar包之后,我们继续下面的步骤。 --- -## 3、MySQL-DB初始化 +## 3. 
解压安装包 +解压完成后; 在文件目录中可以看到有`kafka-manager/conf/create_mysql_table.sql` 有个mysql初始化文件 +先初始化DB -执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`logi_kafka_manager`。 + +## 4、MySQL-DB初始化 + +执行[create_mysql_table.sql](../../distribution/conf/create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`logi_kafka_manager`。 ``` # 示例: @@ -50,15 +56,38 @@ mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql --- -## 4、启动 +## 5.修该配置 +请将`conf/application.yml.example` 文件复制一份出来命名为`application.yml` 放在同级目录:conf/application.yml ; +并且修改配置; 当然不修改的话 就会用默认的配置; +至少 mysql配置成自己的吧 -``` -# application.yml 是配置文件,最简单的是仅修改MySQL相关的配置即可启动 -nohup java -jar kafka-manager.jar --spring.config.location=./application.yml > /dev/null 2>&1 & -``` +## 6、启动/关闭 +解压包中有启动和关闭脚本 +`kafka-manager/bin/shutdown.sh` +`kafka-manager/bin/startup.sh` -### 5、使用 +执行 sh startup.sh 启动 +执行 sh shutdown.sh 关闭 + + + +### 6、使用 本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md) +### 7. 升级 + +如果是升级版本,请查看文件 [kafka-manager 升级手册](../../distribution/upgrade_config.md) + 在您下载的启动包(V2.5及其后)中也有记录,在 kafka-manager/upgrade_config.md 中 + + +### 8. 在IDE中启动 +> 如果想参与开发或者想在IDE中启动的话 +> 先执行 `mvn -Dmaven.test.skip=true clean install -U ` +> +> 然后这个时候可以选择去 [pom.xml](../../pom.xml) 中将`kafka-manager-console`模块注释掉; +> 注释是因为每次install的时候都会把前端文件`kafka-manager-console`重新打包进`kafka-manager-web` +> +> 完事之后,只需要直接用IDE启动运行`kafka-manager-web`模块中的 +> com.xiaojukeji.kafka.manager.web.MainApplication main方法就行了 \ No newline at end of file diff --git a/kafka-manager-common/pom.xml b/kafka-manager-common/pom.xml index 6a8ff0cb..f784bf8d 100644 --- a/kafka-manager-common/pom.xml +++ b/kafka-manager-common/pom.xml @@ -21,15 +21,12 @@ 1.8 UTF-8 UTF-8 - - 5.1.3.RELEASE org.springframework spring-web - ${spring-version} @@ -109,5 +106,11 @@ junit junit + + + org.projectlombok + lombok + compile + \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java index b0f84405..5422076c 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java @@ -20,12 +20,6 @@ public class ApiPrefix { // open public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/"; - // 开放给OP的接口, 后续对 应的接口的集群都需要是物理集群 - public static final String API_V1_THIRD_PART_OP_PREFIX = API_V1_THIRD_PART_PREFIX + "op/"; - - // 开放给Normal的接口, 后续对应的接口的集群,都需要是逻辑集群 - public static final String API_V1_THIRD_PART_NORMAL_PREFIX = API_V1_THIRD_PART_PREFIX + "normal/"; - // gateway public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX; diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java index 7ecc295b..02331255 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java @@ -9,7 +9,7 @@ public class Constant { public static final Integer MAX_AVG_BYTES_DURATION = 10; - public static final Integer BATCH_INSERT_SIZE = 50; + public static final Integer BATCH_INSERT_SIZE = 30; public 
static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000; diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java index 4d69f914..463e9b1a 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java @@ -17,6 +17,10 @@ public class KafkaConstant { public static final String RETENTION_MS_KEY = "retention.ms"; + public static final String EXTERNAL_KEY = "EXTERNAL"; + + public static final String INTERNAL_KEY = "INTERNAL"; + private KafkaConstant() { } } \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/common/IpPortData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/common/IpPortData.java new file mode 100644 index 00000000..a16b32b4 --- /dev/null +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/common/IpPortData.java @@ -0,0 +1,18 @@ +package com.xiaojukeji.kafka.manager.common.entity.ao.common; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.io.Serializable; + +@Data +@NoArgsConstructor +@AllArgsConstructor +public class IpPortData implements Serializable { + private static final long serialVersionUID = -428897032994630685L; + + private String ip; + + private String port; +} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java index d68dc2ba..be49cb18 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java @@ -10,6 +10,8 @@ import java.util.List; public class TopicExpiredConfig { private Integer minExpiredDay = 30; + private String filterRegex = ""; + private List ignoreClusterIdList = new ArrayList<>(); public Integer getMinExpiredDay() { @@ -28,10 +30,19 @@ public class TopicExpiredConfig { this.ignoreClusterIdList = ignoreClusterIdList; } + public String getFilterRegex() { + return filterRegex; + } + + public void setFilterRegex(String filterRegex) { + this.filterRegex = filterRegex; + } + @Override public String toString() { return "TopicExpiredConfig{" + "minExpiredDay=" + minExpiredDay + + ", filterRegex='" + filterRegex + '\'' + ", ignoreClusterIdList=" + ignoreClusterIdList + '}'; } diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java index 45fffebd..7f02e51b 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java @@ -25,6 +25,8 @@ public class MineTopicSummary { private Integer access; + private String description; + public Long getLogicalClusterId() { return logicalClusterId; } @@ -105,6 +107,14 @@ public class 
MineTopicSummary { this.access = access; } + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + @Override public String toString() { return "MineTopicSummary{" + diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java index 390d3ef4..e8b05779 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java @@ -27,8 +27,11 @@ public class OrderVO { @ApiModelProperty(value = "工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消") private Integer status; - @ApiModelProperty(value = "申请/审核时间") - private Date gmtTime; + @ApiModelProperty(value = "申请时间") + private Date gmtCreate; + + @ApiModelProperty(value = "审核时间") + private Date gmtHandle; public Long getId() { return id; @@ -70,12 +73,20 @@ public class OrderVO { this.status = status; } - public Date getGmtTime() { - return gmtTime; + public Date getGmtCreate() { + return gmtCreate; } - public void setGmtTime(Date gmtTime) { - this.gmtTime = gmtTime; + public void setGmtCreate(Date gmtCreate) { + this.gmtCreate = gmtCreate; + } + + public Date getGmtHandle() { + return gmtHandle; + } + + public void setGmtHandle(Date gmtHandle) { + this.gmtHandle = gmtHandle; } public String getApplicant() { @@ -95,7 +106,7 @@ public class OrderVO { ", applicant='" + applicant + '\'' + ", description='" + description + '\'' + ", status=" + status + - ", gmtTime=" + gmtTime + + ", gmtTime=" + gmtCreate + '}'; } } diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java index 2cda46cb..2e4665a1 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java @@ -36,6 +36,9 @@ public class TopicMineVO { @ApiModelProperty(value = "状态, 0:无权限, 1:可消费 2:可发送 3:可消费发送 4:可管理") private Integer access; + @ApiModelProperty(value = "备注") + private String description; + public Long getClusterId() { return clusterId; } @@ -108,6 +111,14 @@ public class TopicMineVO { this.access = access; } + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + @Override public String toString() { return "TopicMineVO{" + diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/RegionCreatedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/RegionCreatedEvent.java new file mode 100644 index 00000000..b8d72de9 --- /dev/null +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/RegionCreatedEvent.java @@ -0,0 +1,20 @@ +package com.xiaojukeji.kafka.manager.common.events; + +import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO; +import lombok.Getter; +import org.springframework.context.ApplicationEvent; + +/** + * Region创建事件 + * @author zengqiao + * @date 22/01/1 + */ +@Getter +public class RegionCreatedEvent extends 
ApplicationEvent { + private final RegionDO regionDO; + + public RegionCreatedEvent(Object source, RegionDO regionDO) { + super(source); + this.regionDO = regionDO; + } +} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BaseMetricsCollectedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BaseMetricsCollectedEvent.java new file mode 100644 index 00000000..730e14c9 --- /dev/null +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BaseMetricsCollectedEvent.java @@ -0,0 +1,33 @@ +package com.xiaojukeji.kafka.manager.common.events.metrics; + +import org.springframework.context.ApplicationEvent; + +/** + * @author zengqiao + * @date 22/01/17 + */ +public class BaseMetricsCollectedEvent extends ApplicationEvent { + /** + * 物理集群ID + */ + protected final Long physicalClusterId; + + /** + * 收集时间,依据业务需要来设置,可以设置任务开始时间,也可以设置任务结束时间 + */ + protected final Long collectTime; + + public BaseMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime) { + super(source); + this.physicalClusterId = physicalClusterId; + this.collectTime = collectTime; + } + + public Long getPhysicalClusterId() { + return physicalClusterId; + } + + public Long getCollectTime() { + return collectTime; + } +} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BatchBrokerMetricsCollectedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BatchBrokerMetricsCollectedEvent.java new file mode 100644 index 00000000..629a44ea --- /dev/null +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BatchBrokerMetricsCollectedEvent.java @@ -0,0 +1,22 @@ +package com.xiaojukeji.kafka.manager.common.events.metrics; + +import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; + +import java.util.List; + +/** + * @author zengqiao + * @date 20/8/31 + */ +public class BatchBrokerMetricsCollectedEvent extends BaseMetricsCollectedEvent { + private final List metricsList; + + public BatchBrokerMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime, List metricsList) { + super(source, physicalClusterId, collectTime); + this.metricsList = metricsList; + } + + public List getMetricsList() { + return metricsList; + } +} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/BackoffUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/BackoffUtils.java new file mode 100644 index 00000000..afbf8fc4 --- /dev/null +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/BackoffUtils.java @@ -0,0 +1,75 @@ +package com.xiaojukeji.kafka.manager.common.utils; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class BackoffUtils { + private BackoffUtils() { + } + + /** + * 需要进行回退的事件信息 + * <回退事件名,回退结束时间> + */ + private static final Map NEED_BACK_OFF_EVENT_MAP = new ConcurrentHashMap<>(); + + public static void backoff(long timeUnitMs) { + if (timeUnitMs <= 0) { + return; + } + + try { + Thread.sleep(timeUnitMs); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + // ignore + } + } + + /** + * 记录回退设置 + * @param backoffEventKey 回退事件key + * @param backoffTimeUnitMs 回退时间(ms) + */ + public static void putNeedBackoffEvent(String backoffEventKey, 
Long backoffTimeUnitMs) { + if (backoffEventKey == null || backoffTimeUnitMs == null || backoffTimeUnitMs <= 0) { + return; + } + + NEED_BACK_OFF_EVENT_MAP.put(backoffEventKey, backoffTimeUnitMs + System.currentTimeMillis()); + } + + /** + * 移除回退设置 + * @param backoffEventKey 回退事件key + */ + public static void removeNeedBackoffEvent(String backoffEventKey) { + NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey); + } + + /** + * 检查是否需要回退 + * @param backoffEventKey 回退事件key + * @return + */ + public static boolean isNeedBackoff(String backoffEventKey) { + Long backoffEventEndTimeUnitMs = NEED_BACK_OFF_EVENT_MAP.get(backoffEventKey); + if (backoffEventEndTimeUnitMs == null) { + return false; + } + + if (backoffEventEndTimeUnitMs > System.currentTimeMillis()) { + return true; + } + + // 移除 + try { + NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey, backoffEventEndTimeUnitMs); + } catch (Exception e) { + // 如果key不存在,这里可能出现NPE,不过不管什么异常都可以忽略 + } + + return false; + } +} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java index 68109779..5964d162 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java @@ -1,7 +1,7 @@ package com.xiaojukeji.kafka.manager.common.utils.factory; -import com.alibaba.fastjson.JSONObject; import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; +import com.xiaojukeji.kafka.manager.common.utils.JsonUtils; import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; import org.apache.commons.pool2.BasePooledObjectFactory; import org.apache.commons.pool2.PooledObject; @@ -16,7 +16,7 @@ import java.util.Properties; * @author zengqiao * @date 20/8/24 */ -public class KafkaConsumerFactory extends BasePooledObjectFactory { +public class KafkaConsumerFactory extends BasePooledObjectFactory> { private ClusterDO clusterDO; public KafkaConsumerFactory(ClusterDO clusterDO) { @@ -25,17 +25,17 @@ public class KafkaConsumerFactory extends BasePooledObjectFactory @Override public KafkaConsumer create() { - return new KafkaConsumer(createKafkaConsumerProperties(clusterDO)); + return new KafkaConsumer(createKafkaConsumerProperties(clusterDO)); } @Override - public PooledObject wrap(KafkaConsumer obj) { - return new DefaultPooledObject(obj); + public PooledObject> wrap(KafkaConsumer obj) { + return new DefaultPooledObject<>(obj); } @Override - public void destroyObject(final PooledObject p) throws Exception { - KafkaConsumer kafkaConsumer = p.getObject(); + public void destroyObject(final PooledObject> p) throws Exception { + KafkaConsumer kafkaConsumer = p.getObject(); if (ValidateUtils.isNull(kafkaConsumer)) { return; } @@ -57,7 +57,7 @@ public class KafkaConsumerFactory extends BasePooledObjectFactory if (ValidateUtils.isBlank(clusterDO.getSecurityProperties())) { return properties; } - properties.putAll(JSONObject.parseObject(clusterDO.getSecurityProperties(), Properties.class)); + properties.putAll(JsonUtils.stringToObj(clusterDO.getSecurityProperties(), Properties.class)); return properties; } } \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java index bbc913c4..f5c380c2 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java @@ -1,5 +1,10 @@ package com.xiaojukeji.kafka.manager.common.utils.jmx; +import lombok.Data; +import lombok.ToString; + +@Data +@ToString public class JmxConfig { /** * 单台最大连接数 @@ -21,45 +26,8 @@ public class JmxConfig { */ private Boolean openSSL; - public Integer getMaxConn() { - return maxConn; - } - - public void setMaxConn(Integer maxConn) { - this.maxConn = maxConn; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Boolean isOpenSSL() { - return openSSL; - } - - public void setOpenSSL(Boolean openSSL) { - this.openSSL = openSSL; - } - - @Override - public String toString() { - return "JmxConfig{" + - "maxConn=" + maxConn + - ", username='" + username + '\'' + - ", password='" + password + '\'' + - ", openSSL=" + openSSL + - '}'; - } + /** + * 连接重试回退事件 + */ + private Long retryConnectBackoffTimeUnitMs; } diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java index 5625b37f..c66c7bc6 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java @@ -1,5 +1,6 @@ package com.xiaojukeji.kafka.manager.common.utils.jmx; +import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils; import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -13,11 +14,11 @@ import javax.naming.Context; import javax.rmi.ssl.SslRMIClientSocketFactory; import java.io.IOException; import java.net.MalformedURLException; -import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; /** * JMXConnector包装类 @@ -25,19 +26,27 @@ import java.util.concurrent.atomic.AtomicInteger; * @date 2015/11/9. 
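 * 说明: 通过maxConn信号量限制对单台Broker JMX的并发访问; 连接创建失败时记录回退事件, 回退窗口内不再尝试重建连接(详见BackoffUtils)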
*/ public class JmxConnectorWrap { - private final static Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class); + private static final Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class); - private String host; + private final Long physicalClusterId; - private int port; + private final Integer brokerId; + + private final String host; + + private final int port; private JMXConnector jmxConnector; - private AtomicInteger atomicInteger; + private final AtomicInteger atomicInteger; private JmxConfig jmxConfig; - public JmxConnectorWrap(String host, int port, JmxConfig jmxConfig) { + private final ReentrantLock modifyJMXConnectorLock = new ReentrantLock(); + + public JmxConnectorWrap(Long physicalClusterId, Integer brokerId, String host, int port, JmxConfig jmxConfig) { + this.physicalClusterId = physicalClusterId; + this.brokerId = brokerId; this.host = host; this.port = port; this.jmxConfig = jmxConfig; @@ -45,7 +54,12 @@ public class JmxConnectorWrap { this.jmxConfig = new JmxConfig(); } if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) { - this.jmxConfig.setMaxConn(1); + // 默认设置20 + this.jmxConfig.setMaxConn(20); + } + if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getRetryConnectBackoffTimeUnitMs())) { + // 默认回退10分钟 + this.jmxConfig.setRetryConnectBackoffTimeUnitMs(10 * 60 * 1000L); } this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn()); } @@ -57,17 +71,40 @@ public class JmxConnectorWrap { if (port == -1) { return false; } - return createJmxConnector(); + return safeCreateJmxConnector(); } - public synchronized void close() { + public void close() { + this.closeJmxConnect(); + } + + public void closeJmxConnect() { if (jmxConnector == null) { return; } + try { + modifyJMXConnectorLock.lock(); + + // 移除设置的backoff事件 + BackoffUtils.removeNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId)); + jmxConnector.close(); - } catch (IOException e) { - LOGGER.warn("close JmxConnector exception, host:{} port:{}.", host, port, e); + } catch (Exception e) { + LOGGER.error("close JmxConnector exception, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e); + } finally { + jmxConnector = null; + + modifyJMXConnectorLock.unlock(); + } + } + + private boolean safeCreateJmxConnector() { + try { + modifyJMXConnectorLock.lock(); + return createJmxConnector(); + } finally { + modifyJMXConnectorLock.unlock(); } } @@ -75,6 +112,12 @@ public class JmxConnectorWrap { if (jmxConnector != null) { return true; } + + if (BackoffUtils.isNeedBackoff(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId))) { + // 被设置了需要进行回退,则本次不进行创建 + return false; + } + String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port); try { Map environment = new HashMap(); @@ -82,7 +125,9 @@ public class JmxConnectorWrap { // fixed by riyuetianmu environment.put(JMXConnector.CREDENTIALS, new String[]{this.jmxConfig.getUsername(), this.jmxConfig.getPassword()}); } - if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) { + + if (jmxConfig.getOpenSSL() != null && this.jmxConfig.getOpenSSL()) { + // 开启ssl environment.put(Context.SECURITY_PROTOCOL, "ssl"); SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory(); environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory); @@ -90,13 +135,17 @@ public class JmxConnectorWrap { } jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), 
environment); - LOGGER.info("JMX connect success, host:{} port:{}.", host, port); + LOGGER.info("connect JMX success, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port); return true; } catch (MalformedURLException e) { - LOGGER.error("JMX url exception, host:{} port:{} jmxUrl:{}", host, port, jmxUrl, e); + LOGGER.error("connect JMX failed, JMX url exception, physicalClusterId:{} brokerId:{} host:{} port:{} jmxUrl:{}.", physicalClusterId, brokerId, host, port, jmxUrl, e); } catch (Exception e) { - LOGGER.error("JMX connect exception, host:{} port:{}.", host, port, e); + LOGGER.error("connect JMX failed, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e); } + + // 设置连接backoff + BackoffUtils.putNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId), this.jmxConfig.getRetryConnectBackoffTimeUnitMs()); + return false; } @@ -110,6 +159,11 @@ public class JmxConnectorWrap { acquire(); MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); return mBeanServerConnection.getAttribute(name, attribute); + } catch (IOException ioe) { + // io错误,则重置连接 + this.closeJmxConnect(); + + throw ioe; } finally { atomicInteger.incrementAndGet(); } @@ -125,6 +179,11 @@ public class JmxConnectorWrap { acquire(); MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); return mBeanServerConnection.getAttributes(name, attributes); + } catch (IOException ioe) { + // io错误,则重置连接 + this.closeJmxConnect(); + + throw ioe; } finally { atomicInteger.incrementAndGet(); } @@ -137,6 +196,11 @@ public class JmxConnectorWrap { acquire(); MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); return mBeanServerConnection.queryNames(name, query); + } catch (IOException ioe) { + // io错误,则重置连接 + this.closeJmxConnect(); + + throw ioe; } finally { atomicInteger.incrementAndGet(); } @@ -146,19 +210,21 @@ public class JmxConnectorWrap { long now = System.currentTimeMillis(); while (true) { try { - if (System.currentTimeMillis() - now > 60000) { - break; - } int num = atomicInteger.get(); if (num <= 0) { - Thread.sleep(2); - continue; + BackoffUtils.backoff(2); } - if (atomicInteger.compareAndSet(num, num - 1)) { + + if (atomicInteger.compareAndSet(num, num - 1) || System.currentTimeMillis() - now > 6000) { break; } } catch (Exception e) { + // ignore } } } + + private static String buildConnectJmxFailedBackoffEventKey(Long physicalClusterId, Integer brokerId) { + return "CONNECT_JMX_FAILED_BACK_OFF_EVENT_PHY_" + physicalClusterId + "_BROKER_" + brokerId; + } } diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java index 3c179b4f..598784ca 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java @@ -1,6 +1,17 @@ package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant; +import 
com.xiaojukeji.kafka.manager.common.entity.ao.common.IpPortData; +import com.xiaojukeji.kafka.manager.common.utils.NumberUtils; +import lombok.Data; + +import java.io.Serializable; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * @author zengqiao @@ -10,7 +21,7 @@ import java.util.List; * 节点结构: * { * "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"}, - * "endpoints":["SASL_PLAINTEXT://10.179.162.202:9093"], + * "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093"], * "jmx_port":9999, * "host":null, * "timestamp":"1546632983233", @@ -18,22 +29,48 @@ import java.util.List; * "version":4, * "rack": "CY" * } + * + * { + * "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT","PLAINTEXT":"PLAINTEXT"}, + * "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093","PLAINTEXT://127.0.0.1:9092"], + * "jmx_port":8099, + * "host":"127.0.0.1", + * "timestamp":"1628833925822", + * "port":9092, + * "version":4 + * } + * + * { + * "listener_security_protocol_map":{"EXTERNAL":"SASL_PLAINTEXT","INTERNAL":"SASL_PLAINTEXT"}, + * "endpoints":["EXTERNAL://127.0.0.1:7092","INTERNAL://127.0.0.1:7093"], + * "jmx_port":8099, + * "host":null, + * "timestamp":"1627289710439", + * "port":-1, + * "version":4 + * } + * */ -public class BrokerMetadata implements Cloneable { +@Data +@JsonIgnoreProperties(ignoreUnknown = true) +public class BrokerMetadata implements Serializable { + private static final long serialVersionUID = 3918113492423375809L; + private long clusterId; private int brokerId; private List endpoints; + // > + private Map endpointMap; + private String host; private int port; - /* - * ZK上对应的字段就是这个名字, 不要进行修改 - */ - private int jmx_port; + @JsonProperty("jmx_port") + private int jmxPort; private String version; @@ -41,91 +78,54 @@ public class BrokerMetadata implements Cloneable { private String rack; - public long getClusterId() { - return clusterId; + @JsonIgnore + public String getExternalHost() { + if (!endpointMap.containsKey(KafkaConstant.EXTERNAL_KEY)) { + return null; + } + return endpointMap.get(KafkaConstant.EXTERNAL_KEY).getIp(); } - public void setClusterId(long clusterId) { - this.clusterId = clusterId; + @JsonIgnore + public String getInternalHost() { + if (!endpointMap.containsKey(KafkaConstant.INTERNAL_KEY)) { + return null; + } + return endpointMap.get(KafkaConstant.INTERNAL_KEY).getIp(); } - public int getBrokerId() { - return brokerId; - } + public static void parseAndUpdateBrokerMetadata(BrokerMetadata brokerMetadata) { + brokerMetadata.setEndpointMap(new HashMap<>()); - public void setBrokerId(int brokerId) { - this.brokerId = brokerId; - } + if (brokerMetadata.getEndpoints().isEmpty()) { + return; + } - public List getEndpoints() { - return endpoints; - } + // example EXTERNAL://10.179.162.202:7092 + for (String endpoint: brokerMetadata.getEndpoints()) { + int idx1 = endpoint.indexOf("://"); + int idx2 = endpoint.lastIndexOf(":"); + if (idx1 == -1 || idx2 == -1 || idx1 == idx2) { + continue; + } - public void setEndpoints(List endpoints) { - this.endpoints = endpoints; - } + String brokerHost = endpoint.substring(idx1 + "://".length(), idx2); + String brokerPort = endpoint.substring(idx2 + 1); - public String getHost() { - return host; - } + brokerMetadata.getEndpointMap().put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort)); - public void setHost(String host) { - this.host = host; - } + if (KafkaConstant.EXTERNAL_KEY.equals(endpoint.substring(0, idx1))) { + // 优先使用external的地址进行展示 + brokerMetadata.setHost(brokerHost); + 
brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort)); + } - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - public int getJmxPort() { - return jmx_port; - } - - public void setJmxPort(int jmxPort) { - this.jmx_port = jmxPort; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public long getTimestamp() { - return timestamp; - } - - public void setTimestamp(long timestamp) { - this.timestamp = timestamp; - } - - public String getRack() { - return rack; - } - - public void setRack(String rack) { - this.rack = rack; - } - - @Override - public String toString() { - return "BrokerMetadata{" + - "clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", endpoints=" + endpoints + - ", host='" + host + '\'' + - ", port=" + port + - ", jmxPort=" + jmx_port + - ", version='" + version + '\'' + - ", timestamp=" + timestamp + - ", rack='" + rack + '\'' + - '}'; + if (null == brokerMetadata.getHost()) { + brokerMetadata.setHost(brokerHost); + brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort)); + } + } } } + diff --git a/kafka-manager-console/package.json b/kafka-manager-console/package.json index 3be33c21..c3c94acc 100644 --- a/kafka-manager-console/package.json +++ b/kafka-manager-console/package.json @@ -1,9 +1,10 @@ { "name": "logi-kafka", - "version": "2.4.3", + "version": "2.5.0", "description": "", "scripts": { - "start": "webpack-dev-server", + "prestart": "npm install --save-dev webpack-dev-server", + "start": "webpack serve", "daily-build": "cross-env NODE_ENV=production webpack", "pre-build": "cross-env NODE_ENV=production webpack", "prod-build": "cross-env NODE_ENV=production webpack", @@ -13,18 +14,19 @@ "license": "ISC", "devDependencies": { "@hot-loader/react-dom": "^16.8.6", - "@types/echarts": "^4.4.1", + "@types/events": "^3.0.0", "@types/lodash.debounce": "^4.0.6", "@types/react": "^16.8.8", "@types/react-dom": "^16.8.2", "@types/react-router-dom": "^4.3.1", "@types/spark-md5": "^3.0.2", + "@webpack-cli/serve": "^1.6.0", "antd": "^3.26.15", "clean-webpack-plugin": "^3.0.0", "clipboard": "^2.0.8", "cross-env": "^7.0.2", "css-loader": "^2.1.0", - "echarts": "^4.5.0", + "echarts": "^5.2.1", "file-loader": "^5.0.2", "html-webpack-plugin": "^3.2.0", "increase-memory-limit": "^1.0.7", @@ -50,11 +52,10 @@ "typescript": "^3.3.3333", "url-loader": "^4.1.1", "webpack": "^4.29.6", - "webpack-cli": "^3.2.3", - "webpack-dev-server": "^3.2.1", + "webpack-cli": "^4.9.1", "xlsx": "^0.16.1" }, "dependencies": { "format-to-json": "^1.0.4" } -} \ No newline at end of file +} diff --git a/kafka-manager-console/src/assets/image/weChat.png b/kafka-manager-console/src/assets/image/weChat.png new file mode 100644 index 00000000..126f5816 Binary files /dev/null and b/kafka-manager-console/src/assets/image/weChat.png differ diff --git a/kafka-manager-console/src/assets/image/wechat.jpeg b/kafka-manager-console/src/assets/image/wechats.jpeg similarity index 100% rename from kafka-manager-console/src/assets/image/wechat.jpeg rename to kafka-manager-console/src/assets/image/wechats.jpeg diff --git a/kafka-manager-console/src/component/chart/bar-chart.tsx b/kafka-manager-console/src/component/chart/bar-chart.tsx index c2f67099..d31fcfd7 100644 --- a/kafka-manager-console/src/component/chart/bar-chart.tsx +++ b/kafka-manager-console/src/component/chart/bar-chart.tsx @@ -1,14 +1,29 @@ import * as React from 'react'; import { Spin, notification } from 
'component/antd'; -import echarts, { EChartOption } from 'echarts/lib/echarts'; +import * as echarts from 'echarts/core'; // 引入柱状图 -import 'echarts/lib/chart/bar'; +import { BarChart } from 'echarts/charts'; // 引入提示框和标题组件 -import 'echarts/lib/component/tooltip'; -import 'echarts/lib/component/title'; -import 'echarts/lib/component/legend'; +import { + TitleComponent, + TooltipComponent, + LegendComponent, + GridComponent, +} from 'echarts/components'; +import { CanvasRenderer } from 'echarts/renderers'; +import { EChartsOption } from 'echarts'; + +// 注册必须的组件 +echarts.use([ + TitleComponent, + LegendComponent, + TooltipComponent, + BarChart, + GridComponent, + CanvasRenderer, +]); interface IChartProps { getChartData: any; @@ -38,7 +53,7 @@ export class BarChartComponet extends React.Component { this.chart.resize(); } - public isHasData = (data: EChartOption) => { + public isHasData = (data: any) => { const noData = !(data.series && data.series.length); this.setState({ noData }); return !noData; @@ -54,7 +69,7 @@ export class BarChartComponet extends React.Component { const chartOptions = getChartData(); if ((typeof chartOptions.then) === 'function') { - return chartOptions.then((data: EChartOption) => { + return chartOptions.then((data: EChartsOption) => { this.setState({ loading: false }); if (this.isHasData(data)) { diff --git a/kafka-manager-console/src/component/chart/date-picker-chart.tsx b/kafka-manager-console/src/component/chart/date-picker-chart.tsx index 0e0d6d6a..39878805 100644 --- a/kafka-manager-console/src/component/chart/date-picker-chart.tsx +++ b/kafka-manager-console/src/component/chart/date-picker-chart.tsx @@ -3,16 +3,34 @@ import { DatePicker, notification, Spin } from 'component/antd'; import moment, { Moment } from 'moment'; import { timeStampStr } from 'constants/strategy'; import { disabledDate } from 'lib/utils'; -import echarts from 'echarts'; +import * as echarts from 'echarts/core'; -// 引入柱状图和折线图 -import 'echarts/lib/chart/bar'; -import 'echarts/lib/chart/line'; +// 引入柱状图 +import { BarChart, LineChart } from 'echarts/charts'; // 引入提示框和标题组件 -import 'echarts/lib/component/tooltip'; -import 'echarts/lib/component/title'; -import 'echarts/lib/component/legend'; +import { + TitleComponent, + TooltipComponent, + LegendComponent, + GridComponent, + MarkLineComponent, + DatasetComponent, +} from 'echarts/components'; +import { CanvasRenderer } from 'echarts/renderers'; + +// 注册必须的组件 +echarts.use([ + TitleComponent, + LegendComponent, + TooltipComponent, + GridComponent, + BarChart, + LineChart, + CanvasRenderer, + DatasetComponent, + MarkLineComponent, +]); import './index.less'; const { RangePicker } = DatePicker; @@ -60,6 +78,23 @@ export class ChartWithDatePicker extends React.Component { public changeChartOptions(options: any) { const noData = options.series.length ? false : true; this.setState({ noData }); + options.tooltip.formatter = (params: any) => { + let res = + '
<div>' +
+        params[0].data.time +
+        '</div>';
+      // tslint:disable-next-line:prefer-for-of
+      for (let i = 0; i < params.length; i++) {
+        res += `<div>
+          <span>${params[i].seriesName}</span>
+          <span>${params[i].data[params[i].seriesName]}</span>
+        </div>
`; + } + return res; + }; this.chart.setOption(options, true); } @@ -79,7 +114,7 @@ export class ChartWithDatePicker extends React.Component { public render() { const { customerNode } = this.props; return ( -
+
{customerNode} diff --git a/kafka-manager-console/src/component/chart/doughnut-chart.tsx b/kafka-manager-console/src/component/chart/doughnut-chart.tsx index 0b2c3ebf..d4069b48 100644 --- a/kafka-manager-console/src/component/chart/doughnut-chart.tsx +++ b/kafka-manager-console/src/component/chart/doughnut-chart.tsx @@ -1,13 +1,27 @@ import * as React from 'react'; import { Spin } from 'component/antd'; -import echarts from 'echarts/lib/echarts'; -// 引入饼状图 -import 'echarts/lib/chart/pie'; -// 引入提示框和标题组件 -import 'echarts/lib/component/tooltip'; -import 'echarts/lib/component/title'; -import 'echarts/lib/component/legend'; +import * as echarts from 'echarts/core'; +// 引入饼图 +import { PieChart } from 'echarts/charts'; +// 引入提示框和标题组件 +import { + TitleComponent, + TooltipComponent, + LegendComponent, + GridComponent, +} from 'echarts/components'; +import { CanvasRenderer } from 'echarts/renderers'; + +// 注册必须的组件 +echarts.use([ + PieChart, + TitleComponent, + LegendComponent, + TooltipComponent, + GridComponent, + CanvasRenderer, +]); interface IPieProps { getChartData: any; } diff --git a/kafka-manager-console/src/component/chart/line-chart.tsx b/kafka-manager-console/src/component/chart/line-chart.tsx index 6b5db5be..230a503d 100644 --- a/kafka-manager-console/src/component/chart/line-chart.tsx +++ b/kafka-manager-console/src/component/chart/line-chart.tsx @@ -1,25 +1,45 @@ import React from 'react'; -import echarts, { EChartOption } from 'echarts/lib/echarts'; -import 'echarts/lib/chart/pie'; -import 'echarts/lib/chart/line'; -import 'echarts/lib/component/legend'; -import 'echarts/lib/component/tooltip'; -import 'echarts/lib/component/title'; -import 'echarts/lib/component/axis'; +import * as echarts from 'echarts/core'; import './index.less'; +// 引入柱状图 +import { PieChart, LineChart } from 'echarts/charts'; + +// 引入提示框和标题组件 +import { + TitleComponent, + TooltipComponent, + LegendComponent, + GridComponent, + ToolboxComponent, + DatasetComponent, +} from 'echarts/components'; +import { CanvasRenderer } from 'echarts/renderers'; + +// 注册必须的组件 +echarts.use([ + PieChart, + LineChart, + ToolboxComponent, + TitleComponent, + LegendComponent, + TooltipComponent, + GridComponent, + DatasetComponent, + CanvasRenderer, +]); export interface IEchartsProps { width?: number; height?: number; - options?: EChartOption; + options?: any; } -export const hasData = (options: EChartOption) => { +export const hasData = (options: any) => { if (options && options.series && options.series.length) return true; return false; }; -export default class LineChart extends React.Component { +export default class LineCharts extends React.Component { public id = null as HTMLDivElement; public myChart = null as echarts.ECharts; @@ -27,7 +47,7 @@ export default class LineChart extends React.Component { public componentDidMount() { const { options } = this.props; this.myChart = echarts.init(this.id); - this.myChart.setOption(options); + this.myChart.setOption(options, true); window.addEventListener('resize', this.resize); } @@ -41,7 +61,7 @@ export default class LineChart extends React.Component { public refresh = () => { const { options } = this.props; - this.myChart.setOption(options); + this.myChart.setOption(options, true); } public resize = () => { @@ -50,6 +70,6 @@ export default class LineChart extends React.Component { public render() { const { height, width } = this.props; - return
<div ref={id => this.id = id} style={{width: `${width}px`, height: `${height}px`}} />;
+    return <div ref={id =>
this.id = id} style={{ width: `${width}px`, height: `${height}px` }} />; } } diff --git a/kafka-manager-console/src/container/admin/data-curve/config.ts b/kafka-manager-console/src/container/admin/data-curve/config.ts index 511c791d..e26fd51c 100644 --- a/kafka-manager-console/src/container/admin/data-curve/config.ts +++ b/kafka-manager-console/src/container/admin/data-curve/config.ts @@ -1,4 +1,3 @@ -import { EChartOption } from 'echarts/lib/echarts'; import moment from 'moment'; import { ICurve } from 'container/common-curve/config'; import { adminMonitor } from 'store/admin-monitor'; @@ -124,7 +123,7 @@ export interface ICurveType { type: curveType; title: string; curves: ICurve[]; - parser: (option: ICurve, data: any[]) => EChartOption; + parser: (option: ICurve, data: any[]) => any; } export const byteTypeCurves: ICurveType[] = [ diff --git a/kafka-manager-console/src/container/admin/data-curve/parser.ts b/kafka-manager-console/src/container/admin/data-curve/parser.ts index 4ab9f5a8..d15088f6 100644 --- a/kafka-manager-console/src/container/admin/data-curve/parser.ts +++ b/kafka-manager-console/src/container/admin/data-curve/parser.ts @@ -1,5 +1,5 @@ import moment from 'moment'; -import { EChartOption } from 'echarts'; +import { EChartsOption } from 'echarts'; import { ICurve, ILineData, baseLineLegend, baseLineGrid, baseAxisStyle, noAxis, UNIT_HEIGHT } from 'container/common-curve/config'; import { IClusterMetrics, ISeriesOption } from 'types/base-type'; import { timeFormat } from 'constants/strategy'; @@ -48,20 +48,20 @@ export const getBaseOptions = (option: ICurve, data: ILineData[]) => { return Number(i.value); }), }], - } as EChartOption; + } as EChartsOption; }; -export const parseLine = (option: ICurve, data: ILineData[]): EChartOption => { +export const parseLine = (option: ICurve, data: ILineData[]): EChartsOption => { return Object.assign({}, getBaseOptions(option, data), { legend: { ...baseLineLegend, bottom: '0', align: 'auto', }, - }) as EChartOption; + }) as EChartsOption; }; -export const parseBrokerMetricOption = (option: ICurve, data: IClusterMetrics[]): EChartOption => { +export const parseBrokerMetricOption = (option: ICurve, data: IClusterMetrics[]): EChartsOption => { let name; let series: ISeriesOption[]; data = data || []; diff --git a/kafka-manager-console/src/container/alarm/alarm-detail/history-detail.tsx b/kafka-manager-console/src/container/alarm/alarm-detail/history-detail.tsx index 7d9cae9f..956bd117 100644 --- a/kafka-manager-console/src/container/alarm/alarm-detail/history-detail.tsx +++ b/kafka-manager-console/src/container/alarm/alarm-detail/history-detail.tsx @@ -6,7 +6,7 @@ import { alarm } from 'store/alarm'; import { observer } from 'mobx-react'; import { handlePageBack } from 'lib/utils'; import LineChart, { hasData } from 'component/chart/line-chart'; -import { EChartOption } from 'echarts'; +import { EChartsOption } from 'echarts'; import { timeFormat } from 'constants/strategy'; import Url from 'lib/url-parser'; import moment = require('moment'); @@ -40,7 +40,7 @@ export class HistoryDetail extends React.Component { return
; } - public renderEchart = (options: EChartOption, loading = false) => { + public renderEchart = (options: EChartsOption, loading = false) => { const data = hasData(options); if (loading) return this.renderLoading(400); if (!data) return this.renderNoData(400); @@ -51,7 +51,7 @@ export class HistoryDetail extends React.Component { } public renderHistoricalTraffic(metric: IMonitorMetric) { - const option = this.getChartOption() as EChartOption; + const option = this.getChartOption() as EChartsOption; return ( <> diff --git a/kafka-manager-console/src/container/common-curve/config.ts b/kafka-manager-console/src/container/common-curve/config.ts index b5206f4f..79b0d1fe 100644 --- a/kafka-manager-console/src/container/common-curve/config.ts +++ b/kafka-manager-console/src/container/common-curve/config.ts @@ -1,5 +1,4 @@ -import { EChartOption } from 'echarts/lib/echarts'; -import moment from 'moment'; +import { EChartsOption } from 'echarts'; export interface ILineData { value: number; @@ -9,7 +8,7 @@ export interface ICurve { title?: string; path: string; colors: string[]; - parser?: (option: ICurve, data: ILineData) => EChartOption; + parser?: (option: ICurve, data: ILineData) => EChartsOption; message?: string; unit?: string; api?: any; @@ -69,13 +68,13 @@ export const noAxis = { }, }; -export const getHight = (options: EChartOption) => { - let grid = options ? options.grid as EChartOption.Grid : null; +export const getHight = (options: any) => { + let grid = options ? options.grid : null; if (!options || !grid) grid = baseLineGrid; return Number(grid.height) + getLegendHight(options) + Number(grid.top) + LEGEND_PADDING + UNIT_HEIGHT; }; -export const getLegendHight = (options: EChartOption) => { +export const getLegendHight = (options: any) => { if (!options) return 0; if (options.legend.show === false) return 0; const legendHight = options.legend.textStyle.lineHeight + defaultLegendPadding; diff --git a/kafka-manager-console/src/container/common-curve/index.tsx b/kafka-manager-console/src/container/common-curve/index.tsx index 910aa70d..6dc09b90 100644 --- a/kafka-manager-console/src/container/common-curve/index.tsx +++ b/kafka-manager-console/src/container/common-curve/index.tsx @@ -1,4 +1,4 @@ -import { EChartOption } from 'echarts'; +import { EChartsOption } from 'echarts'; import { observer } from 'mobx-react'; import React from 'react'; import { curveInfo } from 'store/curve-info'; @@ -10,7 +10,7 @@ import LineChart, { hasData } from 'component/chart/line-chart'; export interface ICommonCurveProps { options: ICurve; - parser?: (option: ICurve, data: any[]) => EChartOption; + parser?: (option: ICurve, data: any[]) => any; } @observer @@ -41,7 +41,7 @@ export class CommonCurve extends React.Component { fullScreen.show(this.renderCurve(options, loading, true)); } - public renderOpBtns = (options: EChartOption, expand = false) => { + public renderOpBtns = (options: EChartsOption, expand = false) => { const data = hasData(options); return (
@@ -85,7 +85,7 @@ export class CommonCurve extends React.Component { return
; } - public renderEchart = (options: EChartOption, loading = false) => { + public renderEchart = (options: EChartsOption, loading = false) => { const height = getHight(options); const data = hasData(options); @@ -94,7 +94,7 @@ export class CommonCurve extends React.Component { return ; } - public renderCurve = (options: EChartOption, loading: boolean, expand = false) => { + public renderCurve = (options: any, loading: boolean, expand = false) => { const data = hasData(options); return (
diff --git a/kafka-manager-console/src/container/header/index.tsx b/kafka-manager-console/src/container/header/index.tsx index e12e397c..0205e1be 100644 --- a/kafka-manager-console/src/container/header/index.tsx +++ b/kafka-manager-console/src/container/header/index.tsx @@ -7,7 +7,7 @@ import { urlPrefix } from 'constants/left-menu'; import { region, IRegionIdcs } from 'store/region'; import logoUrl from '../../assets/image/kafka-logo.png'; import userIcon from '../../assets/image/normal.png'; -import weChat from '../../assets/image/wechat.jpeg'; +import weChat from '../../assets/image/weChat.png'; import { users } from 'store/users'; import { observer } from 'mobx-react'; import { Link } from 'react-router-dom'; @@ -60,8 +60,8 @@ export const Header = observer((props: IHeader) => { }); }; const content = ( -
- +
+
); const helpCenter = ( @@ -144,8 +144,8 @@ export const Header = observer((props: IHeader) => {
-          <span>Kafka Manager</span>
-          <span>v2.4.2</span>
+          <span>LogiKM</span>
+          <span>v2.5.0</span> {/* add a hyperlink for the version */}
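The chart components earlier in this patch (`bar-chart.tsx`, `date-picker-chart.tsx`, `doughnut-chart.tsx`, `line-chart.tsx`) all migrate from echarts 4's side-effect imports (`import 'echarts/lib/chart/bar'`) to echarts 5's tree-shakable registration via `echarts.use`. A minimal sketch of that pattern, outside this patch and with illustrative names (`renderBar` and the demo option are assumptions, not code from the diff):

```typescript
// Sketch of the echarts 5 modular-import pattern used by the chart components
// in this patch. `renderBar` and the demo option below are illustrative only.
import * as echarts from 'echarts/core';
import { BarChart } from 'echarts/charts';
import {
  TitleComponent,
  TooltipComponent,
  LegendComponent,
  GridComponent,
} from 'echarts/components';
import { CanvasRenderer } from 'echarts/renderers';

// Register only what this module actually renders; anything not registered
// here stays out of the bundle and will fail at runtime if referenced.
echarts.use([BarChart, TitleComponent, TooltipComponent, LegendComponent, GridComponent, CanvasRenderer]);

export function renderBar(dom: HTMLDivElement): echarts.ECharts {
  const chart = echarts.init(dom);
  // The second argument `true` (notMerge) replaces the previous option instead
  // of merging into it, mirroring the setOption(options, true) calls the diff
  // introduces in line-chart.tsx.
  chart.setOption(
    {
      title: { text: 'bytes in (demo)' },
      tooltip: { trigger: 'axis' },
      legend: { data: ['bytesIn'] },
      xAxis: { type: 'category', data: ['10:00', '10:01', '10:02'] },
      yAxis: { type: 'value' },
      series: [{ name: 'bytesIn', type: 'bar', data: [120, 200, 150] }],
    },
    true,
  );
  return chart;
}
```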
diff --git a/kafka-manager-console/src/container/user-center/order-list.tsx b/kafka-manager-console/src/container/user-center/order-list.tsx index 6c81b0ec..5ed5b961 100644 --- a/kafka-manager-console/src/container/user-center/order-list.tsx +++ b/kafka-manager-console/src/container/user-center/order-list.tsx @@ -115,11 +115,19 @@ export class OrderList extends SearchAndFilterContainer { status, { title: '申请时间', - dataIndex: 'gmtTime', - key: 'gmtTime', - sorter: (a: IBaseOrder, b: IBaseOrder) => b.gmtTime - a.gmtTime, - render: (t: number) => moment(t).format(timeFormat), - }, { + dataIndex: 'gmtCreate', + key: 'gmtCreate', + sorter: (a: IBaseOrder, b: IBaseOrder) => b.gmtCreate - a.gmtCreate, + render: (t: number) => t ? moment(t).format(timeFormat) : '-', + }, + { + title: '审批时间', + dataIndex: 'gmtHandle', + key: 'gmtHandle', + sorter: (a: IBaseOrder, b: IBaseOrder) => b.gmtHandle - a.gmtHandle, + render: (t: number) => t ? moment(t).format(timeFormat) : '-', + }, + { title: '操作', key: 'operation', dataIndex: 'operation', diff --git a/kafka-manager-console/src/routers/index.htm b/kafka-manager-console/src/routers/index.htm index b8d8454f..7cb7a0fd 100644 --- a/kafka-manager-console/src/routers/index.htm +++ b/kafka-manager-console/src/routers/index.htm @@ -1,12 +1,15 @@ + - KafkaManager + LogiKM +
+ \ No newline at end of file diff --git a/kafka-manager-console/src/store/curve-info.ts b/kafka-manager-console/src/store/curve-info.ts index 9531c849..fc4c57a9 100644 --- a/kafka-manager-console/src/store/curve-info.ts +++ b/kafka-manager-console/src/store/curve-info.ts @@ -1,6 +1,6 @@ import { observable, action } from 'mobx'; import moment = require('moment'); -import { EChartOption } from 'echarts/lib/echarts'; +import { EChartsOption } from 'echarts'; import { ICurve } from 'container/common-curve/config'; import { curveKeys, PERIOD_RADIO_MAP } from 'container/admin/data-curve/config'; import { timeFormat } from 'constants/strategy'; @@ -13,7 +13,7 @@ class CurveInfo { public timeRange: [moment.Moment, moment.Moment] = PERIOD_RADIO_MAP.get(this.periodKey).dateRange; @observable - public curveData: { [key: string]: EChartOption } = {}; + public curveData: { [key: string]: EChartsOption } = {}; @observable public curveLoading: { [key: string]: boolean } = {}; @@ -25,7 +25,7 @@ class CurveInfo { public currentOperator: string; @action.bound - public setCurveData(key: curveKeys | string, data: EChartOption) { + public setCurveData(key: curveKeys | string, data: EChartsOption) { this.curveData[key] = data; } @@ -59,7 +59,7 @@ class CurveInfo { public getCommonCurveData = ( options: ICurve, - parser: (option: ICurve, data: any[]) => EChartOption, + parser: (option: ICurve, data: any[]) => EChartsOption, reload?: boolean) => { const { path } = options; this.setCurveData(path, null); diff --git a/kafka-manager-console/webpack.config.js b/kafka-manager-console/webpack.config.js index a07d9990..d6d12fa8 100644 --- a/kafka-manager-console/webpack.config.js +++ b/kafka-manager-console/webpack.config.js @@ -122,11 +122,11 @@ module.exports = { }, }, devServer: { - contentBase: outPath, + // contentBase: outPath, host: '127.0.0.1', port: 1025, hot: true, - disableHostCheck: true, + // disableHostCheck: true, historyApiFallback: true, proxy: { '/api/v1/': { diff --git a/kafka-manager-core/pom.xml b/kafka-manager-core/pom.xml index 81675a43..e00663ed 100644 --- a/kafka-manager-core/pom.xml +++ b/kafka-manager-core/pom.xml @@ -24,7 +24,6 @@ 1.8 UTF-8 UTF-8 - 5.1.3.RELEASE @@ -38,12 +37,10 @@ org.springframework spring-web - ${spring-version} org.springframework spring-test - ${spring-version} diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ConsumerMetadataCache.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ConsumerMetadataCache.java index 41fd0092..3fd6aaac 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ConsumerMetadataCache.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ConsumerMetadataCache.java @@ -17,6 +17,9 @@ public class ConsumerMetadataCache { private static final Map CG_METADATA_IN_BK_MAP = new ConcurrentHashMap<>(); + private ConsumerMetadataCache() { + } + public static void putConsumerMetadataInZK(Long clusterId, ConsumerMetadata consumerMetadata) { if (clusterId == null || consumerMetadata == null) { return; diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java index 921b13ba..2e1e9e71 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java +++ 
b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java @@ -1,7 +1,7 @@ package com.xiaojukeji.kafka.manager.service.cache; -import com.alibaba.fastjson.JSONObject; import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; +import com.xiaojukeji.kafka.manager.common.utils.JsonUtils; import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; import com.xiaojukeji.kafka.manager.common.utils.factory.KafkaConsumerFactory; import kafka.admin.AdminClient; @@ -14,6 +14,8 @@ import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; import java.util.Map; import java.util.Properties; @@ -25,20 +27,36 @@ import java.util.concurrent.locks.ReentrantLock; * @author zengqiao * @date 19/12/24 */ +@Service public class KafkaClientPool { - private final static Logger LOGGER = LoggerFactory.getLogger(KafkaClientPool.class); + private static final Logger LOGGER = LoggerFactory.getLogger(KafkaClientPool.class); + + @Value(value = "${client-pool.kafka-consumer.min-idle-client-num:24}") + private Integer kafkaConsumerMinIdleClientNum; + + @Value(value = "${client-pool.kafka-consumer.max-idle-client-num:24}") + private Integer kafkaConsumerMaxIdleClientNum; + + @Value(value = "${client-pool.kafka-consumer.max-total-client-num:24}") + private Integer kafkaConsumerMaxTotalClientNum; + + @Value(value = "${client-pool.kafka-consumer.borrow-timeout-unit-ms:3000}") + private Integer kafkaConsumerBorrowTimeoutUnitMs; /** * AdminClient */ - private static Map AdminClientMap = new ConcurrentHashMap<>(); + private static final Map ADMIN_CLIENT_MAP = new ConcurrentHashMap<>(); - private static Map> KAFKA_PRODUCER_MAP = new ConcurrentHashMap<>(); + private static final Map> KAFKA_PRODUCER_MAP = new ConcurrentHashMap<>(); - private static Map> KAFKA_CONSUMER_POOL = new ConcurrentHashMap<>(); + private static final Map>> KAFKA_CONSUMER_POOL = new ConcurrentHashMap<>(); private static ReentrantLock lock = new ReentrantLock(); + private KafkaClientPool() { + } + private static void initKafkaProducerMap(Long clusterId) { ClusterDO clusterDO = PhysicalClusterMetadataManager.getClusterFromCache(clusterId); if (clusterDO == null) { @@ -55,7 +73,7 @@ public class KafkaClientPool { properties.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4"); properties.setProperty(ProducerConfig.LINGER_MS_CONFIG, "10"); properties.setProperty(ProducerConfig.RETRIES_CONFIG, "3"); - KAFKA_PRODUCER_MAP.put(clusterId, new KafkaProducer(properties)); + KAFKA_PRODUCER_MAP.put(clusterId, new KafkaProducer<>(properties)); } catch (Exception e) { LOGGER.error("create kafka producer failed, clusterDO:{}.", clusterDO, e); } finally { @@ -77,25 +95,22 @@ public class KafkaClientPool { if (ValidateUtils.isNull(kafkaProducer)) { return false; } - kafkaProducer.send(new ProducerRecord(topicName, data)); + kafkaProducer.send(new ProducerRecord<>(topicName, data)); return true; } - private static void initKafkaConsumerPool(ClusterDO clusterDO) { + private void initKafkaConsumerPool(ClusterDO clusterDO) { lock.lock(); try { - GenericObjectPool objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId()); + GenericObjectPool> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId()); if (objectPool != null) { return; } - GenericObjectPoolConfig config = new GenericObjectPoolConfig(); - 
config.setMaxIdle(24); - config.setMinIdle(24); - config.setMaxTotal(24); - KAFKA_CONSUMER_POOL.put( - clusterDO.getId(), - new GenericObjectPool(new KafkaConsumerFactory(clusterDO), config) - ); + GenericObjectPoolConfig> config = new GenericObjectPoolConfig<>(); + config.setMaxIdle(kafkaConsumerMaxIdleClientNum); + config.setMinIdle(kafkaConsumerMinIdleClientNum); + config.setMaxTotal(kafkaConsumerMaxTotalClientNum); + KAFKA_CONSUMER_POOL.put(clusterDO.getId(), new GenericObjectPool<>(new KafkaConsumerFactory(clusterDO), config)); } catch (Exception e) { LOGGER.error("create kafka consumer pool failed, clusterDO:{}.", clusterDO, e); } finally { @@ -106,7 +121,7 @@ public class KafkaClientPool { public static void closeKafkaConsumerPool(Long clusterId) { lock.lock(); try { - GenericObjectPool objectPool = KAFKA_CONSUMER_POOL.remove(clusterId); + GenericObjectPool> objectPool = KAFKA_CONSUMER_POOL.remove(clusterId); if (objectPool == null) { return; } @@ -118,11 +133,11 @@ public class KafkaClientPool { } } - public static KafkaConsumer borrowKafkaConsumerClient(ClusterDO clusterDO) { + public KafkaConsumer borrowKafkaConsumerClient(ClusterDO clusterDO) { if (ValidateUtils.isNull(clusterDO)) { return null; } - GenericObjectPool objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId()); + GenericObjectPool> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId()); if (ValidateUtils.isNull(objectPool)) { initKafkaConsumerPool(clusterDO); objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId()); @@ -132,18 +147,18 @@ public class KafkaClientPool { } try { - return objectPool.borrowObject(3000); + return objectPool.borrowObject(kafkaConsumerBorrowTimeoutUnitMs); } catch (Exception e) { LOGGER.error("borrow kafka consumer client failed, clusterDO:{}.", clusterDO, e); } return null; } - public static void returnKafkaConsumerClient(Long physicalClusterId, KafkaConsumer kafkaConsumer) { + public static void returnKafkaConsumerClient(Long physicalClusterId, KafkaConsumer kafkaConsumer) { if (ValidateUtils.isNull(physicalClusterId) || ValidateUtils.isNull(kafkaConsumer)) { return; } - GenericObjectPool objectPool = KAFKA_CONSUMER_POOL.get(physicalClusterId); + GenericObjectPool> objectPool = KAFKA_CONSUMER_POOL.get(physicalClusterId); if (ValidateUtils.isNull(objectPool)) { return; } @@ -155,7 +170,7 @@ public class KafkaClientPool { } public static AdminClient getAdminClient(Long clusterId) { - AdminClient adminClient = AdminClientMap.get(clusterId); + AdminClient adminClient = ADMIN_CLIENT_MAP.get(clusterId); if (adminClient != null) { return adminClient; } @@ -166,26 +181,26 @@ public class KafkaClientPool { Properties properties = createProperties(clusterDO, false); lock.lock(); try { - adminClient = AdminClientMap.get(clusterId); + adminClient = ADMIN_CLIENT_MAP.get(clusterId); if (adminClient != null) { return adminClient; } - AdminClientMap.put(clusterId, AdminClient.create(properties)); + ADMIN_CLIENT_MAP.put(clusterId, AdminClient.create(properties)); } catch (Exception e) { LOGGER.error("create kafka admin client failed, clusterId:{}.", clusterId, e); } finally { lock.unlock(); } - return AdminClientMap.get(clusterId); + return ADMIN_CLIENT_MAP.get(clusterId); } public static void closeAdminClient(ClusterDO cluster) { - if (AdminClientMap.containsKey(cluster.getId())) { - AdminClientMap.get(cluster.getId()).close(); + if (ADMIN_CLIENT_MAP.containsKey(cluster.getId())) { + ADMIN_CLIENT_MAP.get(cluster.getId()).close(); } } - public static Properties createProperties(ClusterDO clusterDO, Boolean 
serialize) { + public static Properties createProperties(ClusterDO clusterDO, boolean serialize) { Properties properties = new Properties(); properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterDO.getBootstrapServers()); if (serialize) { @@ -198,8 +213,7 @@ public class KafkaClientPool { if (ValidateUtils.isBlank(clusterDO.getSecurityProperties())) { return properties; } - Properties securityProperties = JSONObject.parseObject(clusterDO.getSecurityProperties(), Properties.class); - properties.putAll(securityProperties); + properties.putAll(JsonUtils.stringToObj(clusterDO.getSecurityProperties(), Properties.class)); return properties; } } \ No newline at end of file diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaMetricsCache.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaMetricsCache.java index 011bc1e6..7ba1e304 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaMetricsCache.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaMetricsCache.java @@ -14,7 +14,10 @@ public class KafkaMetricsCache { /** * */ - private static Map> TopicMetricsMap = new ConcurrentHashMap<>(); + private static final Map> TOPIC_METRICS_MAP = new ConcurrentHashMap<>(); + + private KafkaMetricsCache() { + } public static void putTopicMetricsToCache(Long clusterId, List dataList) { if (clusterId == null || dataList == null) { @@ -24,22 +27,22 @@ public class KafkaMetricsCache { for (TopicMetrics topicMetrics : dataList) { subMetricsMap.put(topicMetrics.getTopicName(), topicMetrics); } - TopicMetricsMap.put(clusterId, subMetricsMap); + TOPIC_METRICS_MAP.put(clusterId, subMetricsMap); } public static Map getTopicMetricsFromCache(Long clusterId) { - return TopicMetricsMap.getOrDefault(clusterId, Collections.emptyMap()); + return TOPIC_METRICS_MAP.getOrDefault(clusterId, Collections.emptyMap()); } public static Map> getAllTopicMetricsFromCache() { - return TopicMetricsMap; + return TOPIC_METRICS_MAP; } public static TopicMetrics getTopicMetricsFromCache(Long clusterId, String topicName) { if (clusterId == null || topicName == null) { return null; } - Map subMap = TopicMetricsMap.getOrDefault(clusterId, Collections.emptyMap()); + Map subMap = TOPIC_METRICS_MAP.getOrDefault(clusterId, Collections.emptyMap()); return subMap.get(topicName); } } diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java index 5cd81581..d58efc9a 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java @@ -160,7 +160,7 @@ public class LogicalClusterMetadataManager { public void flush() { List logicalClusterDOList = logicalClusterService.listAll(); if (ValidateUtils.isNull(logicalClusterDOList)) { - logicalClusterDOList = Collections.EMPTY_LIST; + logicalClusterDOList = Collections.emptyList(); } Set inDbLogicalClusterIds = logicalClusterDOList.stream() .map(LogicalClusterDO::getId) @@ -208,7 +208,8 @@ public class LogicalClusterMetadataManager { // 计算逻辑集群到Topic名称的映射 Set topicNameSet = PhysicalClusterMetadataManager.getBrokerTopicNum( logicalClusterDO.getClusterId(), - brokerIdSet); + brokerIdSet + ); 
LOGICAL_CLUSTER_ID_TOPIC_NAME_MAP.put(logicalClusterDO.getId(), topicNameSet); // 计算Topic名称到逻辑集群的映射 diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java index a7142fa9..47ab8b64 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java @@ -39,7 +39,7 @@ import java.util.concurrent.ConcurrentHashMap; */ @Service public class PhysicalClusterMetadataManager { - private final static Logger LOGGER = LoggerFactory.getLogger(PhysicalClusterMetadataManager.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PhysicalClusterMetadataManager.class); @Autowired private ControllerDao controllerDao; @@ -50,22 +50,25 @@ public class PhysicalClusterMetadataManager { @Autowired private ClusterService clusterService; - private final static Map CLUSTER_MAP = new ConcurrentHashMap<>(); + @Autowired + private ThreadPool threadPool; - private final static Map CONTROLLER_DATA_MAP = new ConcurrentHashMap<>(); + private static final Map CLUSTER_MAP = new ConcurrentHashMap<>(); - private final static Map ZK_CONFIG_MAP = new ConcurrentHashMap<>(); + private static final Map CONTROLLER_DATA_MAP = new ConcurrentHashMap<>(); - private final static Map> TOPIC_METADATA_MAP = new ConcurrentHashMap<>(); + private static final Map ZK_CONFIG_MAP = new ConcurrentHashMap<>(); - private final static Map> TOPIC_PROPERTIES_MAP = new ConcurrentHashMap<>(); + private static final Map> TOPIC_METADATA_MAP = new ConcurrentHashMap<>(); - private final static Map> BROKER_METADATA_MAP = new ConcurrentHashMap<>(); + private static final Map> TOPIC_PROPERTIES_MAP = new ConcurrentHashMap<>(); + + private static final Map> BROKER_METADATA_MAP = new ConcurrentHashMap<>(); /** * JXM连接, 延迟连接 */ - private final static Map> JMX_CONNECTOR_MAP = new ConcurrentHashMap<>(); + private static final Map> JMX_CONNECTOR_MAP = new ConcurrentHashMap<>(); /** * KafkaBroker版本, 延迟获取 @@ -125,7 +128,7 @@ public class PhysicalClusterMetadataManager { zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener); //增加Topic监控 - TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig); + TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, threadPool); topicListener.init(); zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener); @@ -314,7 +317,7 @@ public class PhysicalClusterMetadataManager { metadataMap.put(brokerId, brokerMetadata); Map jmxMap = JMX_CONNECTOR_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>()); - jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig)); + jmxMap.put(brokerId, new JmxConnectorWrap(clusterId, brokerId, brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig)); JMX_CONNECTOR_MAP.put(clusterId, jmxMap); Map versionMap = KAFKA_VERSION_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>()); @@ -398,7 +401,7 @@ public class PhysicalClusterMetadataManager { KafkaBrokerRoleEnum roleEnum) { BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId); - if (ValidateUtils.isNull(brokerMetadata)) { + if (brokerMetadata == null) { return; } String hostname = 
brokerMetadata.getHost().replace(KafkaConstant.BROKER_HOST_NAME_SUFFIX, ""); @@ -438,7 +441,7 @@ public class PhysicalClusterMetadataManager { KafkaBrokerRoleEnum roleEnum) { BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId); - if (ValidateUtils.isNull(brokerMetadata)) { + if (brokerMetadata == null) { return; } @@ -539,9 +542,12 @@ public class PhysicalClusterMetadataManager { } public static Set getBrokerTopicNum(Long clusterId, Set brokerIdSet) { - Set topicNameSet = new HashSet<>(); - Map metadataMap = TOPIC_METADATA_MAP.get(clusterId); + if (metadataMap == null) { + return new HashSet<>(); + } + + Set topicNameSet = new HashSet<>(); for (String topicName: metadataMap.keySet()) { try { TopicMetadata tm = metadataMap.get(topicName); diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java index f1b685cb..ba870465 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java @@ -1,37 +1,63 @@ package com.xiaojukeji.kafka.manager.service.cache; import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; -import java.util.concurrent.*; +import javax.annotation.PostConstruct; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; /** * @author zengqiao * @date 20/8/24 */ +@Service public class ThreadPool { - private static final ExecutorService COLLECT_METRICS_THREAD_POOL = new ThreadPoolExecutor( - 256, - 256, - 120L, - TimeUnit.SECONDS, - new LinkedBlockingQueue(), - new DefaultThreadFactory("Collect-Metrics-Thread") - ); - private static final ExecutorService API_CALL_THREAD_POOL = new ThreadPoolExecutor( - 16, - 16, - 120L, - TimeUnit.SECONDS, - new LinkedBlockingQueue(), - new DefaultThreadFactory("Api-Call-Thread") - ); + @Value(value = "${thread-pool.collect-metrics.thread-num:256}") + private Integer collectMetricsThreadNum; - public static void submitCollectMetricsTask(Runnable collectMetricsTask) { - COLLECT_METRICS_THREAD_POOL.submit(collectMetricsTask); + @Value(value = "${thread-pool.collect-metrics.queue-size:10000}") + private Integer collectMetricsQueueSize; + + @Value(value = "${thread-pool.api-call.thread-num:16}") + private Integer apiCallThreadNum; + + @Value(value = "${thread-pool.api-call.queue-size:10000}") + private Integer apiCallQueueSize; + + private ThreadPoolExecutor collectMetricsThreadPool; + + private ThreadPoolExecutor apiCallThreadPool; + + @PostConstruct + public void init() { + collectMetricsThreadPool = new ThreadPoolExecutor( + collectMetricsThreadNum, + collectMetricsThreadNum, + 120L, + TimeUnit.SECONDS, + new LinkedBlockingQueue<>(collectMetricsQueueSize), + new DefaultThreadFactory("TaskThreadPool") + ); + + apiCallThreadPool = new ThreadPoolExecutor( + apiCallThreadNum, + apiCallThreadNum, + 120L, + TimeUnit.SECONDS, + new LinkedBlockingQueue<>(apiCallQueueSize), + new DefaultThreadFactory("ApiThreadPool") + ); } - public static void submitApiCallTask(Runnable apiCallTask) { - API_CALL_THREAD_POOL.submit(apiCallTask); + public void submitCollectMetricsTask(Long clusterId, Runnable collectMetricsTask) { + 
collectMetricsThreadPool.submit(collectMetricsTask);
+    }
+
+    public void submitApiCallTask(Long clusterId, Runnable apiCallTask) {
+        apiCallThreadPool.submit(apiCallTask);
     }
 }
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/TopicExpiredService.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/TopicExpiredService.java
index 273b62c6..9ab963aa 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/TopicExpiredService.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/TopicExpiredService.java
@@ -13,4 +13,12 @@ public interface TopicExpiredService {
     List getExpiredTopicDataList(String username);

     ResultStatus retainExpiredTopic(Long physicalClusterId, String topicName, Integer retainDays);
+
+    /**
+     * Delete by topic name
+     * @param clusterId cluster id
+     * @param topicName topic name
+     * @return int
+     */
+    int deleteByTopicName(Long clusterId, String topicName);
 }
\ No newline at end of file
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java
index 0ceb3b30..754a81a7 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java
@@ -185,7 +185,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
         List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(gatewayConfigDO.getType());
         Long version = 1L;
         for (GatewayConfigDO elem: gatewayConfigDOList) {
-            if (elem.getVersion() > version) {
+            if (elem.getVersion() >= version) {
+                // when greater than or equal, the new version still has to be max + 1
                 version = elem.getVersion() + 1L;
             }
         }
@@ -204,6 +205,7 @@
     @Override
     public Result deleteById(Long id) {
         try {
+            // TODO when deleting, do not delete directly; the version also needs to be bumped
             if (gatewayConfigDao.deleteById(id) > 0) {
                 return Result.buildSuc();
             }
@@ -232,7 +234,8 @@
         List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(newGatewayConfigDO.getType());
         Long version = 1L;
         for (GatewayConfigDO elem: gatewayConfigDOList) {
-            if (elem.getVersion() > version) {
+            if (elem.getVersion() >= version) {
+                // when greater than or equal, the new version still has to be max + 1
                 version = elem.getVersion() + 1L;
             }
         }
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/AdminServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/AdminServiceImpl.java
index 8a0028c7..594f1aa1 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/AdminServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/AdminServiceImpl.java
@@ -43,6 +43,9 @@ public class AdminServiceImpl implements AdminService {
     @Autowired
     private TopicManagerService topicManagerService;

+    @Autowired
+    private TopicExpiredService topicExpiredService;
+
     @Autowired
     private TopicService topicService;

@@ -143,6 +146,7 @@
         // 3.
数据库中删除topic topicManagerService.deleteByTopicName(clusterDO.getId(), topicName); + topicExpiredService.deleteByTopicName(clusterDO.getId(), topicName); // 4. 数据库中删除authority authorityService.deleteAuthorityByTopic(clusterDO.getId(), topicName); diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java index 24eea55f..ac3e0593 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java @@ -61,6 +61,9 @@ public class BrokerServiceImpl implements BrokerService { @Autowired private PhysicalClusterMetadataManager physicalClusterMetadataManager; + @Autowired + private ThreadPool threadPool; + @Override public ClusterBrokerStatus getClusterBrokerStatus(Long clusterId) { // 副本同步状态 @@ -201,7 +204,7 @@ public class BrokerServiceImpl implements BrokerService { return getBrokerMetricsFromJmx(clusterId, brokerId, metricsCode); } }); - ThreadPool.submitApiCallTask(taskList[i]); + threadPool.submitApiCallTask(clusterId, taskList[i]); } List metricsList = new ArrayList<>(brokerIdSet.size()); for (int i = 0; i < brokerIdList.size(); i++) { diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ClusterServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ClusterServiceImpl.java index ea9d22da..153576c4 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ClusterServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ClusterServiceImpl.java @@ -19,6 +19,8 @@ import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager; import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager; import com.xiaojukeji.kafka.manager.service.service.*; import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; import org.apache.zookeeper.ZooKeeper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -210,7 +212,7 @@ public class ClusterServiceImpl implements ClusterService { ZooKeeper zk = null; try { - zk = new ZooKeeper(zookeeper, 1000, null); + zk = new ZooKeeper(zookeeper, 1000, watchedEvent -> LOGGER.info(" receive event : " + watchedEvent.getType().name())); for (int i = 0; i < 15; ++i) { if (zk.getState().isConnected()) { // 只有状态是connected的时候,才表示地址是合法的 diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java index d0b34e3d..94f00d2c 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java @@ -22,6 +22,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.*; +import java.util.regex.Pattern; /** * @author zengqiao @@ -240,9 +241,11 @@ public class ExpertServiceImpl implements ExpertService { return new ArrayList<>(); } + //获取满足条件的过期Topic List filteredExpiredTopicList 
= new ArrayList<>(); for (TopicExpiredDO elem: expiredTopicList) { - if (config.getIgnoreClusterIdList().contains(elem.getClusterId())) { + //判定是否为忽略Cluster或者判定是否为忽略Topic名,使用正则来过滤理论上不属于过期的Topic + if (config.getIgnoreClusterIdList().contains(elem.getClusterId()) || Pattern.matches(config.getFilterRegex(), elem.getTopicName())) { continue; } filteredExpiredTopicList.add(elem); diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java index 611dc203..d0f0c514 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java @@ -39,6 +39,9 @@ public class JmxServiceImpl implements JmxService { @Autowired private PhysicalClusterMetadataManager physicalClusterMetadataManager; + @Autowired + private ThreadPool threadPool; + @Override public BrokerMetrics getBrokerMetrics(Long clusterId, Integer brokerId, Integer metricsCode) { if (clusterId == null || brokerId == null || metricsCode == null) { @@ -98,7 +101,7 @@ public class JmxServiceImpl implements JmxService { ); } }); - ThreadPool.submitCollectMetricsTask(taskList[i]); + threadPool.submitCollectMetricsTask(clusterId, taskList[i]); } List metricsList = new ArrayList<>(); @@ -303,7 +306,7 @@ public class JmxServiceImpl implements JmxService { return metricsList; } }); - ThreadPool.submitCollectMetricsTask(taskList[i]); + threadPool.submitCollectMetricsTask(clusterId, taskList[i]); } Map metricsMap = new HashMap<>(); diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java index 8f957b02..3b2df843 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java @@ -2,6 +2,8 @@ package com.xiaojukeji.kafka.manager.service.service.impl; import com.xiaojukeji.kafka.manager.common.entity.ResultStatus; import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO; +import com.xiaojukeji.kafka.manager.common.events.RegionCreatedEvent; +import com.xiaojukeji.kafka.manager.common.utils.SpringTool; import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata; import com.xiaojukeji.kafka.manager.dao.RegionDao; @@ -59,6 +61,8 @@ public class RegionServiceImpl implements RegionService { return ResultStatus.BROKER_NOT_EXIST; } if (regionDao.insert(regionDO) > 0) { + // 发布region创建事件 + SpringTool.publish(new RegionCreatedEvent(this, regionDO)); return ResultStatus.SUCCESS; } } catch (DuplicateKeyException e) { diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicExpiredServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicExpiredServiceImpl.java index c51e1dcb..d310af1a 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicExpiredServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicExpiredServiceImpl.java @@ -75,4 +75,14 @@ 
public class TopicExpiredServiceImpl implements TopicExpiredService { } return ResultStatus.MYSQL_ERROR; } + + @Override + public int deleteByTopicName(Long clusterId, String topicName) { + try { + return topicExpiredDao.deleteByName(clusterId, topicName); + } catch (Exception e) { + LOGGER.error("delete topic failed, clusterId:{} topicName:{}", clusterId, topicName, e); + } + return 0; + } } \ No newline at end of file diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicManagerServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicManagerServiceImpl.java index 4a8f501f..a25115ef 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicManagerServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicManagerServiceImpl.java @@ -210,7 +210,7 @@ public class TopicManagerServiceImpl implements TopicManagerService { } } - // 增加流量信息 + // 增加流量和描述信息 Map> metricMap = KafkaMetricsCache.getAllTopicMetricsFromCache(); for (MineTopicSummary mineTopicSummary : summaryList) { TopicMetrics topicMetrics = getTopicMetricsFromCacheOrJmx( @@ -219,6 +219,10 @@ public class TopicManagerServiceImpl implements TopicManagerService { metricMap); mineTopicSummary.setBytesIn(topicMetrics.getSpecifiedMetrics("BytesInPerSecOneMinuteRate")); mineTopicSummary.setBytesOut(topicMetrics.getSpecifiedMetrics("BytesOutPerSecOneMinuteRate")); + + // 增加topic描述信息 + TopicDO topicDO = topicDao.getByTopicName(mineTopicSummary.getPhysicalClusterId(), mineTopicSummary.getTopicName()); + mineTopicSummary.setDescription(topicDO.getDescription()); } return summaryList; } diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java index 154faf77..aa4fe3fb 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java @@ -87,6 +87,9 @@ public class TopicServiceImpl implements TopicService { @Autowired private AbstractHealthScoreStrategy healthScoreStrategy; + @Autowired + private KafkaClientPool kafkaClientPool; + @Override public List getTopicMetricsFromDB(Long clusterId, String topicName, Date startTime, Date endTime) { try { @@ -340,7 +343,7 @@ public class TopicServiceImpl implements TopicService { Map topicPartitionLongMap = new HashMap<>(); KafkaConsumer kafkaConsumer = null; try { - kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO); + kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO); if ((offsetPosEnum.getCode() & OffsetPosEnum.END.getCode()) > 0) { topicPartitionLongMap = kafkaConsumer.endOffsets(topicPartitionList); } else if ((offsetPosEnum.getCode() & OffsetPosEnum.BEGINNING.getCode()) > 0) { @@ -541,7 +544,7 @@ public class TopicServiceImpl implements TopicService { List partitionOffsetDTOList = new ArrayList<>(); try { - kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO); + kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO); Map offsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch); if (offsetAndTimestampMap == null) { return new ArrayList<>(); diff --git 
a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java index d75dec5a..51295644 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java @@ -45,6 +45,9 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy { @Autowired private JmxService jmxService; + @Autowired + private ThreadPool threadPool; + @Override public Integer calBrokerHealthScore(Long clusterId, Integer brokerId) { BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId); @@ -125,7 +128,7 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy { return calBrokerHealthScore(clusterId, brokerId); } }); - ThreadPool.submitApiCallTask(taskList[i]); + threadPool.submitApiCallTask(clusterId, taskList[i]); } Integer topicHealthScore = HEALTH_SCORE_HEALTHY; diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java index 5df85b5e..40b73868 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java @@ -1,5 +1,6 @@ package com.xiaojukeji.kafka.manager.service.utils; +import lombok.Data; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; @@ -8,38 +9,18 @@ import org.springframework.stereotype.Service; * @author zengqiao * @date 20/4/26 */ +@Data @Service("configUtils") public class ConfigUtils { - @Value(value = "${custom.idc}") + private ConfigUtils() { + } + + @Value(value = "${custom.idc:cn}") private String idc; - @Value(value = "${spring.profiles.active}") + @Value(value = "${spring.profiles.active:dev}") private String kafkaManagerEnv; - @Value(value = "${custom.store-metrics-task.save-days}") - private Long maxMetricsSaveDays; - - public String getIdc() { - return idc; - } - - public void setIdc(String idc) { - this.idc = idc; - } - - public String getKafkaManagerEnv() { - return kafkaManagerEnv; - } - - public void setKafkaManagerEnv(String kafkaManagerEnv) { - this.kafkaManagerEnv = kafkaManagerEnv; - } - - public Long getMaxMetricsSaveDays() { - return maxMetricsSaveDays; - } - - public void setMaxMetricsSaveDays(Long maxMetricsSaveDays) { - this.maxMetricsSaveDays = maxMetricsSaveDays; - } + @Value(value = "${spring.application.version:unknown}") + private String applicationVersion; } diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java index a94ec9de..f5cdefe8 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java @@ -74,15 +74,10 @@ public class BrokerStateListener implements StateChangeListener { BrokerMetadata brokerMetadata = null; try { brokerMetadata = 
zkConfig.get(ZkPathUtil.getBrokerIdNodePath(brokerId), BrokerMetadata.class); - if (!brokerMetadata.getEndpoints().isEmpty()) { - String endpoint = brokerMetadata.getEndpoints().get(0); - int idx = endpoint.indexOf("://"); - endpoint = endpoint.substring(idx + "://".length()); - idx = endpoint.indexOf(":"); - brokerMetadata.setHost(endpoint.substring(0, idx)); - brokerMetadata.setPort(Integer.parseInt(endpoint.substring(idx + 1))); - } + // 解析并更新本次存储的broker元信息 + BrokerMetadata.parseAndUpdateBrokerMetadata(brokerMetadata); + brokerMetadata.setClusterId(clusterId); brokerMetadata.setBrokerId(brokerId); PhysicalClusterMetadataManager.putBrokerMetadata(clusterId, brokerId, brokerMetadata, jmxConfig); diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java index 3f43f57b..c417df66 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java @@ -19,13 +19,13 @@ import org.springframework.dao.DuplicateKeyException; * @date 20/5/14 */ public class ControllerStateListener implements StateChangeListener { - private final static Logger LOGGER = LoggerFactory.getLogger(ControllerStateListener.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ControllerStateListener.class); - private Long clusterId; + private final Long clusterId; - private ZkConfigImpl zkConfig; + private final ZkConfigImpl zkConfig; - private ControllerDao controllerDao; + private final ControllerDao controllerDao; public ControllerStateListener(Long clusterId, ZkConfigImpl zkConfig, ControllerDao controllerDao) { this.clusterId = clusterId; @@ -35,8 +35,11 @@ public class ControllerStateListener implements StateChangeListener { @Override public void init() { + if (!checkNodeExist()) { + LOGGER.warn("kafka-controller data not exist, clusterId:{}.", clusterId); + return; + } processControllerChange(); - return; } @Override @@ -49,12 +52,21 @@ public class ControllerStateListener implements StateChangeListener { break; } } catch (Exception e) { - LOGGER.error("process controller state change failed, clusterId:{} state:{} path:{}.", - clusterId, state, path, e); + LOGGER.error("process controller state change failed, clusterId:{} state:{} path:{}.", clusterId, state, path, e); } } - private void processControllerChange(){ + private boolean checkNodeExist() { + try { + return zkConfig.checkPathExists(ZkPathUtil.CONTROLLER_ROOT_NODE); + } catch (Exception e) { + LOGGER.error("init kafka-controller data failed, clusterId:{}.", clusterId, e); + } + + return false; + } + + private void processControllerChange() { LOGGER.warn("init controllerData or controller change, clusterId:{}.", clusterId); ControllerData controllerData = null; try { diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java index 4314a101..6f3d33b3 100644 --- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java +++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java @@ -10,6 +10,7 @@ import 
com.xiaojukeji.kafka.manager.service.cache.ThreadPool; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; import java.util.HashSet; import java.util.List; @@ -28,9 +29,12 @@ public class TopicStateListener implements StateChangeListener { private ZkConfigImpl zkConfig; - public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) { + private ThreadPool threadPool; + + public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig, ThreadPool threadPool) { this.clusterId = clusterId; this.zkConfig = zkConfig; + this.threadPool = threadPool; } @Override @@ -47,7 +51,7 @@ public class TopicStateListener implements StateChangeListener { return null; } }); - ThreadPool.submitCollectMetricsTask(taskList[i]); + threadPool.submitCollectMetricsTask(clusterId, taskList[i]); } } catch (Exception e) { LOGGER.error("init topics metadata failed, clusterId:{}.", clusterId, e); diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java index 75399538..9f1d36eb 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java @@ -20,5 +20,5 @@ public interface BrokerMetricsDao { */ List getBrokerMetrics(Long clusterId, Integer brokerId, Date startTime, Date endTime); - int deleteBeforeTime(Date endTime); + int deleteBeforeTime(Date endTime, Integer limitSize); } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java index d0731508..0e2e68a7 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java @@ -10,5 +10,5 @@ public interface ClusterMetricsDao { List getClusterMetrics(long clusterId, Date startTime, Date endTime); - int deleteBeforeTime(Date endTime); + int deleteBeforeTime(Date endTime, Integer limitSize); } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java index 9d02c5d5..e0c3f84e 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java @@ -30,5 +30,5 @@ public interface TopicAppMetricsDao { * @param endTime * @return */ - int deleteBeforeTime(Date endTime); + int deleteBeforeTime(Date endTime, Integer limitSize); } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicExpiredDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicExpiredDao.java index 18698941..ea189eb4 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicExpiredDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicExpiredDao.java @@ -17,4 +17,6 @@ public interface TopicExpiredDao { int replace(TopicExpiredDO expiredDO); TopicExpiredDO getByTopic(Long clusterId, String topicName); + + int deleteByName(Long clusterId, String topicName); } \ No newline at end of file diff --git 
a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java index 58029f36..5d7af6e0 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java @@ -22,5 +22,5 @@ public interface TopicMetricsDao { List getLatestTopicMetrics(Long clusterId, Date afterTime); - int deleteBeforeTime(Date endTime); + int deleteBeforeTime(Date endTime, Integer limitSize); } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java index e7fd5169..5e6b237d 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java @@ -33,9 +33,7 @@ public interface TopicRequestMetricsDao { * @param endTime * @return */ - int deleteBeforeTime(Date endTime); - - int deleteBeforeId(Long id); + int deleteBeforeTime(Date endTime, Integer limitSize); List getById(Long startId, Long endId); } \ No newline at end of file diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java index 1010cc17..cc975c52 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java @@ -32,5 +32,5 @@ public interface TopicThrottledMetricsDao { List getLatestTopicThrottledMetrics(Long clusterId, Date afterTime); - int deleteBeforeTime(Date endTime); + int deleteBeforeTime(Date endTime, Integer limitSize); } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java index 5a06e5ce..bba58185 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java @@ -37,7 +37,10 @@ public class BrokerMetricsImpl implements BrokerMetricsDao { } @Override - public int deleteBeforeTime(Date endTime) { - return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", endTime); + public int deleteBeforeTime(Date endTime, Integer limitSize) { + Map params = new HashMap<>(2); + params.put("endTime", endTime); + params.put("limitSize", limitSize); + return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", params); } } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java index b05d3c0f..08948871 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java @@ -27,7 +27,7 @@ public class ClusterMetricsDaoImpl implements ClusterMetricsDao { @Override public List getClusterMetrics(long clusterId, Date startTime, Date endTime) { - Map map = new HashMap(3); + Map map = new HashMap<>(3); 
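// Every metrics DAO now passes a parameter map instead of a bare Date, so the mapper SQL
// can bound each pass with LIMIT and avoid a single long-running, lock-heavy DELETE.
// The corresponding <delete> mapper entries presumably take this shape (the table name
// broker_metrics is illustrative, not confirmed by this patch):
//
//     <delete id="deleteBeforeTime" parameterType="java.util.Map">
//         DELETE FROM broker_metrics WHERE gmt_create < #{endTime} LIMIT #{limitSize}
//     </delete>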
map.put("clusterId", clusterId); map.put("startTime", startTime); map.put("endTime", endTime); @@ -35,7 +35,10 @@ public class ClusterMetricsDaoImpl implements ClusterMetricsDao { } @Override - public int deleteBeforeTime(Date endTime) { - return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", endTime); + public int deleteBeforeTime(Date endTime, Integer limitSize) { + Map params = new HashMap<>(2); + params.put("endTime", endTime); + params.put("limitSize", limitSize); + return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", params); } } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java index fe55a1ab..90ce7e3e 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java @@ -46,7 +46,10 @@ public class TopicAppMetricsDaoImpl implements TopicAppMetricsDao { } @Override - public int deleteBeforeTime(Date endTime) { - return sqlSession.delete("TopicAppMetricsDao.deleteBeforeTime", endTime); + public int deleteBeforeTime(Date endTime, Integer limitSize) { + Map params = new HashMap<>(2); + params.put("endTime", endTime); + params.put("limitSize", limitSize); + return sqlSession.delete("TopicAppMetricsDao.deleteBeforeTime", params); } } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicExpiredDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicExpiredDaoImpl.java index 51853db7..936d4931 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicExpiredDaoImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicExpiredDaoImpl.java @@ -50,4 +50,12 @@ public class TopicExpiredDaoImpl implements TopicExpiredDao { params.put("topicName", topicName); return sqlSession.selectOne("TopicExpiredDao.getByTopic", params); } + + @Override + public int deleteByName(Long clusterId, String topicName) { + Map params = new HashMap<>(2); + params.put("clusterId", clusterId); + params.put("topicName", topicName); + return sqlSession.delete("TopicExpiredDao.deleteByName", params); + } } \ No newline at end of file diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java index 7397a28c..a7eae32c 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java @@ -60,7 +60,10 @@ public class TopicMetricsDaoImpl implements TopicMetricsDao { } @Override - public int deleteBeforeTime(Date endTime) { - return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", endTime); + public int deleteBeforeTime(Date endTime, Integer limitSize) { + Map params = new HashMap<>(2); + params.put("endTime", endTime); + params.put("limitSize", limitSize); + return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", params); } } diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java index bfaa552c..e59324f5 100644 --- 
a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java @@ -45,13 +45,11 @@ public class TopicRequestMetricsDaoImpl implements TopicRequestMetricsDao { } @Override - public int deleteBeforeTime(Date endTime) { - return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeTime", endTime); - } - - @Override - public int deleteBeforeId(Long id) { - return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeId", id); + public int deleteBeforeTime(Date endTime, Integer limitSize) { + Map params = new HashMap<>(); + params.put("endTime", endTime); + params.put("limitSize", limitSize); + return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeTime", params); } @Override
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java index 784bc242..b1f64d43 100644 --- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java +++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java @@ -75,7 +75,10 @@ public class TopicThrottledMetricsDaoImpl implements TopicThrottledMetricsDao { } @Override - public int deleteBeforeTime(Date endTime) { - return sqlSession.delete("TopicThrottledMetricsDao.deleteBeforeTime", endTime); + public int deleteBeforeTime(Date endTime, Integer limitSize) { + Map params = new HashMap<>(2); + params.put("endTime", endTime); + params.put("limitSize", limitSize); + return sqlSession.delete("TopicThrottledMetricsDao.deleteBeforeTime", params); } }
diff --git a/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml index 49746df7..b5115e10 100644 --- a/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml @@ -29,9 +29,9 @@ ]]> - + \ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml index 11614d2d..8aca62ee 100644 --- a/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml @@ -27,9 +27,9 @@ - + \ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml index 1c64c0ce..fff5037a 100644 --- a/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml @@ -30,9 +30,9 @@ ]]> - + \ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicExpiredDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicExpiredDao.xml index 39ebf8ca..1da6753a 100644 --- a/kafka-manager-dao/src/main/resources/mapper/TopicExpiredDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/TopicExpiredDao.xml @@ -36,4 +36,8 @@ + + <delete id="deleteByName"> + DELETE FROM topic_expired WHERE cluster_id=#{clusterId} AND topic_name=#{topicName} + </delete>
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml index baa6f4b0..249863f4 100644 ---
a/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml @@ -25,6 +25,7 @@ WHERE cluster_id = #{clusterId} AND topic_name = #{topicName} AND gmt_create BETWEEN #{startTime} AND #{endTime} + ORDER BY gmt_create ]]> @@ -32,12 +33,13 @@ - + \ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml index b9aaa35b..7ad5e679 100644 --- a/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml @@ -34,15 +34,9 @@ ORDER BY gmt_create ASC - + - - - -
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml index c5b6474d..e163d30f 100644 --- a/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml +++ b/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml @@ -54,9 +54,9 @@ AND gmt_create > #{afterTime} - + \ No newline at end of file
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java index 3ccbd17c..3683b193 100644 --- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java +++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java @@ -32,16 +32,16 @@ public class BaseSessionSignOn extends AbstractSingleSignOn { private LdapAuthentication ldapAuthentication; //Whether LDAP authentication is enabled - @Value(value = "${account.ldap.enabled:}") + @Value(value = "${account.ldap.enabled:false}") private Boolean accountLdapEnabled; //Default role for LDAP auto-registered users. Note: this should normally be a low-privilege role - @Value(value = "${account.ldap.auth-user-registration-role:}") + @Value(value = "${account.ldap.auth-user-registration-role:normal}") private String authUserRegistrationRole; //Whether LDAP auto-registration is enabled - @Value(value = "${account.ldap.auth-user-registration:}") - private boolean authUserRegistration; + @Value(value = "${account.ldap.auth-user-registration:false}") + private Boolean authUserRegistration; @Override public Result loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto) {
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java index 8f079fde..f0299d87 100644 --- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java +++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java @@ -14,6 +14,7 @@ import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; import javax.servlet.http.Cookie; @@ -27,7 +28,13 @@ import javax.servlet.http.HttpSession; */ @Service("loginService")
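// The LDAP switches move from primitive boolean to Boolean and gain real defaults: with
// an empty default such as "${account.ldap.auth-user-registration:}", Spring typically
// resolves the property to an empty string, which cannot populate a primitive boolean
// and fails at startup, while explicit defaults make every flag optional. A null-safe
// way to test such wrapper flags (illustrative helper, not from this patch):
//
//     private static boolean isEnabled(Boolean flag) {
//         return Boolean.TRUE.equals(flag);      // false for both null and Boolean.FALSE
//     }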
public class LoginServiceImpl implements LoginService { - private final static Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class); + private static final Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class); + + @Value(value = "${account.jump-login.gateway-api:false}") + private Boolean jumpLoginGatewayApi; + + @Value(value = "${account.jump-login.third-part-api:false}") + private Boolean jumpLoginThirdPartApi; @Autowired private AccountService accountService; @@ -75,12 +82,10 @@ public class LoginServiceImpl implements LoginService { return false; } - if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX) - || classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_PREFIX) - || classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_OP_PREFIX) - || classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_NORMAL_PREFIX) - || classRequestMappingValue.equals(ApiPrefix.GATEWAY_API_V1_PREFIX)) { - // Whitelisted APIs directly return true + if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX) || + (jumpLoginGatewayApi != null && jumpLoginGatewayApi && classRequestMappingValue.equals(ApiPrefix.GATEWAY_API_V1_PREFIX)) || + (jumpLoginThirdPartApi != null && jumpLoginThirdPartApi && classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_PREFIX))) { + // Skip login directly for the login API itself, or for API types whose bypass switch is enabled return true; }
diff --git a/kafka-manager-extends/kafka-manager-kcm/pom.xml b/kafka-manager-extends/kafka-manager-kcm/pom.xml index 7ffd00e3..12a942d5 100644 --- a/kafka-manager-extends/kafka-manager-kcm/pom.xml +++ b/kafka-manager-extends/kafka-manager-kcm/pom.xml @@ -28,7 +28,6 @@ 1.8 UTF-8 UTF-8 - 5.1.3.RELEASE @@ -56,17 +55,14 @@ org.springframework spring-beans - ${spring-version} org.springframework spring-context - ${spring-version} org.springframework spring-test - ${spring-version}
diff --git a/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java b/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java index 6e3fa677..d0a2503b 100644 --- a/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java +++ b/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java @@ -37,21 +37,24 @@ public class N9e extends AbstractAgent { private static final Logger LOGGER = LoggerFactory.getLogger(N9e.class); - @Value("${kcm.n9e.base-url}") + @Value("${kcm.n9e.base-url:}") private String baseUrl; - @Value("${kcm.n9e.user-token}") + @Value("${kcm.n9e.user-token:12345678}") private String userToken; - @Value("${kcm.n9e.account}") + @Value("${kcm.n9e.account:root}") private String account; - @Value("${kcm.n9e.timeout}") + @Value("${kcm.n9e.timeout:300}") private Integer timeout; - @Value("${kcm.n9e.script-file}") + @Value("${kcm.n9e.script-file:kcm_script.sh}") private String scriptFile; + @Value("${kcm.n9e.logikm-url:}") + private String logiKMUrl; + private String script; private static final String CREATE_TASK_URI = "/api/job-ce/tasks"; @@ -219,7 +222,8 @@ public class N9e extends AbstractAgent { sb.append(creationTaskData.getKafkaPackageUrl()).append(",,"); sb.append(creationTaskData.getServerPropertiesName().replace(KafkaFileEnum.SERVER_CONFIG.getSuffix(), "")).append(",,"); sb.append(creationTaskData.getServerPropertiesMd5()).append(",,"); - sb.append(creationTaskData.getServerPropertiesUrl()); +
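// The agent packs all task arguments into a single ",,"-separated string; the n9e job
// passes them to kcm_script.sh as positional parameters, and the LogiKM callback address
// appended here arrives as the new tenth field (${10} in the script further below).
// Split on the receiving side, this looks roughly like (illustrative, the exact
// unpacking mechanism is not shown in this patch):
//
//     String[] args = argString.split(",,");
//     String logiKmUrl = args[9];                // tenth positional argument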
sb.append(creationTaskData.getServerPropertiesUrl()).append(",,"); + sb.append(this.logiKMUrl); N9eCreationTask n9eCreationTask = new N9eCreationTask(); n9eCreationTask.setTitle(Constant.TASK_TITLE_PREFIX + "-集群ID:" + creationTaskData.getClusterId());
diff --git a/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh b/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh index ffd54a20..16ffb80c 100644 --- a/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh +++ b/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh @@ -18,12 +18,13 @@ p_kafka_server_properties_name=${7} #server config name p_kafka_server_properties_md5=${8} #server config MD5 p_kafka_server_properties_url=${9} #server config download URL +p_kafka_manager_url=${10} #LogiKM address + #----------------------------------------Configuration------------------------------------------------------# g_base_dir='/home' g_cluster_task_dir=${g_base_dir}"/kafka_cluster_task/task_${p_task_id}" #deploy/upgrade working dir g_rollback_version=${g_cluster_task_dir}"/rollback_version" #rollback version g_new_kafka_package_name='' #final package name -g_kafka_manager_addr='' #kafka-manager address g_local_ip=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"` g_hostname=${g_local_ip} @@ -47,7 +48,7 @@ function dchat_alarm() { # Check and initialize the environment function check_and_init_env() { - if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" ]; then + if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" -o -z "${p_kafka_manager_url}" ]; then ECHO_LOG "存在为空的参数不合法, 退出集群任务" dchat_alarm "存在为空的参数不合法, 退出集群任务" exit 1 @@ -72,11 +73,11 @@ function check_and_init_env() { # Check and wait until all replicas in the cluster are in sync function check_and_wait_broker_stabled() { - under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` + under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` while [ "$under_replication_count" -ne 1 ]; do ECHO_LOG "存在${under_replication_count}个副本未同步, sleep 10s" sleep 10 - under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` + under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` done ECHO_LOG "集群副本都已经处于同步的状态, 可以进行集群升级" } @@ -324,6 +325,7 @@ ECHO_LOG " p_kafka_package_name=${p_kafka_package_name}" ECHO_LOG " p_kafka_package_md5=${p_kafka_package_md5}" ECHO_LOG " p_kafka_server_properties_name=${p_kafka_server_properties_name}" ECHO_LOG " p_kafka_server_properties_md5=${p_kafka_server_properties_md5}" +ECHO_LOG " p_kafka_manager_url=${p_kafka_manager_url}" @@ -342,7 +344,7 @@ fi ECHO_LOG "停kafka服务" stop_kafka_server -ECHO_LOG "停5秒, 确保" +ECHO_LOG "再停5秒, 确保端口已释放" sleep 5 if [ "${p_cluster_task_type}" == "0" ];then
diff --git a/kafka-manager-extends/kafka-manager-monitor/pom.xml
b/kafka-manager-extends/kafka-manager-monitor/pom.xml index 0948a190..9f04b7c9 100644 --- a/kafka-manager-extends/kafka-manager-monitor/pom.xml +++ b/kafka-manager-extends/kafka-manager-monitor/pom.xml @@ -25,7 +25,6 @@ 1.8 UTF-8 UTF-8 - 5.1.3.RELEASE @@ -63,12 +62,10 @@ org.springframework spring-beans - ${spring-version} org.springframework spring-context - ${spring-version} \ No newline at end of file diff --git a/kafka-manager-extends/kafka-manager-notify/pom.xml b/kafka-manager-extends/kafka-manager-notify/pom.xml index a2fd2c4b..348164eb 100644 --- a/kafka-manager-extends/kafka-manager-notify/pom.xml +++ b/kafka-manager-extends/kafka-manager-notify/pom.xml @@ -25,7 +25,6 @@ 1.8 UTF-8 UTF-8 - 5.1.3.RELEASE @@ -48,7 +47,6 @@ org.springframework spring-context - ${spring-version} \ No newline at end of file diff --git a/kafka-manager-extends/kafka-manager-openapi/pom.xml b/kafka-manager-extends/kafka-manager-openapi/pom.xml index caaa1242..cc6f3316 100644 --- a/kafka-manager-extends/kafka-manager-openapi/pom.xml +++ b/kafka-manager-extends/kafka-manager-openapi/pom.xml @@ -24,7 +24,6 @@ 1.8 UTF-8 UTF-8 - 5.1.3.RELEASE @@ -46,7 +45,6 @@ org.springframework spring-context - ${spring-version} \ No newline at end of file diff --git a/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java b/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java index 5df7815e..07b0a3e3 100644 --- a/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java +++ b/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java @@ -42,6 +42,9 @@ public class ThirdPartServiceImpl implements ThirdPartService { @Autowired private ConsumerService consumerService; + @Autowired + private KafkaClientPool kafkaClientPool; + @Override public Result checkConsumeHealth(Long clusterId, String topicName, @@ -109,7 +112,7 @@ public class ThirdPartServiceImpl implements ThirdPartService { Long timestamp) { KafkaConsumer kafkaConsumer = null; try { - kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO); + kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO); if (ValidateUtils.isNull(kafkaConsumer)) { return null; } diff --git a/kafka-manager-task/pom.xml b/kafka-manager-task/pom.xml index 8927ef8e..dce8d3c8 100644 --- a/kafka-manager-task/pom.xml +++ b/kafka-manager-task/pom.xml @@ -24,7 +24,6 @@ 1.8 UTF-8 UTF-8 - 5.1.3.RELEASE @@ -52,7 +51,6 @@ org.springframework spring-context - ${spring-version} \ No newline at end of file diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java index 7eddb926..564094d5 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java @@ -1,7 +1,6 @@ package com.xiaojukeji.kafka.manager.task.component; import com.google.common.collect.Lists; -import com.xiaojukeji.kafka.manager.common.constant.LogConstant; import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory; import com.xiaojukeji.kafka.manager.common.utils.JsonUtils; import 
com.xiaojukeji.kafka.manager.common.utils.NetUtils; @@ -29,7 +28,7 @@ import java.util.concurrent.*; * @date 20/8/10 */ public abstract class AbstractScheduledTask implements SchedulingConfigurer { - private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); + private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class); @Autowired private HeartbeatDao heartbeatDao; diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java index 37a36238..b4cfdd47 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java @@ -1,6 +1,5 @@ package com.xiaojukeji.kafka.manager.task.component; -import com.xiaojukeji.kafka.manager.common.constant.LogConstant; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -9,11 +8,11 @@ import org.slf4j.LoggerFactory; * @date 20/8/10 */ public class BaseBizTask implements Runnable { - private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); + private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class); - private E task; + private final E task; - private AbstractScheduledTask scheduledTask; + private final AbstractScheduledTask scheduledTask; public BaseBizTask(E task, AbstractScheduledTask scheduledTask) { this.task = task; @@ -30,6 +29,7 @@ public class BaseBizTask implements Runnable { } catch (Throwable t) { LOGGER.error("scheduled task scheduleName:{} execute failed, task:{}", scheduledTask.getScheduledName(), task, t); } + LOGGER.info("scheduled task scheduleName:{} finished, cost-time:{}ms.", scheduledTask.getScheduledName(), System.currentTimeMillis() - startTime); } } \ No newline at end of file diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishBrokerMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishBrokerMetrics.java new file mode 100644 index 00000000..47aa60d4 --- /dev/null +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishBrokerMetrics.java @@ -0,0 +1,93 @@ +package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect; + +import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections; +import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; +import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent; +import com.xiaojukeji.kafka.manager.common.utils.SpringTool; +import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; +import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant; +import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; +import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager; +import com.xiaojukeji.kafka.manager.service.service.ClusterService; +import com.xiaojukeji.kafka.manager.service.service.JmxService; +import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy; +import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask; +import com.xiaojukeji.kafka.manager.task.component.CustomScheduled; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import 
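// CollectAndPublishBrokerMetrics replaces the StoreBrokerMetrics task deleted later in
// this patch: the scheduled task now only collects metrics and publishes a
// BatchBrokerMetricsCollectedEvent, while separate listeners (SinkBrokerMetrics2DB and
// SinkClusterMetrics2DB below) persist them. The bare shape of that Spring pattern,
// with illustrative names rather than the project's exact classes:
//
//     public class MetricsEvent extends ApplicationEvent {
//         public MetricsEvent(Object source) { super(source); }
//     }
//
//     @Component
//     public class MetricsSink implements ApplicationListener<MetricsEvent> {
//         @Override
//         public void onApplicationEvent(MetricsEvent event) { /* persist metrics */ }
//     }
//
//     // collector side: SpringTool.publish(new MetricsEvent(this));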
org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; + +import java.util.ArrayList; +import java.util.List; + +/** + * Collects broker metrics + * @author zengqiao + * @date 20/5/7 + */ +@CustomScheduled(name = "collectAndPublishBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2) +@ConditionalOnProperty(prefix = "task.metrics.collect", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true) +public class CollectAndPublishBrokerMetrics extends AbstractScheduledTask<ClusterDO> { + private static final Logger LOGGER = LoggerFactory.getLogger(CollectAndPublishBrokerMetrics.class); + + @Autowired + private JmxService jmxService; + + @Autowired + private ClusterService clusterService; + + @Autowired + private AbstractHealthScoreStrategy healthScoreStrategy; + + @Override + protected List<ClusterDO> listAllTasks() { + return clusterService.list(); + } + + @Override + public void processTask(ClusterDO clusterDO) { + long startTime = System.currentTimeMillis(); + + try { + SpringTool.publish(new BatchBrokerMetricsCollectedEvent( + this, + clusterDO.getId(), + startTime, + this.getBrokerMetrics(clusterDO.getId())) + ); + } catch (Exception e) { + LOGGER.error("collect broker-metrics failed, physicalClusterId:{}.", clusterDO.getId(), e); + } + + LOGGER.info("collect broker-metrics finished, physicalClusterId:{} costTime:{}", clusterDO.getId(), System.currentTimeMillis() - startTime); + } + + private List<BrokerMetrics> getBrokerMetrics(Long clusterId) { + List<BrokerMetrics> metricsList = new ArrayList<>(); + for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) { + BrokerMetrics metrics = jmxService.getBrokerMetrics( + clusterId, + brokerId, + KafkaMetricsCollections.BROKER_TO_DB_METRICS + ); + + if (ValidateUtils.isNull(metrics)) { + continue; + } + + metrics.getMetricsMap().put( + JmxConstant.HEALTH_SCORE, + healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics) + ); + + metricsList.add(metrics); + } + + if (ValidateUtils.isEmptyList(metricsList)) { + return new ArrayList<>(); + } + + return metricsList; + } +} \ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java index cc67428f..28bb1612 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java @@ -44,6 +44,9 @@ public class CollectAndPublishCGData extends AbstractScheduledTask { @Autowired private ConsumerService consumerService; + @Autowired + private ThreadPool threadPool; + @Override protected List listAllTasks() { return clusterService.list(); @@ -82,7 +85,7 @@ public class CollectAndPublishCGData extends AbstractScheduledTask { return getTopicConsumerMetrics(clusterDO, topicName, startTimeUnitMs); } }); - ThreadPool.submitCollectMetricsTask(taskList[i]); + threadPool.submitCollectMetricsTask(clusterDO.getId(), taskList[i]); } List consumerMetricsList = new ArrayList<>();
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java
b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java index 07a137e6..a6757310 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java @@ -30,16 +30,23 @@ public class CollectAndPublishCommunityTopicMetrics extends AbstractScheduledTask<ClusterDO> { @Override protected List listAllTasks() { + // List the clusters whose metrics need to be collected; these clusters are sharded across multiple KM instances for execution. return clusterService.list(); } @Override public void processTask(ClusterDO clusterDO) { + // Implement the topic metrics collection logic for the cluster clusterDO here + + // Fetch the topic metrics List metricsList = getTopicMetrics(clusterDO.getId()); + + // After the topic traffic metrics are fetched, publish an event SpringTool.publish(new TopicMetricsCollectedEvent(this, clusterDO.getId(), metricsList)); } private List getTopicMetrics(Long clusterId) { + // Entry point for actually fetching the topic traffic metrics List metricsList = jmxService.getTopicMetrics(clusterId, KafkaMetricsCollections.TOPIC_METRICS_TO_DB, true); if (ValidateUtils.isEmptyList(metricsList)) {
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java index b8632971..89d7e516 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java @@ -1,15 +1,15 @@ package com.xiaojukeji.kafka.manager.task.dispatch.metrics.delete; -import com.xiaojukeji.kafka.manager.common.constant.Constant; import com.xiaojukeji.kafka.manager.common.constant.LogConstant; +import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils; import com.xiaojukeji.kafka.manager.dao.*; -import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils; import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask; import com.xiaojukeji.kafka.manager.task.component.CustomScheduled; import com.xiaojukeji.kafka.manager.task.component.EmptyEntry; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; import java.util.Arrays; import java.util.Date; @@ -22,10 +22,7 @@ import java.util.List; */ @CustomScheduled(name = "deleteMetrics", cron = "0 0/2 * * * ?", threadNum = 1) public class DeleteMetrics extends AbstractScheduledTask { - private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); - - @Autowired - private ConfigUtils configUtils; + private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); @Autowired private TopicMetricsDao topicMetricsDao; @@ -45,6 +42,27 @@ public class DeleteMetrics extends AbstractScheduledTask { @Autowired private TopicThrottledMetricsDao topicThrottledMetricsDao; + @Value(value = "${task.metrics.delete.delete-limit-size:1000}") + private Integer deleteLimitSize; + + @Value(value = "${task.metrics.delete.cluster-metrics-save-days:14}") + private Integer clusterMetricsSaveDays; + + @Value(value = "${task.metrics.delete.broker-metrics-save-days:14}") + private Integer brokerMetricsSaveDays; + + @Value(value = "${task.metrics.delete.topic-metrics-save-days:7}") + private
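// The single custom.store-metrics-task.save-days setting is replaced by per-table
// retention and a per-pass delete cap, all optional thanks to the defaults declared in
// the @Value annotations around this hunk:
//
//     task.metrics.delete.delete-limit-size                     (default 1000 rows/DELETE)
//     task.metrics.delete.cluster-metrics-save-days             (default 14)
//     task.metrics.delete.broker-metrics-save-days              (default 14)
//     task.metrics.delete.topic-metrics-save-days               (default 7)
//     task.metrics.delete.topic-request-time-metrics-save-days  (default 7)
//     task.metrics.delete.topic-throttled-metrics-save-days     (default 7)
//     task.metrics.delete.app-topic-metrics-save-days           (default 7)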
Integer topicMetricsSaveDays; + + @Value(value = "${task.metrics.delete.topic-request-time-metrics-save-days:7}") + private Integer topicRequestTimeMetricsSaveDays; + + @Value(value = "${task.metrics.delete.topic-throttled-metrics-save-days:7}") + private Integer topicThrottledMetricsSaveDays; + + @Value(value = "${task.metrics.delete.app-topic-metrics-save-days:7}") + private Integer appTopicMetricsSaveDays; + @Override public List listAllTasks() { EmptyEntry emptyEntry = new EmptyEntry(); @@ -54,78 +72,73 @@ @Override public void processTask(EmptyEntry entryEntry) { - if (Constant.INVALID_CODE.equals(configUtils.getMaxMetricsSaveDays())) { - // No data deletion needed - return; - } - long startTime = System.currentTimeMillis(); LOGGER.info("start delete metrics"); - try { - deleteTopicMetrics(); - } catch (Exception e) { - LOGGER.error("delete topic metrics failed.", e); + + // The data volume may be large, so delete several batches per trigger + for (int i = 0; i < 10; ++i) { + try { + boolean needReDelete = this.deleteCommunityTopicMetrics(); + if (!needReDelete) { + break; + } + + // Pause 1000 ms to avoid deleting so fast that the DB runs into trouble + BackoffUtils.backoff(1000); + } catch (Exception e) { + LOGGER.error("delete community topic metrics failed.", e); + } + } + + // The data volume may be large, so delete several batches per trigger + for (int i = 0; i < 10; ++i) { + try { + boolean needReDelete = this.deleteDiDiTopicMetrics(); + if (!needReDelete) { + break; + } + + // Pause 1000 ms to avoid deleting so fast that the DB runs into trouble + BackoffUtils.backoff(1000); + } catch (Exception e) { + LOGGER.error("delete didi topic metrics failed.", e); + } } try { - deleteTopicAppMetrics(); + this.deleteClusterBrokerMetrics(); } catch (Exception e) { - LOGGER.error("delete topic app metrics failed.", e); + LOGGER.error("delete cluster and broker metrics failed.", e); } - try { - deleteTopicRequestMetrics(); - } catch (Exception e) { - LOGGER.error("delete topic request metrics failed.", e); - } - - try { - deleteThrottledMetrics(); - } catch (Exception e) { - LOGGER.error("delete topic throttled metrics failed.", e); - } - - try { - deleteBrokerMetrics(); - } catch (Exception e) { - LOGGER.error("delete broker metrics failed.", e); - } - - try { - deleteClusterMetrics(); - } catch (Exception e) { - LOGGER.error("delete cluster metrics failed.", e); - } LOGGER.info("finish delete metrics, costTime:{}ms.", System.currentTimeMillis() - startTime); } - private void deleteTopicMetrics() { - Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000); - topicMetricsDao.deleteBeforeTime(endTime); + private boolean deleteCommunityTopicMetrics() { + return topicMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.topicMetricsSaveDays * 24 * 60 * 60 * 1000), this.deleteLimitSize) >= this.deleteLimitSize; } - private void deleteTopicAppMetrics() { - Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000); - topicAppMetricsDao.deleteBeforeTime(endTime); + private boolean deleteDiDiTopicMetrics() { + boolean needReDelete = false; + + if (topicAppMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.appTopicMetricsSaveDays * 24 * 60 * 60 * 1000), this.deleteLimitSize) >= this.deleteLimitSize) { + needReDelete = true; + } + + if (topicRequestMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.topicRequestTimeMetricsSaveDays * 24 * 60 * 60 * 1000), this.deleteLimitSize) >= this.deleteLimitSize) { + needReDelete = true; + } + + if
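// Each delete helper reports whether it removed a full batch, which the caller treats as
// "there is probably more to delete". Extracted as a reusable sketch (hypothetical
// helper, not part of the patch):
//
//     static void deleteInBatches(IntSupplier deleteOnce, int limitSize, int maxRounds) {
//         for (int i = 0; i < maxRounds; ++i) {
//             if (deleteOnce.getAsInt() < limitSize) {
//                 break;                         // partial batch: backlog is cleared
//             }
//             BackoffUtils.backoff(1000);        // pause so the DB is not hammered
//         }
//     }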
(topicThrottledMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.topicThrottledMetricsSaveDays * 24 * 60 * 60 * 1000), this.deleteLimitSize) >= this.deleteLimitSize) { + needReDelete = true; + } + + return needReDelete; } - private void deleteTopicRequestMetrics() { - Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000); - topicRequestMetricsDao.deleteBeforeTime(endTime); - } + private void deleteClusterBrokerMetrics() { + brokerMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.brokerMetricsSaveDays * 24 * 60 * 60 * 1000), this.deleteLimitSize); - private void deleteThrottledMetrics() { - Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000); - topicThrottledMetricsDao.deleteBeforeTime(endTime); - } - - private void deleteBrokerMetrics() { - Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000); - brokerMetricsDao.deleteBeforeTime(endTime); - } - - private void deleteClusterMetrics() { - Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000); - clusterMetricsDao.deleteBeforeTime(endTime); + clusterMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.clusterMetricsSaveDays * 24 * 60 * 60 * 1000), this.deleteLimitSize); } } \ No newline at end of file diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreBrokerMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreBrokerMetrics.java deleted file mode 100644 index 50f5f633..00000000 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreBrokerMetrics.java +++ /dev/null @@ -1,136 +0,0 @@ -package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store; - -import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections; -import com.xiaojukeji.kafka.manager.common.constant.LogConstant; -import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics; -import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; -import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant; -import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata; -import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao; -import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao; -import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO; -import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; -import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO; -import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager; -import com.xiaojukeji.kafka.manager.service.service.ClusterService; -import com.xiaojukeji.kafka.manager.service.service.JmxService; -import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy; -import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils; -import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask; -import com.xiaojukeji.kafka.manager.task.component.CustomScheduled; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import 
org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -/** - * Stores broker metrics to DB: broker traffic, cluster traffic - * @author zengqiao - * @date 20/5/7 - */ -@CustomScheduled(name = "storeBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2) -@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true) -public class StoreBrokerMetrics extends AbstractScheduledTask { - private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); - - @Autowired - private JmxService jmxService; - - @Autowired - private ClusterService clusterService; - - @Autowired - private BrokerMetricsDao brokerMetricsDao; - - @Autowired - private ClusterMetricsDao clusterMetricsDao; - - @Autowired - private AbstractHealthScoreStrategy healthScoreStrategy; - - private static final Integer INSERT_BATCH_SIZE = 100; - - @Override - protected List listAllTasks() { - return clusterService.list(); - } - - @Override - public void processTask(ClusterDO clusterDO) { - long startTime = System.currentTimeMillis(); - List clusterMetricsList = new ArrayList<>(); - - try { - List brokerMetricsList = getAndBatchAddMetrics(startTime, clusterDO.getId()); - clusterMetricsList.add(supplyAndConvert2ClusterMetrics( - clusterDO.getId(), - MetricsConvertUtils.merge2BaseMetricsByAdd(brokerMetricsList)) - ); - } catch (Throwable t) { - LOGGER.error("collect failed, clusterId:{}.", clusterDO.getId(), t); - } - long endTime = System.currentTimeMillis(); - LOGGER.info("collect finish, clusterId:{} costTime:{}", clusterDO.getId(), endTime - startTime); - - List doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList( - startTime, - clusterMetricsList - ); - clusterMetricsDao.batchAdd(doList); - } - - private List getAndBatchAddMetrics(Long startTime, Long clusterId) { - List metricsList = new ArrayList<>(); - for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) { - BrokerMetrics metrics = jmxService.getBrokerMetrics( - clusterId, - brokerId, - KafkaMetricsCollections.BROKER_TO_DB_METRICS - ); - if (ValidateUtils.isNull(metrics)) { - continue; - } - metrics.getMetricsMap().put( - JmxConstant.HEALTH_SCORE, - healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics) - ); - metricsList.add(metrics); - } - if (ValidateUtils.isEmptyList(metricsList)) { - return new ArrayList<>(); - } - - List doList = - MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(startTime, metricsList); - int i = 0; - do { - brokerMetricsDao.batchAdd(doList.subList(i, Math.min(i + INSERT_BATCH_SIZE, doList.size()))); - i += INSERT_BATCH_SIZE; - } while (i < doList.size()); - return metricsList; - } - - private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) { - ClusterMetrics metrics = new ClusterMetrics(clusterId); - Map metricsMap = metrics.getMetricsMap(); - metricsMap.putAll(baseMetrics.getMetricsMap()); - metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size()); - metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size()); - Integer partitionNum = 0; - for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) { - TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName); - if
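// The deleted task wrote rows in INSERT_BATCH_SIZE chunks, and the same sublist pattern
// reappears in the new sink listeners below. As a generic sketch (hypothetical helper,
// using java.util.function.Consumer):
//
//     static <T> void insertInChunks(List<T> rows, int batchSize, Consumer<List<T>> insert) {
//         for (int i = 0; i < rows.size(); i += batchSize) {
//             insert.accept(rows.subList(i, Math.min(i + batchSize, rows.size())));
//         }
//     }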
(ValidateUtils.isNull(topicMetaData)) { - continue; - } - partitionNum += topicMetaData.getPartitionNum(); - } - metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum); - return metrics; - } -} \ No newline at end of file diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java index ede6525d..6543f6fa 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java @@ -17,7 +17,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.stereotype.Component; import java.util.*; @@ -28,7 +27,7 @@ import java.util.*; @CustomScheduled(name = "storeDiDiAppTopicMetrics", cron = "41 0/1 * * * ?", threadNum = 5) @ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "app-topic-metrics-enabled", havingValue = "true", matchIfMissing = true) public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask { - private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); + private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); @Autowired private JmxService jmxService; @@ -50,7 +49,7 @@ public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask { try { getAndBatchAddTopicAppMetrics(startTime, clusterDO.getId()); - } catch (Throwable t) { + } catch (Exception t) { LOGGER.error("save topic metrics failed, clusterId:{}.", clusterDO.getId(), t); } } @@ -65,7 +64,12 @@ public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask { MetricsConvertUtils.convertAndUpdateCreateTime2TopicMetricsDOList(startTime, metricsList); int i = 0; do { - topicAppMetricsDao.batchAdd(doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()))); + List subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size())); + if (ValidateUtils.isEmptyList(subDOList)) { + return; + } + + topicAppMetricsDao.batchAdd(subDOList); i += Constant.BATCH_INSERT_SIZE; } while (i < doList.size()); } diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java index c4caa229..040612f2 100644 --- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java @@ -27,7 +27,7 @@ import java.util.*; @CustomScheduled(name = "storeDiDiTopicRequestTimeMetrics", cron = "51 0/1 * * * ?", threadNum = 5) @ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-request-time-metrics-enabled", havingValue = "true", matchIfMissing = true) public class StoreDiDiTopicRequestTimeMetrics extends AbstractScheduledTask { - private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); + private static 
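// Narrowing catch (Throwable) to catch (Exception) in these store tasks keeps JVM Errors
// such as OutOfMemoryError from being silently swallowed by the per-cluster loop; only
// recoverable failures are logged and skipped, as in the hunk above:
//
//     try {
//         getAndBatchAddTopicAppMetrics(startTime, clusterDO.getId());
//     } catch (Exception t) {
//         LOGGER.error("save topic metrics failed, clusterId:{}.", clusterDO.getId(), t);
//     }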
final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER); @Autowired private JmxService jmxService; @@ -51,7 +51,7 @@ public class StoreDiDiTopicRequestTimeMetrics extends AbstractScheduledTask<ClusterDO> { int i = 0; do { - topicRequestMetricsDao.batchAdd(doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()))); + List subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size())); + if (ValidateUtils.isEmptyList(subDOList)) { + return; + } + + topicRequestMetricsDao.batchAdd(subDOList); i += Constant.BATCH_INSERT_SIZE; } while (i < doList.size()); }
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/biz/RegionCreatedListener.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/biz/RegionCreatedListener.java new file mode 100644 index 00000000..5daa0e9e --- /dev/null +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/biz/RegionCreatedListener.java @@ -0,0 +1,38 @@ +package com.xiaojukeji.kafka.manager.task.listener.biz; + +import com.xiaojukeji.kafka.manager.common.events.RegionCreatedEvent; +import com.xiaojukeji.kafka.manager.task.dispatch.biz.CalRegionCapacity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.ApplicationListener; +import org.springframework.scheduling.annotation.Async; +import org.springframework.stereotype.Component; + +/** + * Listener for region creation events. + * TODO: move this to the core module later + * @author zengqiao + * @date 22/01/11 + */ +@Component +public class RegionCreatedListener implements ApplicationListener<RegionCreatedEvent> { + private static final Logger logger = LoggerFactory.getLogger(RegionCreatedListener.class); + + @Autowired + private CalRegionCapacity calRegionCapacity; + + @Async + @Override + public void onApplicationEvent(RegionCreatedEvent event) { + try { + logger.info("cal region capacity started when region created, regionDO:{}.", event.getRegionDO()); + + calRegionCapacity.processTask(event.getRegionDO()); + + logger.info("cal region capacity finished when region created, regionDO:{}.", event.getRegionDO()); + } catch (Exception e) { + logger.error("cal region capacity failed when region created, regionDO:{}.", event.getRegionDO(), e); + } + } +}
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkBrokerMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkBrokerMetrics2DB.java new file mode 100644 index 00000000..923d26b6 --- /dev/null +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkBrokerMetrics2DB.java @@ -0,0 +1,55 @@ +package com.xiaojukeji.kafka.manager.task.listener.sink.db; + +import com.xiaojukeji.kafka.manager.common.constant.Constant; +import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; +import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO; +import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent; +import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; +import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao; +import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import java.util.List; + +/** + *
@author zengqiao + * @date 22/01/17 + */ +@Component +@ConditionalOnProperty(prefix = "task.metrics.sink.broker-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true) +public class SinkBrokerMetrics2DB implements ApplicationListener { + private static final Logger logger = LoggerFactory.getLogger(SinkBrokerMetrics2DB.class); + + @Autowired + private BrokerMetricsDao metricsDao; + + @Override + public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) { + logger.debug("sink broker-metrics to db start, event:{}.", event); + + List metricsList = event.getMetricsList(); + if (ValidateUtils.isEmptyList(metricsList)) { + logger.warn("sink broker-metrics to db finished, without need sink, event:{}.", event); + return; + } + + List doList = MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(event.getCollectTime(), metricsList); + int i = 0; + while (i < doList.size()) { + List subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size())); + if (ValidateUtils.isEmptyList(subDOList)) { + break; + } + + metricsDao.batchAdd(subDOList); + i += Constant.BATCH_INSERT_SIZE; + } + + logger.debug("sink broker-metrics to db finished, event:{}.", event); + } +} \ No newline at end of file diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java new file mode 100644 index 00000000..a1aab09c --- /dev/null +++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java @@ -0,0 +1,80 @@ +package com.xiaojukeji.kafka.manager.task.listener.sink.db; + +import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics; +import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; +import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics; +import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO; +import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent; +import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; +import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant; +import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata; +import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao; +import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager; +import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +/** + * @author zengqiao + * @date 22/01/17 + */ +@Component +@ConditionalOnProperty(prefix = "task.metrics.sink.cluster-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true) +public class SinkClusterMetrics2DB implements ApplicationListener { + private static final Logger logger = LoggerFactory.getLogger(SinkClusterMetrics2DB.class); + + @Autowired + private ClusterMetricsDao clusterMetricsDao; + + @Override + public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) { + logger.debug("sink cluster-metrics to db start, event:{}.", event); + 
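// SinkClusterMetrics2DB derives cluster-level rows from the same broker event, so one
// JMX collection pass feeds both tables. merge2BaseMetricsByAdd is not shown in this
// patch; presumably it folds the per-broker metric maps together by summing values,
// along these lines (illustrative):
//
//     Map<String, Object> merged = new HashMap<>();
//     for (BrokerMetrics brokerMetrics : metricsList) {
//         brokerMetrics.getMetricsMap().forEach((key, value) -> merged.merge(
//                 key, value,
//                 (a, b) -> ((Number) a).doubleValue() + ((Number) b).doubleValue()));
//     }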
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java
new file mode 100644
index 00000000..a1aab09c
--- /dev/null
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java
@@ -0,0 +1,80 @@
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
+
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
+import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
+import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
+import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
+import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+@Component
+@ConditionalOnProperty(prefix = "task.metrics.sink.cluster-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
+public class SinkClusterMetrics2DB implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
+    private static final Logger logger = LoggerFactory.getLogger(SinkClusterMetrics2DB.class);
+
+    @Autowired
+    private ClusterMetricsDao clusterMetricsDao;
+
+    @Override
+    public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
+        logger.debug("sink cluster-metrics to db start, event:{}.", event);
+
+        List<BrokerMetrics> metricsList = event.getMetricsList();
+        if (ValidateUtils.isEmptyList(metricsList)) {
+            logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
+            return;
+        }
+
+        List<ClusterMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
+                event.getCollectTime(),
+                // merge broker-metrics into cluster-metrics
+                Arrays.asList(supplyAndConvert2ClusterMetrics(event.getPhysicalClusterId(), MetricsConvertUtils.merge2BaseMetricsByAdd(event.getMetricsList())))
+        );
+
+        if (ValidateUtils.isEmptyList(doList)) {
+            logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
+            return;
+        }
+
+        clusterMetricsDao.batchAdd(doList);
+
+        logger.debug("sink cluster-metrics to db finished, event:{}.", event);
+    }
+
+    private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
+        ClusterMetrics metrics = new ClusterMetrics(clusterId);
+        Map<String, Object> metricsMap = metrics.getMetricsMap();
+        metricsMap.putAll(baseMetrics.getMetricsMap());
+        metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
+        metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
+        Integer partitionNum = 0;
+        for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
+            TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
+            if (ValidateUtils.isNull(topicMetaData)) {
+                continue;
+            }
+            partitionNum += topicMetaData.getPartitionNum();
+        }
+        metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
+        return metrics;
+    }
+}
\ No newline at end of file
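`MetricsConvertUtils.merge2BaseMetricsByAdd` is not part of this diff; judging by its name and its use above, it folds the per-broker metric maps into a single map by adding numeric values, after which `supplyAndConvert2ClusterMetrics` supplements the counts that cannot be summed (topic, broker, and partition numbers). A hypothetical sketch of such a merge:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-in for MetricsConvertUtils.merge2BaseMetricsByAdd, whose body is
// not shown in this diff: fold per-broker metric maps into one map by numeric addition.
final class MetricsMergeSketch {
    private MetricsMergeSketch() {
    }

    static Map<String, Object> mergeByAdd(List<Map<String, Object>> brokerMetricsMaps) {
        Map<String, Object> merged = new HashMap<>();
        for (Map<String, Object> single : brokerMetricsMaps) {
            for (Map.Entry<String, Object> entry : single.entrySet()) {
                if (!(entry.getValue() instanceof Number)) {
                    continue; // only numeric gauges/counters can be summed
                }
                double add = ((Number) entry.getValue()).doubleValue();
                merged.merge(entry.getKey(), add,
                        (old, inc) -> ((Number) old).doubleValue() + ((Number) inc).doubleValue());
            }
        }
        return merged;
    }
}
```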
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreCommunityTopicMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreCommunityTopicMetrics2DB.java
similarity index 78%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreCommunityTopicMetrics2DB.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreCommunityTopicMetrics2DB.java
index 0c0714f7..267e32b7 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreCommunityTopicMetrics2DB.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreCommunityTopicMetrics2DB.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
 
 import com.xiaojukeji.kafka.manager.common.constant.Constant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
@@ -25,7 +25,7 @@ import java.util.List;
 @Component("storeCommunityTopicMetrics2DB")
 @ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "topic-metrics-enabled", havingValue = "true", matchIfMissing = true)
 public class StoreCommunityTopicMetrics2DB implements ApplicationListener {
-    private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
 
     @Autowired
     private TopicMetricsDao topicMetricsDao;
@@ -40,17 +40,21 @@ public class StoreCommunityTopicMetrics2DB implements ApplicationListener
-    private void store2DB(Long startTime, List<TopicMetrics> metricsList) throws Exception {
-        List<TopicMetricsDO> doList =
-                MetricsConvertUtils.convertAndUpdateCreateTime2TopicMetricsDOList(startTime, metricsList);
+    private void store2DB(Long startTime, List<TopicMetrics> metricsList) {
+        List<TopicMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2TopicMetricsDOList(startTime, metricsList);
         int i = 0;
         do {
-            topicMetricsDao.batchAdd(doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size())));
+            List<TopicMetricsDO> subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
+            if (ValidateUtils.isEmptyList(subDOList)) {
+                return;
+            }
+
+            topicMetricsDao.batchAdd(subDOList);
             i += Constant.BATCH_INSERT_SIZE;
         } while (i < doList.size());
     }
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreTopicThrottledMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreTopicThrottledMetrics2DB.java
similarity index 96%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreTopicThrottledMetrics2DB.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreTopicThrottledMetrics2DB.java
index 4e34e732..c2d74df3 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreTopicThrottledMetrics2DB.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreTopicThrottledMetrics2DB.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
 
 import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
 import com.xiaojukeji.kafka.manager.common.constant.Constant;
@@ -22,7 +22,7 @@ import java.util.*;
  * @date 20/9/24
  */
 @Component("storeTopicThrottledMetrics2DB")
-@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-throttled-metrics", havingValue = "true", matchIfMissing = true)
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-throttled-metrics-enabled", havingValue = "true", matchIfMissing = true)
 public class StoreTopicThrottledMetrics2DB implements ApplicationListener {
     private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
 
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Kafka.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkCommunityTopicMetrics2Kafka.java
similarity index 98%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Kafka.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkCommunityTopicMetrics2Kafka.java
index ad80ceb2..5f3a0e5c 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Kafka.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkCommunityTopicMetrics2Kafka.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
 
 import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
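Note the shape of the fix in `store2DB`: with a `do { … } while` loop, an empty `doList` still executed one iteration and handed an empty sublist to `batchAdd`; the new guard returns before that happens. (The `while`-loop variant in `SinkBrokerMetrics2DB` above never enters its body for an empty list in the first place.) A self-contained sketch of the chunked-insert pattern; the batch constant and the DAO callback are stand-ins for `Constant.BATCH_INSERT_SIZE` and the real DAO:

```java
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;

// Sketch of chunked batch inserts: walk the list in fixed-size windows, and bail out
// early instead of issuing a spurious first call when the list is empty.
final class BatchInsertSketch {
    private static final int BATCH_SIZE = 100; // stands in for Constant.BATCH_INSERT_SIZE

    static <T> void batchAdd(List<T> doList, Consumer<List<T>> dao) {
        int i = 0;
        do {
            List<T> sub = doList.subList(i, Math.min(i + BATCH_SIZE, doList.size()));
            if (sub.isEmpty()) {
                return; // doList was empty: skip the spurious first iteration
            }
            dao.accept(sub);
            i += BATCH_SIZE;
        } while (i < doList.size());
    }

    public static void main(String[] args) {
        batchAdd(Collections.<String>emptyList(), sub -> {
            throw new AssertionError("never called for an empty list");
        });
        System.out.println("empty list handled without a DB call");
    }
}
```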
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Kafka.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkConsumerMetrics2Kafka.java
similarity index 98%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Kafka.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkConsumerMetrics2Kafka.java
index 7070dae1..eb6c2d37 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Kafka.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkConsumerMetrics2Kafka.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
 
 import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Monitor.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkCommunityTopicMetrics2Monitor.java
similarity index 98%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Monitor.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkCommunityTopicMetrics2Monitor.java
index e2ac74a9..80b3eccd 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Monitor.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkCommunityTopicMetrics2Monitor.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
 
 import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Monitor.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkConsumerMetrics2Monitor.java
similarity index 99%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Monitor.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkConsumerMetrics2Monitor.java
index 4ca276f9..a5c2e008 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Monitor.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkConsumerMetrics2Monitor.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
 
 import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkTopicThrottledMetrics2Monitor.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkTopicThrottledMetrics2Monitor.java
similarity index 98%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkTopicThrottledMetrics2Monitor.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkTopicThrottledMetrics2Monitor.java
index fb95947c..ff1cb823 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkTopicThrottledMetrics2Monitor.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkTopicThrottledMetrics2Monitor.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
 
 import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
 import com.xiaojukeji.kafka.manager.monitor.common.MonitorSinkConstant;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java
index a7d196af..54321240 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java
@@ -32,6 +32,9 @@ public class FlushZKConsumerGroupMetadata {
     @Autowired
     private ClusterService clusterService;
 
+    @Autowired
+    private ThreadPool threadPool;
+
     @Scheduled(cron="35 0/1 * * * ?")
     public void schedule() {
         List<ClusterDO> doList = clusterService.list();
@@ -95,7 +98,7 @@ public class FlushZKConsumerGroupMetadata {
                 return new ArrayList<>();
             }
         });
-        ThreadPool.submitCollectMetricsTask(taskList[i]);
+        threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
     }
 
     Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
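Replacing the static `ThreadPool.submitCollectMetricsTask(...)` call with an injected bean turns the pool into a managed, configurable component (its sizes appear in the `thread-pool` section added to `application.yml` later in this diff), and the new `clusterId` argument lets the pool attribute work to a cluster. The real `ThreadPool` class is not shown here; a sketch under those assumptions, simplified to `Runnable` tasks:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// Sketch of an injectable thread pool, sized from the thread-pool.collect-metrics
// configuration keys added later in this diff. Names are illustrative.
@Component
class ThreadPoolSketch {
    private final ThreadPoolExecutor collectMetricsPool;

    ThreadPoolSketch(@Value("${thread-pool.collect-metrics.thread-num:256}") int threadNum,
                     @Value("${thread-pool.collect-metrics.queue-size:5000}") int queueSize) {
        this.collectMetricsPool = new ThreadPoolExecutor(
                threadNum, threadNum, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(queueSize));
    }

    void submitCollectMetricsTask(Long clusterId, Runnable task) {
        // clusterId identifies the owner of the task; the real class may use it for
        // per-cluster routing or bookkeeping, here it is only logged.
        System.out.println("submitting collect-metrics task for cluster " + clusterId);
        collectMetricsPool.submit(task);
    }
}
```

Compared with a static utility, the injected bean can be mocked in tests and resized per deployment without recompiling.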
diff --git a/kafka-manager-web/pom.xml b/kafka-manager-web/pom.xml
index 17504ca7..80ccdfe1 100644
--- a/kafka-manager-web/pom.xml
+++ b/kafka-manager-web/pom.xml
@@ -16,10 +16,9 @@
         <maven.compiler.source>1.8</maven.compiler.source>
         <maven.compiler.target>1.8</maven.compiler.target>
-        <springframework.boot.version>2.1.1.RELEASE</springframework.boot.version>
-        <spring-version>5.1.3.RELEASE</spring-version>
         false
-        <tomcat.version>8.5.66</tomcat.version>
+        <tomcat.version>8.5.72</tomcat.version>
+        <log4j2.version>2.16.0</log4j2.version>
@@ -72,22 +71,22 @@
         <dependency>
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-web</artifactId>
-            <version>${springframework.boot.version}</version>
+            <version>${spring.boot.version}</version>
         </dependency>
         <dependency>
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-aop</artifactId>
-            <version>${springframework.boot.version}</version>
+            <version>${spring.boot.version}</version>
         </dependency>
         <dependency>
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-logging</artifactId>
-            <version>${springframework.boot.version}</version>
+            <version>${spring.boot.version}</version>
         </dependency>
         <dependency>
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-thymeleaf</artifactId>
-            <version>${springframework.boot.version}</version>
+            <version>${spring.boot.version}</version>
         </dependency>
         <dependency>
             <groupId>junit</groupId>
@@ -104,16 +103,17 @@
         <dependency>
             <groupId>org.springframework</groupId>
             <artifactId>spring-context-support</artifactId>
-            <version>${spring-version}</version>
         </dependency>
     </dependencies>
 
     <build>
+        <finalName>kafka-manager</finalName>
+
         <plugins>
             <plugin>
                 <groupId>org.springframework.boot</groupId>
                 <artifactId>spring-boot-maven-plugin</artifactId>
-                <version>${springframework.boot.version}</version>
+                <version>${spring.boot.version}</version>
@@ -121,6 +121,7 @@
+
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java
index 790b85be..8469afec 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java
@@ -32,7 +32,7 @@ import java.util.stream.Collectors;
  */
 @Api(tags = "开放接口-Broker相关接口(REST)")
 @RestController
-@RequestMapping(ApiPrefix.API_V1_THIRD_PART_OP_PREFIX)
+@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX)
 public class ThirdPartBrokerController {
     @Autowired
     private BrokerService brokerService;
@@ -44,7 +44,7 @@ public class ThirdPartBrokerController {
     private ClusterService clusterService;
 
     @ApiOperation(value = "Broker信息概览", notes = "")
-    @RequestMapping(value = "{clusterId}/brokers/{brokerId}/overview", method = RequestMethod.GET)
+    @GetMapping(value = "{clusterId}/brokers/{brokerId}/overview")
     @ResponseBody
     public Result getBrokerOverview(@PathVariable Long clusterId,
                                     @PathVariable Integer brokerId) {
@@ -70,7 +70,7 @@ public class ThirdPartBrokerController {
     }
 
     @ApiOperation(value = "BrokerRegion信息", notes = "所有集群的")
-    @RequestMapping(value = "broker-regions", method = RequestMethod.GET)
+    @GetMapping(value = "broker-regions")
     @ResponseBody
     public Result getBrokerRegions() {
         List<ClusterDO> clusterDOList = clusterService.list();
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java
index 91d0080c..f8406cfe 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java
@@ -1,5 +1,7 @@
 package com.xiaojukeji.kafka.manager.web.config;
 
+import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
+import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.web.servlet.config.annotation.*;
@@ -20,6 +22,9 @@ import springfox.documentation.swagger2.annotations.EnableSwagger2;
 @EnableWebMvc
 @EnableSwagger2
 public class SwaggerConfig implements WebMvcConfigurer {
+    @Autowired
+    private ConfigUtils configUtils;
+
     @Override
     public void addResourceHandlers(ResourceHandlerRegistry registry) {
         registry.addResourceHandler("swagger-ui.html").addResourceLocations("classpath:/META-INF/resources/");
@@ -39,10 +44,9 @@ public class SwaggerConfig implements WebMvcConfigurer {
 
     private ApiInfo apiInfo() {
         return new ApiInfoBuilder()
-                .title("Logi-KafkaManager 接口文档")
-                .description("欢迎使用滴滴Logi-KafkaManager")
-                .contact("huangyiminghappy@163.com")
-                .version("2.2.0")
+                .title("LogiKM接口文档")
+                .description("欢迎使用滴滴LogiKM")
+                .version(configUtils.getApplicationVersion())
                 .build();
     }
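`SwaggerConfig` now reports the running application's version instead of a hard-coded `2.2.0`. The plumbing relies on the `version: @project.version@` key added to `application.yml` below: the Spring Boot parent enables Maven resource filtering, so the `@project.version@` placeholder is replaced with the real project version at build time. `ConfigUtils` itself is not part of this diff; a minimal equivalent might simply bind that property:

```java
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// Minimal stand-in for ConfigUtils (not shown in this diff): expose the
// spring.application.version value that Maven resource filtering fills in
// from @project.version@ in application.yml.
@Component
class ConfigUtilsSketch {
    @Value("${spring.application.version:unknown}")
    private String applicationVersion;

    public String getApplicationVersion() {
        return applicationVersion;
    }
}
```

Deriving the Swagger version from the build also removes one place that had to be bumped by hand on every release.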
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/OrderConverter.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/OrderConverter.java
index bbe8c656..ab6c0ba6 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/OrderConverter.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/OrderConverter.java
@@ -1,15 +1,16 @@
 package com.xiaojukeji.kafka.manager.web.converters;
 
-import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
 import com.xiaojukeji.kafka.manager.bpm.common.OrderResult;
+import com.xiaojukeji.kafka.manager.bpm.common.OrderStatusEnum;
 import com.xiaojukeji.kafka.manager.bpm.common.entry.BaseOrderDetailData;
+import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
 import com.xiaojukeji.kafka.manager.common.entity.vo.common.AccountVO;
 import com.xiaojukeji.kafka.manager.common.entity.vo.normal.order.OrderResultVO;
 import com.xiaojukeji.kafka.manager.common.entity.vo.normal.order.OrderVO;
 import com.xiaojukeji.kafka.manager.common.entity.vo.normal.order.detail.OrderDetailBaseVO;
 import com.xiaojukeji.kafka.manager.common.utils.CopyUtils;
 import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -41,7 +42,9 @@ public class OrderConverter {
         }
         OrderVO orderVO = new OrderVO();
         CopyUtils.copyProperties(orderVO, orderDO);
-        orderVO.setGmtTime(orderDO.getGmtCreate());
+        if (OrderStatusEnum.WAIT_DEAL.getCode().equals(orderDO.getStatus())) {
+            orderVO.setGmtHandle(null);
+        }
         return orderVO;
     }
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java
index 747fbb8b..06de3ad9 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java
@@ -95,12 +95,21 @@ public class ReassignModelConverter {
         vo.setBeginTime(0L);
         vo.setEndTime(0L);
 
+        StringBuilder clusterAndTopicName = new StringBuilder();
+
         Integer completedTopicNum = 0;
         Set<Integer> statusSet = new HashSet<>();
         for (ReassignTaskDO elem: doList) {
             vo.setGmtCreate(elem.getGmtCreate().getTime());
             vo.setOperator(elem.getOperator());
             vo.setDescription(elem.getDescription());
+
+            if (clusterAndTopicName.length() == 0) {
+                clusterAndTopicName.append("-").append(elem.getClusterId()).append("-").append(elem.getTopicName());
+            } else {
+                clusterAndTopicName.append("等");
+            }
+
             if (TaskStatusReassignEnum.isFinished(elem.getStatus())) {
                 completedTopicNum += 1;
                 statusSet.add(elem.getStatus());
@@ -114,6 +123,9 @@ public class ReassignModelConverter {
             vo.setBeginTime(elem.getBeginTime().getTime());
         }
 
+        // Show the cluster ID and topic name in the task name; with multiple topics, only the first is shown. PR from Hongten
+        vo.setTaskName(String.format("%s 数据迁移任务%s", DateUtils.getFormattedDate(taskId), clusterAndTopicName.toString()));
+
         // overall task status
         if (statusSet.contains(TaskStatusReassignEnum.RUNNING.getCode())) {
             vo.setStatus(TaskStatusReassignEnum.RUNNING.getCode());
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/TopicMineConverter.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/TopicMineConverter.java
index 97b8f04a..e21c41da 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/TopicMineConverter.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/TopicMineConverter.java
@@ -29,6 +29,7 @@ public class TopicMineConverter {
             vo.setClusterName(data.getLogicalClusterName());
             vo.setBytesIn(data.getBytesIn());
             vo.setBytesOut(data.getBytesOut());
+            vo.setDescription(data.getDescription());
             voList.add(vo);
         }
         return voList;
     }
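The `ReassignModelConverter` change derives a human-readable task name from the first cluster/topic pair and appends "等" ("etc.") once per additional topic. A compact sketch of just that naming rule, with an illustrative element type standing in for `ReassignTaskDO`:

```java
import java.util.Arrays;
import java.util.List;

// Sketch of the task-name suffix rule: show the first cluster/topic pair only, and
// append "等" ("etc.") for each additional topic, mirroring the loop above.
final class TaskNameSuffixSketch {
    static final class Reassign {
        final long clusterId;
        final String topicName;

        Reassign(long clusterId, String topicName) {
            this.clusterId = clusterId;
            this.topicName = topicName;
        }
    }

    static String buildSuffix(List<Reassign> tasks) {
        StringBuilder name = new StringBuilder();
        for (Reassign t : tasks) {
            if (name.length() == 0) {
                name.append("-").append(t.clusterId).append("-").append(t.topicName);
            } else {
                name.append("等");
            }
        }
        return name.toString();
    }

    public static void main(String[] args) {
        // prints "-1-topicA等等" for three topics: first shown, two extras marked
        System.out.println(buildSuffix(Arrays.asList(
                new Reassign(1L, "topicA"),
                new Reassign(1L, "topicB"),
                new Reassign(1L, "topicC"))));
    }
}
```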
diff --git a/kafka-manager-web/src/main/resources/application.yml b/kafka-manager-web/src/main/resources/application.yml
index 4463d746..9cd51d46 100644
--- a/kafka-manager-web/src/main/resources/application.yml
+++ b/kafka-manager-web/src/main/resources/application.yml
@@ -9,6 +9,9 @@ server:
 spring:
   application:
     name: kafkamanager
+    version: @project.version@
+  profiles:
+    active: dev
   datasource:
     kafka-manager:
       jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
@@ -18,8 +21,6 @@ spring:
   main:
     allow-bean-definition-overriding: true
-  profiles:
-    active: dev
   servlet:
     multipart:
       max-file-size: 100MB
@@ -30,27 +31,57 @@ logging:
 custom:
   idc: cn
-  jmx:
-    max-conn: 10 # no longer takes effect here since v2.3
   store-metrics-task:
     community:
-      broker-metrics-enabled: true
       topic-metrics-enabled: true
-    didi:
+    didi: # metrics specific to DiDi Kafka
       app-topic-metrics-enabled: false
       topic-request-time-metrics-enabled: false
-      topic-throttled-metrics: false
-      save-days: 7
+      topic-throttled-metrics-enabled: false
 
-# task-related switches
+# task-related configuration
 task:
   op:
-    sync-topic-enabled: false # periodically sync not-yet-persisted topics into the DB
-    order-auto-exec: # switches for the automatic order-approval thread
-      topic-enabled: false # auto-approval of topic orders; false: off, true: on
-      app-enabled: false # auto-approval of app orders; false: off, true: on
+    sync-topic-enabled: false   # periodically sync not-yet-persisted topics into the DB
+    order-auto-exec:            # switches for the automatic order-approval thread
+      topic-enabled: false      # auto-approval of topic orders; false: off, true: on
+      app-enabled: false        # auto-approval of app orders; false: off, true: on
+  metrics:
+    collect:                    # metrics collection
+      broker-metrics-enabled: true          # collect broker metrics
+    sink:                       # metrics sinking
+      cluster-metrics:          # sink cluster metrics
+        sink-db-enabled: true               # sink to the DB
+      broker-metrics:           # sink broker metrics
+        sink-db-enabled: true               # sink to the DB
+    delete:                     # metrics cleanup
+      delete-limit-size: 1000               # batch size per delete
+      cluster-metrics-save-days: 14         # retention days for cluster metrics
+      broker-metrics-save-days: 14          # retention days for broker metrics
+      topic-metrics-save-days: 7            # retention days for topic metrics
+      topic-request-time-metrics-save-days: 7 # retention days for topic request-time metrics
+      topic-throttled-metrics-save-days: 7  # retention days for topic throttling metrics
+      app-topic-metrics-save-days: 7        # retention days for app+topic metrics
+
+thread-pool:
+  collect-metrics:
+    thread-num: 256             # size of the metrics-collection thread pool
+    queue-size: 5000            # queue size of the metrics-collection thread pool
+  api-call:
+    thread-num: 16              # size of the API-call thread pool
+    queue-size: 5000            # queue size of the API-call thread pool
+
+client-pool:
+  kafka-consumer:
+    min-idle-client-num: 24     # minimum number of idle clients
+    max-idle-client-num: 24     # maximum number of idle clients
+    max-total-client-num: 24    # maximum total number of clients
+    borrow-timeout-unit-ms: 3000 # borrow timeout, in milliseconds
 
 account:
+  jump-login:
+    gateway-api: false          # gateway APIs
+    third-part-api: false       # third-party APIs
   ldap:
     enabled: false
     url: ldap://127.0.0.1:389/
@@ -64,19 +95,20 @@ account:
   auth-user-registration: true
   auth-user-registration-role: normal
 
-kcm:
-  enabled: false
-  s3:
+kcm: # cluster deployment; installs brokers only
+  enabled: false # whether to enable
+  s3: # S3 storage service
     endpoint: s3.didiyunapi.com
     access-key: 1234567890
     secret-key: 0987654321
     bucket: logi-kafka
-  n9e:
-    base-url: http://127.0.0.1:8004
-    user-token: 12345678
-    timeout: 300
-    account: root
-    script-file: kcm_script.sh
+  n9e: # Nightingale (N9E)
+    base-url: http://127.0.0.1:8004 # N9E job service address
+    user-token: 12345678 # user token
+    timeout: 300 # timeout per host operation
+    account: root # account used for operations
+    script-file: kcm_script.sh # built-in script in the kcm module of the source code; no need to change this
+    logikm-url: http://127.0.0.1:8080 # LogiKM deployment address; kcm_script.sh calls back into LogiKM during deployment to check status
 
 monitor:
   enabled: false
diff --git a/pom.xml b/pom.xml
index a7c70e54..8b8db3a2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -11,14 +11,15 @@
     <parent>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-parent</artifactId>
-        <version>2.1.1.RELEASE</version>
+        <version>2.1.18.RELEASE</version>
     </parent>
 
     <properties>
-        <kafka-manager.version>2.4.2-SNAPSHOT</kafka-manager.version>
-        <springfox.version>2.7.0</springfox.version>
-        <swagger.version>1.5.13</swagger.version>
+        <kafka-manager.version>2.6.0</kafka-manager.version>
+        <spring.boot.version>2.1.18.RELEASE</spring.boot.version>
+        <springfox.version>2.9.2</springfox.version>
+        <swagger.version>1.5.21</swagger.version>
         <maven.test.skip>true</maven.test.skip>
         <maven.javadoc.skip>true</maven.javadoc.skip>
         <java.version>1.8</java.version>
         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
         <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-        <tomcat.version>8.5.66</tomcat.version>
+        <tomcat.version>8.5.72</tomcat.version>
+        <log4j2.version>2.16.0</log4j2.version>
+        <maven-assembly-plugin.version>3.0.0</maven-assembly-plugin.version>
@@ -42,6 +45,7 @@
         <module>kafka-manager-extends/kafka-manager-openapi</module>
         <module>kafka-manager-task</module>
         <module>kafka-manager-web</module>
+        <module>distribution</module>
@@ -62,6 +66,11 @@
             <dependency>
                 <groupId>io.swagger</groupId>
                 <artifactId>swagger-annotations</artifactId>
                 <version>${swagger.version}</version>
             </dependency>
+            <dependency>
+                <groupId>io.swagger</groupId>
+                <artifactId>swagger-models</artifactId>
+                <version>${swagger.version}</version>
+            </dependency>
@@ -229,6 +238,25 @@
             <dependency>
                 <groupId>io.minio</groupId>
                 <artifactId>minio</artifactId>
                 <version>7.1.0</version>
             </dependency>
+            <dependency>
+                <groupId>org.projectlombok</groupId>
+                <artifactId>lombok</artifactId>
+                <version>1.18.2</version>
+                <scope>provided</scope>
+            </dependency>
         </dependencies>
     </dependencyManagement>
+
+    <build>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-assembly-plugin</artifactId>
+                    <version>${maven-assembly-plugin.version}</version>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+    </build>
 </project>
\ No newline at end of file
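Every sink listener added in this change is gated by a single boolean from the `task.metrics` block above, and `matchIfMissing = true` means installs that predate these keys keep all sinks enabled. A minimal, self-contained demo of that switch semantics; the property name mirrors the one above, while the demo class and bean are illustrative:

```java
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;

@SpringBootApplication
public class ConditionalSinkDemo {
    static class DbSink { }

    // Same gate as SinkBrokerMetrics2DB: the bean exists when the key is true OR missing.
    @Bean
    @ConditionalOnProperty(prefix = "task.metrics.sink.broker-metrics",
            name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
    DbSink dbSink() {
        return new DbSink();
    }

    public static void main(String[] args) {
        ConfigurableApplicationContext ctx = SpringApplication.run(ConditionalSinkDemo.class,
                "--task.metrics.sink.broker-metrics.sink-db-enabled=false");
        // With the flag explicitly false, the bean is never created.
        System.out.println("DbSink present: " + (ctx.getBeanNamesForType(DbSink.class).length > 0));
        ctx.close();
    }
}
```

Run as-is it prints `DbSink present: false`; drop the command-line flag and the bean comes back, which is exactly the backward-compatible default the sinks rely on.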