diff --git a/README.md b/README.md
index 104af45e..c5830ffe 100644
--- a/README.md
+++ b/README.md
@@ -133,3 +133,7 @@ PS: When asking a question, please describe the problem fully in one message and include your environment details
 **`2、WeChat group`** To join the WeChat group, add `mike_zhangliang` or `PenceXie` on WeChat with the note "KnowStreaming".
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=didi/KnowStreaming&type=Date)](https://star-history.com/#didi/KnowStreaming&Date)
diff --git a/Releases_Notes.md b/Releases_Notes.md
index d2009a5e..8d19e266 100644
--- a/Releases_Notes.md
+++ b/Releases_Notes.md
@@ -1,6 +1,42 @@
+## v3.0.0-beta.1
+
+**Documentation**
+- Added the Task module guide
+- Added a FAQ entry for the `Specified key was too long; max key length is 767 bytes` error
+- Added a FAQ entry for the `ESIndexNotFoundException` error
+
-## v3.0.0-beta
+**Bug fixes**
+- Fixed the Consumer page not stopping its search after clicking Stop
+- Fixed errors when creating/editing role permissions
+- Fixed the wrong balance-card status on both the multi-cluster list and the single-cluster detail pages
+- Fixed the version list not being sorted
+- Fixed Controller info being recorded over and over for Raft clusters
+- Fixed consumer-group descriptions failing to load on some Kafka versions
+- Fixed the missing Topic name in logs about partition-offset fetch failures
+- Fixed wrong GitHub image URLs and broken images
+- Fixed the Broker default address being inconsistent with its comment
+- Fixed pagination not taking effect on the Consumer list
+- Fixed the missing default value of the operation_methods field in the operation-log table
+- Fixed the move_broker_list field of the cluster-balance table having no effect
+- Fixed logs repeatedly reporting "not supported" when fetching KafkaUser and KafkaACL info
+- Fixed metric curves dropping to the floor when metric points are missing
+
+
+**Experience improvements**
+- Reduced front-end build time and bundle size by adding a chunk-splitting strategy for dependencies
+- Polished product styling and copy
+- Made the number of ES clients configurable
+- Reduced the flood of MySQL key-conflict logs
+
+
+**Capability improvements**
+- Added a periodic task that proactively creates missing ES templates and indices, removing the need for extra scripts
+- Added the ability to choose which Broker address JMX connects to
+
+
+## v3.0.0-beta.0
 
 **1、Multi-cluster management**
 
diff --git a/docs/assets/KnowStreamingLogo.png b/docs/assets/KnowStreamingLogo.png
deleted file mode 100644
index f38dd42a..00000000
Binary files a/docs/assets/KnowStreamingLogo.png and /dev/null differ
diff --git a/docs/assets/readme/KnowStreamingPageDemo.jpg b/docs/assets/readme/KnowStreamingPageDemo.jpg
deleted file mode 100644
index a8d97df1..00000000
Binary files a/docs/assets/readme/KnowStreamingPageDemo.jpg and /dev/null differ
diff --git a/docs/assets/readme/WeChat.png b/docs/assets/readme/WeChat.png
deleted file mode 100644
index 262d0aae..00000000
Binary files a/docs/assets/readme/WeChat.png and /dev/null differ
diff --git a/docs/assets/readme/ZSXQ.jpeg b/docs/assets/readme/ZSXQ.jpeg
deleted file mode 100644
index 121bf9b1..00000000
Binary files a/docs/assets/readme/ZSXQ.jpeg and /dev/null differ
diff --git a/docs/dev_guide/Task模块简介.md b/docs/dev_guide/Task模块简介.md
new file mode 100644
index 00000000..688e033b
--- /dev/null
+++ b/docs/dev_guide/Task模块简介.md
@@ -0,0 +1,264 @@
+# Task Module Overview
+
+## 1. Introduction
+
+In KnowStreaming (KS below), the Task module runs periodic jobs: the scheduled collection of Cluster, Broker, Topic and other metrics, the periodic refresh of cluster metadata into the DB, cluster health inspections, and so on. All code related to the Task module lives in the km-task module.
+
+The Task module's periodic execution is built on the Logi-Job component of LogiCommon. Logi-Job is functionally similar to XXX-Job; it is an embedded implementation of that idea inside KnowStreaming, mainly intended to simplify KnowStreaming's deployment.
+Logi-Job offers two execution modes:
+
++ Broadcast mode: within the same KS cluster and the same task cycle, every KS host runs the scheduled task.
++ Preemptive mode: within the same KS cluster and the same task cycle, exactly one KS host runs the task.
+
+Definition of a KS cluster: KS hosts that connect to the same DB and configure the same spring.logi-job.app-name in application.yml form one KS cluster.
+
+## 2. Usage Guide
+
+On top of Logi-Job's broadcast and preemptive modes, the Task module implements preemptive execution, repeated execution, and balanced execution. They differ as follows:
+
++ Preemptive execution: within the same KS cluster and task cycle, only one KS host runs the task;
++ Repeated execution: within the same KS cluster and task cycle, every KS host runs the task. For example, with 3 KS hosts and 3 Kafka clusters, every KS host collects the metrics of all 3 Kafka clusters;
++ Balanced execution: within the same KS cluster and task cycle, each KS host runs only part of the task, and all KS hosts cooperate to complete it. For example, with 3 KS hosts and 3 Kafka clusters, in steady state each KS host collects the metrics of exactly 1 Kafka cluster, so the 3 hosts jointly cover all 3 clusters.
+
+Concrete examples follow.
+
+### 2.1. Preemptive Mode: Preemptive Execution
+
+What it does:
+
++ Within the same KS cluster and task cycle, only one KS host runs the task.
+
+Code example:
+
+```java
+// 1. Implement the Job interface and override the execute method;
+// 2. Add the @Task annotation to the class and configure it, choosing the random preemptive mode;
+// Effect: every 5 seconds, exactly one host in the KS cluster prints "test scheduled task running";
+@Task(name = "TestJob",
+      description = "test scheduled task",
+      cron = "*/5 * * * * ?",
+      autoRegister = true,
+      consensual = ConsensualEnum.RANDOM, // must be set to RANDOM here
+      timeout = 6 * 60)
+public class TestJob implements Job {
+
+    @Override
+    public TaskResult execute(JobContext jobContext) throws Exception {
+
+        System.out.println("test scheduled task running");
+        return new TaskResult();
+
+    }
+
+}
+```
+
+
+
+### 2.2. Broadcast Mode: Repeated Execution
+
+What it does:
+
++ Within the same KS cluster and task cycle, every KS host runs the task. For example, with 3 KS hosts and 3 Kafka clusters, every KS host redundantly collects the metrics of all 3 Kafka clusters.
+
+Code example:
+
+```java
+// 1. Implement the Job interface and override the execute method;
+// 2. Add the @Task annotation to the class and configure it, choosing the broadcast mode;
+// Effect: every 5 seconds, every KS host in the cluster prints "test scheduled task running";
+@Task(name = "TestJob",
+      description = "test scheduled task",
+      cron = "*/5 * * * * ?",
+      autoRegister = true,
+      consensual = ConsensualEnum.BROADCAST, // must be set to BROADCAST here
+      timeout = 6 * 60)
+public class TestJob implements Job {
+
+    @Override
+    public TaskResult execute(JobContext jobContext) throws Exception {
+
+        System.out.println("test scheduled task running");
+        return new TaskResult();
+
+    }
+
+}
+```
+
+
+
+### 2.3. Broadcast Mode: Balanced Execution
+
+What it does:
+
++ Within the same KS cluster and task cycle, each KS host runs only part of the task, and all KS hosts cooperate to complete it. For example, with 3 KS hosts and 3 Kafka clusters, in steady state each KS host collects the metrics of exactly 1 Kafka cluster, so the 3 hosts jointly cover all 3 clusters.
+
+Code example:
+
++ This mode is a bit special: it is an extension KS builds on top of Logi-Job's broadcast mode. A usage example follows:
+
+```java
+// 1. Extend AbstractClusterPhyDispatchTask and implement the processSubTask method;
+// 2. Add the @Task annotation to the class and configure it, choosing the broadcast mode;
+// Effect: in this example, every minute KS splits the full Kafka cluster list evenly across the KS hosts; each host then calls processSubTask in turn for every Kafka cluster dispatched to it, so the KS cluster handles the task cooperatively.
+@Task(name = "kmJobTask",
+      description = "scheduling task of the km job module",
+      cron = "0 0/1 * * * ? *",
+      autoRegister = true,
+      consensual = ConsensualEnum.BROADCAST,
+      timeout = 6 * 60)
+public class KMJobTask extends AbstractClusterPhyDispatchTask {
+
+    @Autowired
+    private JobService jobService;
+
+    @Override
+    protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
+        jobService.scheduleJobByClusterId(clusterPhy.getId());
+        return TaskResult.SUCCESS;
+    }
+}
+```
+
+
+
+## 3. How It Works
+
+### 3.1. The @Task Annotation
+
+```java
+public @interface Task {
+    String name() default "";          // task name
+    String description() default "";   // task description
+    String owner() default "system";   // owner
+    String cron() default "";          // cron expression for the schedule
+    int retryTimes() default 0;        // max number of retries after a failure
+    long timeout() default 0;          // retries happen within this timeout
+    // whether to register the task in the database automatically
+    // if set to false, you must register the task manually in the km_task table; the DB record and the @Task annotation are both required
+    boolean autoRegister() default false;
+    // execution mode: broadcast or random preemption
+    // broadcast mode: every server in the cluster runs the scheduled task
+    // random preemption mode: a random server in the cluster runs the task
+    ConsensualEnum consensual() default ConsensualEnum.RANDOM;
+ }
+```
+
+### 3.2. Database Tables
+
++ logi_task: the scheduled tasks defined in the project; one row per task.
++ logi_job: execution details of concrete task runs.
++ logi_job_log: execution logs of the scheduled tasks.
++ logi_worker: the registered hosts, used for cluster coordination.
+
+### 3.3. Balanced Execution
+
+#### 3.3.1. Class Diagram
+
+Taking KMJobTask as an example, this section briefly explains how scheduled tasks are implemented in KS.
+
+![img](http://img-ys011.didistatic.com/static/dc2img/do1_knC85EtQ8Vbn1BcBzcjz)
+
++ Job: every scheduled task built on the Logi component must implement this interface.
++ Comparable & EntufyIdInterface: comparison interfaces that implement the task ordering logic.
++ AbstractDispatchTask: implements balanced task dispatch under broadcast mode.
++ AbstractClusterPhyDispatchTask: iterates over the cluster list dispatched to the current server.
++ KMJobTask: handles the scheduled work for a single cluster.
+
+#### 3.3.2. Key Class Code
+
++ **The AbstractDispatchTask class**
+
+```java
+// Abstract class implementing the Job interface; balances task execution across hosts
+public abstract class AbstractDispatchTask<T> implements Job {
+
+    // list all tasks
+    protected abstract List<T> listAllTasks();
+
+    // process the tasks assigned to this KS host
+    protected abstract TaskResult processTask(List<T> subTaskList, long triggerTimeUnitMs);
+
+    // invoked by Logi-Job when the task is triggered
+    // this method distributes the tasks
+    @Override
+    public TaskResult execute(JobContext jobContext) {
+        try {
+
+            long triggerTimeUnitMs = System.currentTimeMillis();
+
+            // fetch all tasks
+            List<T> allTaskList = this.listAllTasks();
+
+            // compute the tasks the current KS host must run
+            List<T> subTaskList = this.selectTask(allTaskList, jobContext.getAllWorkerCodes(), jobContext.getCurrentWorkerCode());
+
+            // process the assigned tasks
+            return this.processTask(subTaskList, triggerTimeUnitMs);
+        } catch (Exception e) {
+            // ...
+        }
+    }
+}
+```
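+
+The excerpt above elides selectTask. The sketch below only illustrates the assignment rule that section 3.3.3 describes (sort the task list and the worker list by one stable rule, then split evenly by position); the class name DispatchRule, the exact signature, and the index-modulo split are assumptions for illustration, not the real Logi-Job code.
+
+```java
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public final class DispatchRule {
+
+    // Sketch: every host sorts tasks and workers the same way, then keeps the tasks
+    // whose index maps to its own position. Because each host computes the identical
+    // ordering, in a stable cluster the per-host shares are disjoint and cover all tasks.
+    public static <T extends Comparable<T>> List<T> selectTask(List<T> allTaskList,
+                                                               List<String> allWorkerCodes,
+                                                               String currentWorkerCode) {
+        List<T> tasks = new ArrayList<>(allTaskList);
+        Collections.sort(tasks);                  // identical task order on every host
+
+        List<String> workers = new ArrayList<>(allWorkerCodes);
+        Collections.sort(workers);                // identical worker order on every host
+
+        int myIdx = workers.indexOf(currentWorkerCode);
+        List<T> subTaskList = new ArrayList<>();
+        for (int i = 0; i < tasks.size(); i++) {
+            if (i % workers.size() == myIdx) {    // index-modulo split
+                subTaskList.add(tasks.get(i));
+            }
+        }
+        return subTaskList;
+    }
+}
+```
+
+When a host joins or leaves, the sorted worker list changes and the split shifts, which is exactly why section 4 warns that exactly-once execution cannot be guaranteed.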
+
++ **The AbstractClusterPhyDispatchTask class**
+
+```java
+// Abstract class extending AbstractDispatchTask; balances work across the Kafka clusters
+public abstract class AbstractClusterPhyDispatchTask extends AbstractDispatchTask<ClusterPhy> {
+
+    // process one assigned task; the concrete work is implemented by subclasses
+    protected abstract TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception;
+
+    // return all Kafka clusters
+    @Override
+    public List<ClusterPhy> listAllTasks() {
+        return clusterPhyService.listAllClusters();
+    }
+
+    // process the Kafka-cluster tasks assigned to this KS host
+    @Override
+    public TaskResult processTask(List<ClusterPhy> subTaskList, long triggerTimeUnitMs) { // ... }
+
+}
+```
+
++ **The KMJobTask class**
+
+```java
+// add the @Task annotation and configure the task
+@Task(name = "kmJobTask",
+      description = "scheduling task of the km job module",
+      cron = "0 0/1 * * * ? *",
+      autoRegister = true,
+      consensual = ConsensualEnum.BROADCAST,
+      timeout = 6 * 60)
+// extend the AbstractClusterPhyDispatchTask class
+public class KMJobTask extends AbstractClusterPhyDispatchTask {
+
+    @Autowired
+    private JobService jobService;
+
+    // run the Job-module work for this Kafka cluster
+    @Override
+    protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
+        jobService.scheduleJobByClusterId(clusterPhy.getId());
+        return TaskResult.SUCCESS;
+    }
+}
+```
+
+#### 3.3.3. Balanced Execution Summary
+
+The principle behind balanced execution boils down to the following points:
+
++ Logi-Job is set to broadcast mode, triggering every KS host to run the task;
++ Once triggered, each KS host sorts the task list and the KS host list by the same rule, then assigns the tasks, in order, evenly across the sorted hosts. While the KS cluster runs stably, this step guarantees that the task lists assigned to different hosts neither overlap nor miss any task.
++ Finally, every KS host runs the tasks assigned to it.
+
+## 4. Caveats
+
++ It cannot be 100% guaranteed that a task runs exactly once per cycle; duplicate or missed runs can occur, so tasks that strictly require exactly-once execution should not rely on Logi-Job for that control.
++ Let Logi-Job be responsible only for triggering the task; run the follow-up work in a thread pool you create yourself, as sketched below.
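+
+As a minimal sketch of that last point: the job below only triggers, handing the real work to a pool the job owns and returning immediately. The class name AsyncStyleJob, the pool size, and doRealWork are illustrative assumptions, not KS code; only Job, JobContext, TaskResult, and @Task come from the examples above.
+
+```java
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+@Task(name = "AsyncStyleJob",
+      description = "trigger-only scheduled task",
+      cron = "*/5 * * * * ?",
+      autoRegister = true,
+      consensual = ConsensualEnum.RANDOM,
+      timeout = 6 * 60)
+public class AsyncStyleJob implements Job {
+
+    // a pool owned by this job, so slow work never blocks the Logi-Job trigger thread
+    private final ExecutorService pool = Executors.newFixedThreadPool(2);
+
+    @Override
+    public TaskResult execute(JobContext jobContext) throws Exception {
+        // trigger only: enqueue the real work and return at once
+        pool.submit(this::doRealWork);
+        return new TaskResult();
+    }
+
+    private void doRealWork() {
+        // the actual business logic runs here, off the scheduler thread
+    }
+}
+```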
diff --git a/docs/dev_guide/assets/multi_version_compatible/registerHandler.png b/docs/dev_guide/assets/multi_version_compatible/registerHandler.png
deleted file mode 100644
index f7b040dc..00000000
Binary files a/docs/dev_guide/assets/multi_version_compatible/registerHandler.png and /dev/null differ
diff --git a/docs/dev_guide/assets/startup_using_source_code/IDEA配置.jpg b/docs/dev_guide/assets/startup_using_source_code/IDEA配置.jpg
deleted file mode 100644
index 237aaa42..00000000
Binary files a/docs/dev_guide/assets/startup_using_source_code/IDEA配置.jpg and /dev/null differ
diff --git a/docs/dev_guide/多版本兼容方案.md b/docs/dev_guide/多版本兼容方案.md
index 389d0650..f41c01d4 100644
--- a/docs/dev_guide/多版本兼容方案.md
+++ b/docs/dev_guide/多版本兼容方案.md
@@ -36,7 +36,7 @@ Along the three dimensions above, KS-KM builds, for the kafka versions it must manage,
 
 For each release, KS-KM takes the list of kafka versions it must manage and analyzes the versions' differences and the product requirements in advance. KS-KM also builds a dedicated compatibility service that performs compatibility registration, dictionary building, handler dispatch and similar operations; the version-compatibility handlers are where the concrete differences between kafka versions are handled.
 
-![registerHandler](./assets/multi_version_compatible/registerHandler.png)
+![registerHandler](http://img-ys011.didistatic.com/static/dc2img/do1_WxVTzndYE59ah5DFrMfn)
 
 As the figure above shows, when KS-KM's topic service faces different kafka versions, topic creation, deletion, and expansion must be handled differently because of the versions' own differences. A separate compatibility handler therefore has to be implemented per kafka version and registered with KnowStreaming's compatibility service to build the compatibility dictionary; while KnowStreaming runs, each kafka version can then be dispatched to its own handler.
 
diff --git a/docs/dev_guide/本地源码启动手册.md b/docs/dev_guide/本地源码启动手册.md
index ed21c3b8..35936a9a 100644
--- a/docs/dev_guide/本地源码启动手册.md
+++ b/docs/dev_guide/本地源码启动手册.md
@@ -29,7 +29,7 @@
 - Initialize the MySQL tables and data
 - Initialize the Elasticsearch indices
 
-See the last step of [Quick Start](./1-quick-start.md): the initialization work in deploying the KnowStreaming service.
+See the last step of the [Standalone Deployment Manual](../install_guide/单机部署手册.md): the initialization work in deploying the KnowStreaming service.
 
 ### 6.1.4. Local Startup
 
@@ -73,7 +73,7 @@
 km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/KnowStreaming.java
 
 More detailed IDEA configuration is shown in the figure below:

- +

**Step 4: Start the project**
 
diff --git a/docs/dev_guide/解决连接JMX失败.md b/docs/dev_guide/解决连接JMX失败.md
index f66a5ab0..546400d6 100644
--- a/docs/dev_guide/解决连接JMX失败.md
+++ b/docs/dev_guide/解决连接JMX失败.md
@@ -1,5 +1,5 @@
 
-![Logo](../assets/KnowStreamingLogo.png)
+![Logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png)
 
 ## JMX: Fixing Connection Failures
 
@@ -19,7 +19,7 @@
 
 If JMX is not enabled, go straight to `2、解决方法` (the fix section) to see how to enable it.
 
-![check_jmx_opened](./assets/connect_jmx_failed/check_jmx_opened.jpg)
+![check_jmx_opened](http://img-ys011.didistatic.com/static/dc2img/do1_dRX6UHE2IUSHqsN95DGb)
 
 **Case 2: Misconfiguration**
 
diff --git a/docs/install_guide/源码编译打包手册.md b/docs/install_guide/源码编译打包手册.md
index b0b20101..708396b6 100644
--- a/docs/install_guide/源码编译打包手册.md
+++ b/docs/install_guide/源码编译打包手册.md
@@ -1,5 +1,5 @@
 
-![Logo](../assets/KnowStreamingLogo.png)
+![Logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png)
 
 # `Know Streaming` Source Build and Packaging Manual
 
diff --git a/docs/install_guide/版本升级手册.md b/docs/install_guide/版本升级手册.md
index 621d90bc..8c23b9be 100644
--- a/docs/install_guide/版本升级手册.md
+++ b/docs/install_guide/版本升级手册.md
@@ -1,6 +1,35 @@
 ## 6.2. Version Upgrade Manual
 
-**Upgrading from `2.x` to `3.0.0`**
+Note: to upgrade to a given version, you must apply every change from your current version up to the target version, in order; only then will the new version work properly.
+
+### 6.2.0. Upgrading to the `master` version
+
+None yet
+
+---
+
+### 6.2.1. Upgrading to `v3.0.0-beta.1`
+
+
+**SQL changes**
+
+1. A listener-info field was added to the `ks_km_broker` table.
+2. The operation_methods field of the `logi_security_oplog` table was given a default value of ''.
+Therefore the following SQL must be executed to update the tables.
+
+```sql
+ALTER TABLE `ks_km_broker`
+ADD COLUMN `endpoint_map` VARCHAR(1024) NOT NULL DEFAULT '' COMMENT '监听信息' AFTER `update_time`;
+
+ALTER TABLE `logi_security_oplog`
+ALTER COLUMN `operation_methods` SET DEFAULT '';
+
+```
+
+---
+
+
+### 6.2.2. Upgrading from `2.x` to `v3.0.0-beta.0`
 
 **Upgrade steps:**
 
diff --git a/docs/user_guide/faq.md b/docs/user_guide/faq.md
index 3062cfa2..60620f73 100644
--- a/docs/user_guide/faq.md
+++ b/docs/user_guide/faq.md
@@ -109,3 +109,21 @@ SECURITY.TRICK_USERS
 After finishing the two steps above, you can call the login-protected APIs directly.
 
 One more thing to note: a bypassed user can only call the APIs they are authorized for. An ordinary user, for example, can only call the ordinary APIs, not the operator APIs.
+
+## 8.8. Specified key was too long; max key length is 767 bytes
+
+**Cause:** different versions of the InnoDB engine use different defaults for the 'innodb_large_prefix' parameter: it defaults to OFF in MySQL 5.6 and to ON in 5.7.
+
+With InnoDB, when innodb_large_prefix=OFF and the file format is Antelope (supporting the REDUNDANT and COMPACT row formats), index key prefixes may be at most 767 bytes. When innodb_large_prefix=ON and the file format is Barracuda (supporting the DYNAMIC and COMPRESSED row formats), index key prefixes may be at most 3072 bytes.
+
+**Solutions:**
+
+- Reduce the varchar length to below 767/4 = 191 characters (utf8mb4 needs up to 4 bytes per character).
+- Change the character set to latin1 (one character = one byte).
+- Enable 'innodb_large_prefix', change 'innodb_file_format' to Barracuda, and create the table with row_format=DYNAMIC.
+
+## 8.9. ESIndexNotFoundException errors
+
+**Cause:** the ES index templates were not created.
+
+**Solution:** run the init_es_template.sh script to create the ES index templates.
diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java
index 81ed009f..a0418bb2 100644
--- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java
+++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java
@@ -129,7 +129,12 @@ public class TopicStateManagerImpl implements TopicStateManager {
             return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
         }
 
-        // 获取分区offset
+        // 获取分区beginOffset
+        Result<Map<TopicPartition, Long>> beginOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.earliest(), null);
+        if (beginOffsetsMapResult.failed()) {
+            return Result.buildFromIgnoreData(beginOffsetsMapResult);
+        }
+
+        // 获取分区endOffset
         Result<Map<TopicPartition, Long>> endOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.latest(), null);
         if (endOffsetsMapResult.failed()) {
             return Result.buildFromIgnoreData(endOffsetsMapResult);
@@ -142,13 +147,25 @@ public class TopicStateManagerImpl implements TopicStateManager {
 
         // 创建kafka-consumer
         kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()));
-        kafkaConsumer.assign(endOffsetsMapResult.getData().keySet());
-        for (Map.Entry<TopicPartition, Long> entry: endOffsetsMapResult.getData().entrySet()) {
-            kafkaConsumer.seek(entry.getKey(), Math.max(0, entry.getValue() - dto.getMaxRecords()));
+        List<TopicPartition> partitionList = new ArrayList<>();
+        long maxMessage = 0;
+        for (Map.Entry<TopicPartition, Long> entry : endOffsetsMapResult.getData().entrySet()) {
+            long begin = beginOffsetsMapResult.getData().get(entry.getKey());
+            long end = entry.getValue();
+            if (begin == end){
+                continue;
+            }
+            maxMessage += end - begin;
+            partitionList.add(entry.getKey());
+        }
+        maxMessage = Math.min(maxMessage, dto.getMaxRecords());
+        kafkaConsumer.assign(partitionList);
+        for (TopicPartition partition : partitionList) {
+            kafkaConsumer.seek(partition, Math.max(beginOffsetsMapResult.getData().get(partition), endOffsetsMapResult.getData().get(partition) - dto.getMaxRecords()));
         }
 
-        // 这里需要减去 KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS 是因为poll一次需要耗时,如果这里不减去,则可能会导致poll之后,超过要求的时间
-        while (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS <= dto.getPullTimeoutUnitMs() && voList.size() < dto.getMaxRecords()) {
+        // 循环拉取消息,直到超过要求的拉取时间,或者拉取到的消息数达到 maxMessage
+        while (System.currentTimeMillis() - startTime <= dto.getPullTimeoutUnitMs() && voList.size() < maxMessage) {
             ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
             for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                 if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) {
diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java
index 55944f6f..a94a377d 100644
--- a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java
+++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java
@@ -5,7 +5,6 @@ import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*;
 import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
 import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.*;
-import com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum;
 import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
 import com.xiaojukeji.know.streaming.km.common.utils.NamedThreadFactory;
@@ -21,6 +20,8 @@ import java.util.concurrent.LinkedBlockingDeque;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
+
 @Component
 public class MetricESSender implements ApplicationListener<BaseMetricEvent> {
     protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
@@ -41,37 +42,37 @@ public class MetricESSender implements ApplicationListener<BaseMetricEvent> {
     public void onApplicationEvent(BaseMetricEvent event) {
         if(event instanceof BrokerMetricEvent) {
             BrokerMetricEvent
brokerMetricEvent = (BrokerMetricEvent)event; - send2es(KafkaMetricIndexEnum.BROKER_INFO, + send2es(BROKER_INDEX, ConvertUtil.list2List(brokerMetricEvent.getBrokerMetrics(), BrokerMetricPO.class) ); } else if(event instanceof ClusterMetricEvent) { ClusterMetricEvent clusterMetricEvent = (ClusterMetricEvent)event; - send2es(KafkaMetricIndexEnum.CLUSTER_INFO, + send2es(CLUSTER_INDEX, ConvertUtil.list2List(clusterMetricEvent.getClusterMetrics(), ClusterMetricPO.class) ); } else if(event instanceof TopicMetricEvent) { TopicMetricEvent topicMetricEvent = (TopicMetricEvent)event; - send2es(KafkaMetricIndexEnum.TOPIC_INFO, + send2es(TOPIC_INDEX, ConvertUtil.list2List(topicMetricEvent.getTopicMetrics(), TopicMetricPO.class) ); } else if(event instanceof PartitionMetricEvent) { PartitionMetricEvent partitionMetricEvent = (PartitionMetricEvent)event; - send2es(KafkaMetricIndexEnum.PARTITION_INFO, + send2es(PARTITION_INDEX, ConvertUtil.list2List(partitionMetricEvent.getPartitionMetrics(), PartitionMetricPO.class) ); } else if(event instanceof GroupMetricEvent) { GroupMetricEvent groupMetricEvent = (GroupMetricEvent)event; - send2es(KafkaMetricIndexEnum.GROUP_INFO, + send2es(GROUP_INDEX, ConvertUtil.list2List(groupMetricEvent.getGroupMetrics(), GroupMetricPO.class) ); } else if(event instanceof ReplicaMetricEvent) { ReplicaMetricEvent replicaMetricEvent = (ReplicaMetricEvent)event; - send2es(KafkaMetricIndexEnum.REPLICATION_INFO, + send2es(REPLICATION_INDEX, ConvertUtil.list2List(replicaMetricEvent.getReplicationMetrics(), ReplicationMetricPO.class) ); } @@ -80,19 +81,19 @@ public class MetricESSender implements ApplicationListener { /** * 根据不同监控维度来发送 */ - private boolean send2es(KafkaMetricIndexEnum stats, List statsList){ + private boolean send2es(String index, List statsList){ if (CollectionUtils.isEmpty(statsList)) { return true; } if (!EnvUtil.isOnline()) { LOGGER.info("class=MetricESSender||method=send2es||ariusStats={}||size={}", - stats.getIndex(), statsList.size()); + index, statsList.size()); } - BaseMetricESDAO baseMetricESDao = BaseMetricESDAO.getByStatsType(stats); + BaseMetricESDAO baseMetricESDao = BaseMetricESDAO.getByStatsType(index); if (Objects.isNull( baseMetricESDao )) { - LOGGER.error("class=MetricESSender||method=send2es||errMsg=fail to find {}", stats.getIndex()); + LOGGER.error("class=MetricESSender||method=send2es||errMsg=fail to find {}", index); return false; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java index 916820d8..d7e3b792 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java @@ -1,5 +1,10 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.broker; + +import com.alibaba.fastjson.TypeReference; +import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData; +import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.BrokerMetadata; import lombok.AllArgsConstructor; import lombok.Data; @@ -7,6 +12,7 @@ import lombok.NoArgsConstructor; import org.apache.kafka.common.Node; import java.io.Serializable; +import java.util.Map; /** * @author didi @@ -55,6 +61,11 @@ public class Broker 
implements Serializable {
      */
     private Integer status;
 
+    /**
+     * 监听信息
+     */
+    private Map<String, IpPortData> endpointMap;
+
     public static Broker buildFrom(Long clusterPhyId, Node node, Long startTimestamp) {
         Broker metadata = new Broker();
         metadata.setClusterPhyId(clusterPhyId);
@@ -78,9 +89,31 @@ public class Broker implements Serializable {
         metadata.setStartTimestamp(brokerMetadata.getTimestamp());
         metadata.setRack(brokerMetadata.getRack());
         metadata.setStatus(1);
+        metadata.setEndpointMap(brokerMetadata.getEndpointMap());
         return metadata;
     }
 
+    public static Broker buildFrom(BrokerPO brokerPO) {
+        Broker broker = ConvertUtil.obj2Obj(brokerPO, Broker.class);
+        String endpointMapStr = brokerPO.getEndpointMap();
+        if (broker == null || endpointMapStr == null || endpointMapStr.equals("")) {
+            return broker;
+        }
+
+        // 填充endpoint信息
+        Map<String, IpPortData> endpointMap = ConvertUtil.str2ObjByJson(endpointMapStr, new TypeReference<Map<String, IpPortData>>(){});
+        broker.setEndpointMap(endpointMap);
+        return broker;
+    }
+
+    public String getJmxHost(String endPoint) {
+        if (endPoint == null || endpointMap == null) {
+            return host;
+        }
+        IpPortData ip = endpointMap.get(endPoint);
+        return ip == null ? host : ip.getIp();
+    }
+
     public boolean alive() {
         return status != null && status > 0;
     }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java
index 5c78183c..87607c1f 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java
@@ -27,6 +27,9 @@ public class JmxConfig implements Serializable {
 
     @ApiModelProperty(value="SSL情况下的token", example = "KsKmCCY19")
     private String token;
+
+    @ApiModelProperty(value="使用哪个endpoint网络", example = "EXTERNAL")
+    private String useWhichEndpoint;
 }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/BatchPartitionParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/BatchPartitionParam.java
new file mode 100644
index 00000000..d316112f
--- /dev/null
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/BatchPartitionParam.java
@@ -0,0 +1,19 @@
+package com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition;
+
+import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.List;
+
+@Data
+@NoArgsConstructor
+public class BatchPartitionParam extends ClusterPhyParam {
+    private List<TopicPartition> tpList;
+
+    public BatchPartitionParam(Long clusterPhyId, List<TopicPartition> tpList) {
+        super(clusterPhyId);
+        this.tpList = tpList;
+    }
+}
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/PartitionOffsetParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/PartitionOffsetParam.java
index f342d64c..02907a6c 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/PartitionOffsetParam.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/partition/PartitionOffsetParam.java
@@ -1,6 +1,6 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import org.apache.kafka.clients.admin.OffsetSpec;
@@ -10,13 +10,13 @@ import java.util.Map;
 
 @Data
 @NoArgsConstructor
-public class PartitionOffsetParam extends ClusterPhyParam {
+public class PartitionOffsetParam extends TopicParam {
     private Map<TopicPartition, OffsetSpec> topicPartitionOffsets;
 
     private Long timestamp;
 
-    public PartitionOffsetParam(Long clusterPhyId, Map<TopicPartition, OffsetSpec> topicPartitionOffsets, Long timestamp) {
-        super(clusterPhyId);
+    public PartitionOffsetParam(Long clusterPhyId, String topicName, Map<TopicPartition, OffsetSpec> topicPartitionOffsets, Long timestamp) {
+        super(clusterPhyId, topicName);
         this.topicPartitionOffsets = topicPartitionOffsets;
         this.timestamp = timestamp;
     }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicParam.java
index d6a6b516..5c66a521 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicParam.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicParam.java
@@ -15,4 +15,12 @@ public class TopicParam extends ClusterPhyParam {
         super(clusterPhyId);
         this.topicName = topicName;
     }
+
+    @Override
+    public String toString() {
+        return "TopicParam{" +
+                "clusterPhyId=" + clusterPhyId +
+                ", topicName='" + topicName + '\'' +
+                '}';
+    }
 }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/broker/BrokerPO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/broker/BrokerPO.java
index 2f50480d..16f98d88 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/broker/BrokerPO.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/broker/BrokerPO.java
@@ -42,4 +42,9 @@ public class BrokerPO extends BasePO {
      * Broker状态
      */
     private Integer status;
+
+    /**
+     * 监听信息
+     */
+    private String endpointMap;
 }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/metrics/point/MetricPointVO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/metrics/point/MetricPointVO.java
index 1dc894f7..c647b222 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/metrics/point/MetricPointVO.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/metrics/point/MetricPointVO.java
@@ -29,6 +29,10 @@ public class MetricPointVO implements Comparable<MetricPointVO> {
     @Override
     public int compareTo(MetricPointVO o) {
         if(null == o){return 0;}
+        if(null == this.getTimeStamp()
+                || null == o.getTimeStamp()){
+            return 0;
+        }
         return this.getTimeStamp().intValue() - o.getTimeStamp().intValue();
     }
 
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java
index d7c1c960..fae5db21 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java
@@ -63,4 +63,5 @@ public class Constant {
     public static final String COLLECT_METRICS_COST_TIME_METRICS_NAME = "CollectMetricsCostTimeUnitSec";
 
     public static final Float
COLLECT_METRICS_ERROR_COST_TIME = -1.0F; + public static final Integer DEFAULT_RETRY_TIME = 3; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/ESIndexConstant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/ESIndexConstant.java new file mode 100644 index 00000000..0de516f7 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/ESIndexConstant.java @@ -0,0 +1,647 @@ +package com.xiaojukeji.know.streaming.km.common.constant; + +public class ESIndexConstant { + + public final static String TOPIC_INDEX = "ks_kafka_topic_metric"; + public final static String TOPIC_TEMPLATE = "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_topic_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"brokerId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"routingValue\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"topic\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"clusterPhyId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"metrics\" : {\n" + + " \"properties\" : {\n" + + " \"BytesIn_min_15\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"Messages\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesRejected\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"PartitionURP\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthCheckTotal\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"ReplicationCount\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"ReplicationBytesOut\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"ReplicationBytesIn\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"FailedFetchRequests\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesIn_min_5\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthScore\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"LogSize\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesOut\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesOut_min_15\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"FailedProduceRequests\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesIn\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesOut_min_5\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"MessagesIn\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"TotalProduceRequests\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthCheckPassed\" : {\n" + + " \"type\" : \"float\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"brokerAgg\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"key\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"index\" : true,\n" + + " \"type\" : \"date\",\n" + + " \"doc_values\" : true\n" + + 
" }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }"; + + public final static String CLUSTER_INDEX = "ks_kafka_cluster_metric"; + public final static String CLUSTER_TEMPLATE = "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_cluster_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"routingValue\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"clusterPhyId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"metrics\" : {\n" + + " \"properties\" : {\n" + + " \"Connections\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"BytesIn_min_15\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"PartitionURP\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthScore_Topics\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"EventQueueSize\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"ActiveControllerCount\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"GroupDeads\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"BytesIn_min_5\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckTotal_Topics\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Partitions\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"BytesOut\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Groups\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"BytesOut_min_15\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"TotalRequestQueueSize\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckPassed_Groups\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"TotalProduceRequests\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckPassed\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"TotalLogSize\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"GroupEmptys\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"PartitionNoLeader\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthScore_Brokers\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Messages\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Topics\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"PartitionMinISR_E\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckTotal\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Brokers\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Replicas\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckTotal_Groups\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"GroupRebalances\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"MessageIn\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthScore\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckPassed_Topics\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckTotal_Brokers\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"PartitionMinISR_S\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"BytesIn\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"BytesOut_min_5\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"GroupActives\" : {\n" + + " 
\"type\" : \"double\"\n" + + " },\n" + + " \"MessagesIn\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"GroupReBalances\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckPassed_Brokers\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthScore_Groups\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"TotalResponseQueueSize\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"Zookeepers\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"LeaderMessages\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthScore_Cluster\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckPassed_Cluster\" : {\n" + + " \"type\" : \"double\"\n" + + " },\n" + + " \"HealthCheckTotal_Cluster\" : {\n" + + " \"type\" : \"double\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"key\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"type\" : \"date\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }"; + + public final static String BROKER_INDEX = "ks_kafka_broker_metric"; + public final static String BROKER_TEMPLATE = "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_broker_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"brokerId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"routingValue\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"clusterPhyId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"metrics\" : {\n" + + " \"properties\" : {\n" + + " \"NetworkProcessorAvgIdle\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"UnderReplicatedPartitions\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesIn_min_15\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthCheckTotal\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"RequestHandlerAvgIdle\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"connectionsCount\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesIn_min_5\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthScore\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesOut\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesOut_min_15\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesIn\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"BytesOut_min_5\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"TotalRequestQueueSize\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"MessagesIn\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"TotalProduceRequests\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthCheckPassed\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"TotalResponseQueueSize\" : {\n" + + " \"type\" : \"float\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"key\" : {\n" + + " 
\"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"index\" : true,\n" + + " \"type\" : \"date\",\n" + + " \"doc_values\" : true\n" + + " }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }"; + + public final static String PARTITION_INDEX = "ks_kafka_partition_metric"; + public final static String PARTITION_TEMPLATE = "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_partition_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"brokerId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"partitionId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"routingValue\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"clusterPhyId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"topic\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"metrics\" : {\n" + + " \"properties\" : {\n" + + " \"LogStartOffset\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"Messages\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"LogEndOffset\" : {\n" + + " \"type\" : \"float\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"key\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"index\" : true,\n" + + " \"type\" : \"date\",\n" + + " \"doc_values\" : true\n" + + " }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }"; + + public final static String GROUP_INDEX = "ks_kafka_group_metric"; + public final static String GROUP_TEMPLATE = "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_group_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"group\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"partitionId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"routingValue\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"clusterPhyId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"topic\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"metrics\" : {\n" + + " \"properties\" : {\n" + + " \"HealthScore\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"Lag\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"OffsetConsumed\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " 
\"HealthCheckTotal\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"HealthCheckPassed\" : {\n" + + " \"type\" : \"float\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"groupMetric\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"key\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"index\" : true,\n" + + " \"type\" : \"date\",\n" + + " \"doc_values\" : true\n" + + " }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }"; + + public final static String REPLICATION_INDEX = "ks_kafka_replication_metric"; + public final static String REPLICATION_TEMPLATE = "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_partition_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"brokerId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"partitionId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"routingValue\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"clusterPhyId\" : {\n" + + " \"type\" : \"long\"\n" + + " },\n" + + " \"topic\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"metrics\" : {\n" + + " \"properties\" : {\n" + + " \"LogStartOffset\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"Messages\" : {\n" + + " \"type\" : \"float\"\n" + + " },\n" + + " \"LogEndOffset\" : {\n" + + " \"type\" : \"float\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"key\" : {\n" + + " \"type\" : \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"ignore_above\" : 256,\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"index\" : true,\n" + + " \"type\" : \"date\",\n" + + " \"doc_values\" : true\n" + + " }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }[root@10-255-0-23 template]# cat ks_kafka_replication_metric\n" + + "PUT _template/ks_kafka_replication_metric\n" + + "{\n" + + " \"order\" : 10,\n" + + " \"index_patterns\" : [\n" + + " \"ks_kafka_replication_metric*\"\n" + + " ],\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"number_of_shards\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : {\n" + + " \"properties\" : {\n" + + " \"timestamp\" : {\n" + + " \"format\" : \"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis\",\n" + + " \"index\" : true,\n" + + " \"type\" : \"date\",\n" + + " \"doc_values\" : true\n" + + " }\n" + + " }\n" + + " },\n" + + " \"aliases\" : { }\n" + + " }"; + +} diff --git 
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java index c6205152..3b768e01 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java @@ -33,7 +33,7 @@ public class KafkaConstant { public static final Integer DATA_VERSION_ONE = 1; - public static final Integer ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS = 3000; + public static final Integer ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS = 5000; public static final Integer KAFKA_SASL_SCRAM_ITERATIONS = 8192; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthCheckNameEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthCheckNameEnum.java index d64a42ae..724f5ed3 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthCheckNameEnum.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthCheckNameEnum.java @@ -26,7 +26,7 @@ public enum HealthCheckNameEnum { HealthCheckDimensionEnum.CLUSTER, "Controller", Constant.HC_CONFIG_NAME_PREFIX + "CLUSTER_NO_CONTROLLER", - "集群Controller数错误", + "集群Controller数正常", HealthCompareValueConfig.class ), @@ -34,7 +34,7 @@ public enum HealthCheckNameEnum { HealthCheckDimensionEnum.BROKER, "RequestQueueSize", Constant.HC_CONFIG_NAME_PREFIX + "BROKER_REQUEST_QUEUE_FULL", - "Broker-RequestQueueSize被打满", + "Broker-RequestQueueSize指标", HealthCompareValueConfig.class ), @@ -42,7 +42,7 @@ public enum HealthCheckNameEnum { HealthCheckDimensionEnum.BROKER, "NetworkProcessorAvgIdlePercent", Constant.HC_CONFIG_NAME_PREFIX + "BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW", - "Broker-NetworkProcessorAvgIdlePercent的Idle过低", + "Broker-NetworkProcessorAvgIdlePercent指标", HealthCompareValueConfig.class ), @@ -50,7 +50,7 @@ public enum HealthCheckNameEnum { HealthCheckDimensionEnum.GROUP, "Group Re-Balance", Constant.HC_CONFIG_NAME_PREFIX + "GROUP_RE_BALANCE_TOO_FREQUENTLY", - "Group re-balance太频繁", + "Group re-balance频率", HealthDetectedInLatestMinutesConfig.class ), @@ -66,7 +66,7 @@ public enum HealthCheckNameEnum { HealthCheckDimensionEnum.TOPIC, "UnderReplicaTooLong", Constant.HC_CONFIG_NAME_PREFIX + "TOPIC_UNDER_REPLICA_TOO_LONG", - "Topic 长期处于未同步状态", + "Topic 未同步持续时间", HealthDetectedInLatestMinutesConfig.class ), diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/metric/KafkaMetricIndexEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/metric/KafkaMetricIndexEnum.java deleted file mode 100644 index 25535864..00000000 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/metric/KafkaMetricIndexEnum.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.xiaojukeji.know.streaming.km.common.enums.metric; - -/** - * @author: D10865 - * @description: - * @date: Create on 2019/3/11 下午2:19 - * @modified By D10865 - * - * 不同维度的es监控数据 - */ -public enum KafkaMetricIndexEnum { - - /** - * topic 维度 - */ - TOPIC_INFO("ks_kafka_topic_metric"), - - /** - * 集群 维度 - */ - CLUSTER_INFO("ks_kafka_cluster_metric"), - - /** - * broker 维度 - */ - BROKER_INFO("ks_kafka_broker_metric"), - - /** - * partition 维度 - */ - PARTITION_INFO("ks_kafka_partition_metric"), - - /** - * group 维度 - */ - GROUP_INFO("ks_kafka_group_metric"), - - /** - * replication 维度 - 
*/ - REPLICATION_INFO("ks_kafka_replication_metric"), - - ; - - private String index; - - KafkaMetricIndexEnum(String index) { - this.index = index; - } - - public String getIndex() { - return index; - } -} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java index 270999b4..15f13175 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java @@ -31,9 +31,11 @@ public enum VersionItemTypeEnum { SERVICE_OP_PARTITION(320, "service_partition_operation"), + SERVICE_OP_PARTITION_LEADER(321, "service_partition-leader_operation"), SERVICE_OP_REASSIGNMENT(330, "service_reassign_operation"), + /** * 前端操作 */ diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java index 1f598091..480867af 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java @@ -113,7 +113,7 @@ public class BrokerMetadata implements Serializable { brokerMetadata.getEndpointMap().put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort)); - if (KafkaConstant.EXTERNAL_KEY.equals(endpoint.substring(0, idx1))) { + if (KafkaConstant.INTERNAL_KEY.equals(endpoint.substring(0, idx1))) { // 优先使用internal的地址进行展示 brokerMetadata.setHost(brokerHost); brokerMetadata.setPort(ConvertUtil.string2Integer(brokerPort)); diff --git a/km-console/packages/config-manager-fe/src/app.tsx b/km-console/packages/config-manager-fe/src/app.tsx index aad916e0..2f29654d 100644 --- a/km-console/packages/config-manager-fe/src/app.tsx +++ b/km-console/packages/config-manager-fe/src/app.tsx @@ -2,8 +2,8 @@ import React from 'react'; import { BrowserRouter as Router, Redirect, Switch } from 'react-router-dom'; import _ from 'lodash'; import './constants/axiosConfig'; -import dantdZhCN from 'knowdesign/lib/locale/zh_CN'; -import dantdEnUS from 'knowdesign/lib/locale/en_US'; +import dantdZhCN from 'knowdesign/es/locale/zh_CN'; +import dantdEnUS from 'knowdesign/es/locale/en_US'; import intlZhCN from './locales/zh'; import intlEnUS from './locales/en'; import { AppContainer, RouteGuard, DProLayout } from 'knowdesign'; diff --git a/km-console/packages/config-manager-fe/src/components/TagsWithHide/index.tsx b/km-console/packages/config-manager-fe/src/components/TagsWithHide/index.tsx index 2b676698..dcf80ee4 100644 --- a/km-console/packages/config-manager-fe/src/components/TagsWithHide/index.tsx +++ b/km-console/packages/config-manager-fe/src/components/TagsWithHide/index.tsx @@ -1,6 +1,6 @@ import { DownOutlined } from '@ant-design/icons'; import { Popover } from 'knowdesign'; -import { TooltipPlacement } from 'knowdesign/lib/basic/tooltip'; +import { TooltipPlacement } from 'knowdesign/es/basic/tooltip'; import React, { useState, useRef, useEffect } from 'react'; import './index.less'; @@ -90,8 +90,9 @@ export default (props: PropsType) => { return (
-          className={`container-item ${curState.calculated ? (curState.isHideExpandNode ? 'show' : i >= curState.endI ? 'hide' : 'show') : ''
-          }`}
+          className={`container-item ${
+            curState.calculated ? (curState.isHideExpandNode ? 'show' : i >= curState.endI ? 'hide' : 'show') : ''
+          }`}
        >
          {item}
diff --git a/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx b/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx index c72fbb1b..67dd0634 100644 --- a/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx +++ b/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx @@ -77,6 +77,7 @@ const RoleDetailAndUpdate = forwardRef((props, ref): JSX.Element => { const onSubmit = () => { form.validateFields().then((formData) => { + formData.permissionIdList = formData.permissionIdList.filter((l) => l); formData.permissionIdList.forEach((arr, i) => { // 如果分配的系统下的子权限,自动赋予该系统的权限 if (arr !== null && arr.length) { @@ -212,7 +213,7 @@ const RoleDetailAndUpdate = forwardRef((props, ref): JSX.Element => { rules={[ () => ({ validator(_, value) { - if (Array.isArray(value) && value.some((item) => !!item.length)) { + if (Array.isArray(value) && value.some((item) => !!item?.length)) { return Promise.resolve(); } return Promise.reject(new Error('请为角色至少分配一项权限')); diff --git a/km-console/packages/layout-clusters-fe/config/CoverHtmlWebpackPlugin.js b/km-console/packages/layout-clusters-fe/config/CoverHtmlWebpackPlugin.js index be47ab5b..dfb6754e 100755 --- a/km-console/packages/layout-clusters-fe/config/CoverHtmlWebpackPlugin.js +++ b/km-console/packages/layout-clusters-fe/config/CoverHtmlWebpackPlugin.js @@ -86,12 +86,12 @@ class CoverHtmlWebpackPlugin { assetJson.reverse().forEach((item) => { if (/\.js$/.test(item)) { - // if (item.includes('vendor~')) { - // vendors += ``; - // } else { - // TODO: entry 只有一个 - portalMap['@portal/layout'] = item; - // } + if (item.includes('vendor~')) { + vendors += ``; + } else { + // TODO: entry 只有一个 + portalMap['@portal/layout'] = item; + } } else if (/\.css$/.test(item)) { links += ``; } diff --git a/km-console/packages/layout-clusters-fe/config/d1-webpack.base.js b/km-console/packages/layout-clusters-fe/config/d1-webpack.base.js index d810e58a..9e21c70d 100644 --- a/km-console/packages/layout-clusters-fe/config/d1-webpack.base.js +++ b/km-console/packages/layout-clusters-fe/config/d1-webpack.base.js @@ -9,7 +9,6 @@ const TerserJSPlugin = require('terser-webpack-plugin'); const OptimizeCSSAssetsPlugin = require('optimize-css-assets-webpack-plugin'); const theme = require('./theme'); const ReactRefreshWebpackPlugin = require('@pmmmwh/react-refresh-webpack-plugin'); -const HardSourceWebpackPlugin = require('hard-source-webpack-plugin'); const isProd = process.env.NODE_ENV === 'production'; const babelOptions = { @@ -43,7 +42,6 @@ const babelOptions = { module.exports = () => { const cssFileName = isProd ? '[name]-[chunkhash].css' : '[name].css'; const plugins = [ - // !isProd && new HardSourceWebpackPlugin(), new CoverHtmlWebpackPlugin(), new ProgressBarPlugin(), new CaseSensitivePathsPlugin(), @@ -150,23 +148,21 @@ module.exports = () => { ], }, optimization: Object.assign( - // { - // splitChunks: { - // cacheGroups: { - // vendor: { - // test: /[\\/]node_modules[\\/]/, - // chunks: 'all', - // name: 'vendor', - // priority: 10, - // enforce: true, - // minChunks: 1, - // maxSize: 3500000, - // }, - // }, - // }, - // }, isProd ? 
{ + splitChunks: { + cacheGroups: { + vendor: { + test: /[\\/]node_modules[\\/]/, + chunks: 'all', + name: 'vendor', + priority: 10, + enforce: true, + minChunks: 1, + maxSize: 3000000, + }, + }, + }, minimizer: [ new TerserJSPlugin({ cache: true, diff --git a/km-console/packages/layout-clusters-fe/src/app.tsx b/km-console/packages/layout-clusters-fe/src/app.tsx index 0c5545ab..a2dd0a30 100755 --- a/km-console/packages/layout-clusters-fe/src/app.tsx +++ b/km-console/packages/layout-clusters-fe/src/app.tsx @@ -4,8 +4,8 @@ import React, { useState, useEffect, useLayoutEffect } from 'react'; import { BrowserRouter, Switch, Route, useLocation, useHistory } from 'react-router-dom'; import { get as lodashGet } from 'lodash'; import { DProLayout, AppContainer, IconFont, Menu, Utils, Page403, Page404, Page500, Modal } from 'knowdesign'; -import dantdZhCN from 'knowdesign/lib/locale/zh_CN'; -import dantdEnUS from 'knowdesign/lib/locale/en_US'; +import dantdZhCN from 'knowdesign/es/locale/zh_CN'; +import dantdEnUS from 'knowdesign/es/locale/en_US'; import { DotChartOutlined } from '@ant-design/icons'; import { licenseEventBus } from './constants/axiosConfig'; import intlZhCN from './locales/zh'; diff --git a/km-console/packages/layout-clusters-fe/src/components/TagsWithHide/index.tsx b/km-console/packages/layout-clusters-fe/src/components/TagsWithHide/index.tsx index 685f4d30..29454309 100644 --- a/km-console/packages/layout-clusters-fe/src/components/TagsWithHide/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/components/TagsWithHide/index.tsx @@ -1,6 +1,6 @@ import { DownOutlined } from '@ant-design/icons'; import { Popover } from 'knowdesign'; -import { TooltipPlacement } from 'knowdesign/lib/basic/tooltip'; +import { TooltipPlacement } from 'knowdesign/es/basic/tooltip'; import React, { useState, useRef, useEffect } from 'react'; import './index.less'; @@ -93,8 +93,9 @@ export default (props: PropsType) => { return (
= curState.endI ? 'hide' : 'show') : '' - }`} + className={`container-item ${ + curState.calculated ? (curState.isHideExpandNode ? 'show' : i >= curState.endI ? 'hide' : 'show') : '' + }`} > {item}
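The RoleTabContent.tsx hunk above hardens the role form against sparse permission arrays: systems with no assigned sub-permissions leave `null`/`undefined` holes in `permissionIdList`, which the old `item.length` validator would throw on before validation could complete. A minimal sketch of the two guards, assuming the form value is an array of per-system id arrays (`PermissionIds`, `compact`, and `hasAnyPermission` are illustrative names, not the project's API):

```typescript
// Assumed shape of the cascaded form value: one id array per system,
// with null/undefined holes for systems that were never touched.
type PermissionIds = (number[] | null | undefined)[];

// Submit path: drop the empty slots first, as the added `.filter((l) => l)` does.
const compact = (list: PermissionIds): number[][] =>
  list.filter((l): l is number[] => !!l);

// Validator: at least one non-empty sub-array must exist. The optional
// chaining in `item?.length` keeps a null hole from throwing mid-check.
const hasAnyPermission = (value: unknown): boolean =>
  Array.isArray(value) && value.some((item) => !!item?.length);

// Only the second system has permissions assigned:
console.log(hasAnyPermission([null, [101, 102]])); // true
console.log(compact([null, [101, 102]]));          // [[101, 102]]
```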
diff --git a/km-console/packages/layout-clusters-fe/src/pages/BrokerControllerChangeLog/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/BrokerControllerChangeLog/index.tsx index bd2443cd..7782bc15 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/BrokerControllerChangeLog/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/BrokerControllerChangeLog/index.tsx @@ -5,7 +5,7 @@ import API from '../../api'; import { getControllerChangeLogListColumns, defaultPagination } from './config'; import BrokerDetail from '../BrokerDetail'; import BrokerHealthCheck from '@src/components/CardBar/BrokerHealthCheck'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import './index.less'; const { request } = Utils; diff --git a/km-console/packages/layout-clusters-fe/src/pages/BrokerDashboard/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/BrokerDashboard/index.tsx index 528ced31..a3398dec 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/BrokerDashboard/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/BrokerDashboard/index.tsx @@ -2,7 +2,7 @@ import React from 'react'; import { MetricType } from '@src/api'; import BrokerHealthCheck from '@src/components/CardBar/BrokerHealthCheck'; import DashboardDragChart from '@src/components/DashboardDragChart'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import { AppContainer } from 'knowdesign'; const BrokerDashboard = (): JSX.Element => { diff --git a/km-console/packages/layout-clusters-fe/src/pages/BrokerList/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/BrokerList/index.tsx index 68230f60..8be77597 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/BrokerList/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/BrokerList/index.tsx @@ -7,7 +7,7 @@ import { dealTableRequestParams } from '../../constants/common'; import BrokerDetail from '../BrokerDetail'; import CardBar from '@src/components/CardBar'; import BrokerHealthCheck from '@src/components/CardBar/BrokerHealthCheck'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import './index.less'; const { request } = Utils; diff --git a/km-console/packages/layout-clusters-fe/src/pages/Consumers/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/Consumers/index.tsx index 7f2d2325..46a77761 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Consumers/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Consumers/index.tsx @@ -7,7 +7,7 @@ import { getOperatingStateListParams } from './interface'; import { useParams } from 'react-router-dom'; import ConsumerGroupDetail from './ConsumerGroupDetail'; import ConsumerGroupHealthCheck from '@src/components/CardBar/ConsumerGroupHealthCheck'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import { hashDataParse } from '@src/constants/common'; const { Option } = Select; diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx index a9af8a89..3474cb2e 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx @@ -19,9 +19,9 @@ export const jobType = [ }, 
process.env.BUSSINESS_VERSION ? { - label: '集群均衡', - value: 2, - } + label: '集群均衡', + value: 2, + } : undefined, ].filter((t) => t); @@ -75,6 +75,7 @@ export const getJobsListColumns = (arg?: any) => { title: '任务ID', dataIndex: 'id', key: 'id', + width: 70, }, { title: '任务类型', diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx index 1704c7cd..29448b1d 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx @@ -4,7 +4,7 @@ import { ProTable, Drawer, Utils, AppContainer, Form, Select, Input, Button, mes import API from '../../api'; import { getJobsListColumns, defaultPagination, runningStatus, jobType } from './config'; import JobsCheck from '@src/components/CardBar/JobsCheck'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import { ViewJobsProgress } from './ViewJobsProgress'; import './index.less'; import ReplicaChange from '@src/components/TopicJob/ReplicaChange'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/LoadRebalance/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/LoadRebalance/index.tsx index 06cb7289..cc0a6c02 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/LoadRebalance/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/LoadRebalance/index.tsx @@ -2,7 +2,7 @@ import React, { useState, useEffect, useRef } from 'react'; import { Select, Form, Utils, AppContainer, Input, Button, ProTable, Badge, Tag, SearchInput } from 'knowdesign'; import BalanceDrawer from './BalanceDrawer'; import HistoryDrawer from './HistoryDrawer'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import { getSizeAndUnit } from '../../constants/common'; import api from '../../api'; import './index.less'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx index e106c313..e7446095 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx @@ -1,5 +1,3 @@ -/* eslint-disable react/display-name */ - import { Button, Divider, Drawer, Form, Input, InputNumber, message, Radio, Select, Spin, Space, Utils } from 'knowdesign'; import * as React from 'react'; import { useIntl } from 'react-intl'; @@ -16,10 +14,9 @@ const clientPropertiesPlaceholder = `用于创建Kafka客户端进行信息获 { "security.protocol": "SASL_PLAINTEXT", "sasl.mechanism": "SCRAM-SHA-256", - "sasl.jaas.config": - "org.apache.kafka.common.security.scram. - ScramLoginModule required username="xxxxxx" - password="xxxxxx";" + "sasl.jaas.config": "org.apache.kafka.common.security. 
+scram.ScramLoginModule required username=\\"xxxxxx\\" pass +word=\\"xxxxxx\\";" } `; diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx index f2cc3c8c..4839f54c 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx @@ -93,7 +93,10 @@ const MultiClusterPage = () => { setVersionLoading(true); Utils.request(API.getClustersVersion) .then((versions: string[]) => { - setExistKafkaVersion(versions || []); + if (!Array.isArray(versions)) { + versions = []; + } + setExistKafkaVersion(versions.sort().reverse() || []); setVersionLoading(false); setCheckedKafkaVersions(versions || []); }) @@ -299,7 +302,9 @@ const MultiClusterPage = () => {
-          {renderList}
+          <…>
+            {renderList}
+          </…>
); }; diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx index 6835cd2e..d5e7edfe 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx @@ -147,9 +147,9 @@ const ListScroll = (props: { loadMoreData: any; list: any; pagination: any; getP }} > {[ - ['BytesIn', loadReBalanceEnable && loadReBalanceNwIn], - ['BytesOut', loadReBalanceEnable && loadReBalanceNwOut], - ['Disk', loadReBalanceEnable && loadReBalanceDisk], + ['BytesIn', loadReBalanceNwIn === 1], + ['BytesOut', loadReBalanceNwOut === 1], + ['Disk', loadReBalanceDisk === 1], ].map(([name, isBalanced]) => { return isBalanced ? (
{name} 已均衡
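The List.tsx change above (and the matching LeftSider.tsx hunk below) stops deriving the "已均衡" (balanced) badge from `loadReBalanceEnable && metric` and compares each dimension's metric against 1 directly. A small sketch of the difference, under the assumption suggested by the new code that the metrics are numeric flags with 1 meaning "balanced":

```typescript
// Illustrative shape; field names follow the hunk, flag values are assumed.
interface RebalanceMetrics {
  loadReBalanceEnable?: number;
  loadReBalanceNwIn?: number;
}

const m: RebalanceMetrics = { loadReBalanceEnable: 1, loadReBalanceNwIn: 0 };

// Old expression: a truthiness product. It yields 0 here (a number, not a
// boolean) and cannot distinguish "feature off" from "not balanced".
const before = m.loadReBalanceEnable && m.loadReBalanceNwIn; // 0

// New expression: explicit per-dimension comparison.
const after = m.loadReBalanceNwIn === 1; // false

console.log(before, after);
```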
diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/config.ts b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/config.ts index d5b8bf31..ccc879c7 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/config.ts +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/config.ts @@ -1,4 +1,4 @@ -import { FormItemType, IFormItem } from 'knowdesign/lib/extend/x-form'; +import { FormItemType, IFormItem } from 'knowdesign/es/extend/x-form'; export const bootstrapServersErrCodes = [10, 11, 12]; export const zkErrCodes = [20, 21]; diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less index 76ff6b41..1d79b7db 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less @@ -329,6 +329,11 @@ } } } + &-dashboard { + & > .dcloud-spin-nested-loading > .dcloud-spin-container::after { + background: transparent; + } + } .multi-cluster-list { box-sizing: content-box; diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx index 798b69b1..36077890 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx @@ -201,6 +201,8 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { if (Number.isNaN(parsedValue)) { parsedValue = values.MessagesIn; } else { + // 为避免出现过小的数字影响图表展示效果,图表值统一只保留到小数点后三位 + parsedValue = parseFloat(parsedValue.toFixed(3)); if (maxValue < parsedValue) maxValue = parsedValue; } const valuesWithUnit = Object.entries(values).map(([key, value]) => { @@ -287,8 +289,8 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { checkboxProps: (record: MetricInfo) => { return record.name === DEFAULT_METRIC ? { - disabled: true, - } + disabled: true, + } : {}; }, submitCallback: indicatorChangeCallback, diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx index 7bd3e948..dc104a1a 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx @@ -165,9 +165,9 @@ const LeftSider = () => {
{clusterInfo?.kafkaVersion ?? '-'}
{clusterMetrics?.LoadReBalanceEnable !== undefined && [ - ['BytesIn', clusterMetrics?.LoadReBalanceEnable && clusterMetrics?.LoadReBalanceNwIn], - ['BytesOut', clusterMetrics?.LoadReBalanceEnable && clusterMetrics?.LoadReBalanceNwOut], - ['Disk', clusterMetrics?.LoadReBalanceEnable && clusterMetrics?.LoadReBalanceDisk], + ['BytesIn', clusterMetrics?.LoadReBalanceNwIn === 1], + ['BytesOut', clusterMetrics?.LoadReBalanceNwOut === 1], + ['Disk', clusterMetrics?.LoadReBalanceDisk === 1], ].map(([name, isBalanced]) => { return isBalanced ? (
{name} 已均衡
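The DetailChart/index.tsx hunk above adds rounding before the series maximum is tracked; per the in-code comment, chart values are kept to three decimal places so that very small floating-point values do not distort the display. A minimal sketch of the transformation (`roundForChart` is an illustrative name):

```typescript
// toFixed(3) formats to a 3-decimal string; parseFloat drops trailing zeros.
const roundForChart = (raw: number): number => parseFloat(raw.toFixed(3));

console.log(roundForChart(0.000049));  // 0      (tiny values collapse to zero)
console.log(roundForChart(12.345678)); // 12.346
console.log(roundForChart(3));         // 3
```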
diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx index 481bf0b5..e4b4b2c6 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx @@ -1,4 +1,4 @@ -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import React from 'react'; import TourGuide, { ClusterDetailSteps } from '@src/components/TourGuide'; import './index.less'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx index 274f7de4..5ec957a6 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx @@ -192,8 +192,10 @@ const ConsumeClientTest = () => { // 过滤出消费数量不足设定值的partition const filtersPartition = _partitionList.filter((item: any) => item.recordCount < untilMsgNum); curPartitionList.current = filtersPartition; // 用作下一次请求的入参 - setIsStop(filtersPartition.length < 1); - isStopStatus.current = filtersPartition.length < 1; + if (!isStop) { + setIsStop(filtersPartition.length < 1); + isStopStatus.current = filtersPartition.length < 1; + } break; case 'max size': setIsStop(+recordSizeCur.current >= unitMsgSize); @@ -202,8 +204,10 @@ const ConsumeClientTest = () => { case 'max size per partition': // 过滤出消费size不足设定值的partition const filters = partitionConsumedList.filter((item: any) => item.recordSizeUnitB < unitMsgSize); - setIsStop(filters.length < 1); - isStopStatus.current = filters.length < 1; + if (!isStop) { + setIsStop(filters.length < 1); + isStopStatus.current = filters.length < 1; + } curPartitionList.current = filters; break; } diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/component/ConfigForm.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/component/ConfigForm.tsx index 4aa38014..a4f8f838 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/component/ConfigForm.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/component/ConfigForm.tsx @@ -1,5 +1,5 @@ import { Button, XForm } from 'knowdesign'; -import { IFormItem } from 'knowdesign/lib/extend/x-form'; +import { IFormItem } from 'knowdesign/es/extend/x-form'; import * as React from 'react'; import './style/form.less'; import { useIntl } from 'react-intl'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx index 097f9f71..d27f8d4e 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx @@ -1,4 +1,4 @@ -import { FormItemType, IFormItem } from 'knowdesign/lib/extend/x-form'; +import { FormItemType, IFormItem } from 'knowdesign/es/extend/x-form'; import moment from 'moment'; import React from 'react'; import { timeFormat } from '../../constants/common'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/index.tsx index 59709947..e3e3f6b9 100644 --- 
a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/index.tsx @@ -1,5 +1,5 @@ import { AppContainer } from 'knowdesign'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import * as React from 'react'; import { useParams } from 'react-router-dom'; import TaskTabs from './component/TaskTabs'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/ConfigFrom.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/ConfigFrom.tsx index 0a7b14d5..dd9d9fda 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/ConfigFrom.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/ConfigFrom.tsx @@ -1,5 +1,5 @@ import { Button, Col, Form, Row } from 'knowdesign'; -import { FormItemType, handleFormItem, IFormItem, renderFormItem } from 'knowdesign/lib/extend/x-form'; +import { FormItemType, handleFormItem, IFormItem, renderFormItem } from 'knowdesign/es/extend/x-form'; import * as React from 'react'; import './style/form.less'; import EditTable from './EditTable'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx index 6739a457..81503271 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx @@ -1,6 +1,6 @@ import { QuestionCircleOutlined } from '@ant-design/icons'; import { IconFont, Switch, Tooltip } from 'knowdesign'; -import { FormItemType, IFormItem } from 'knowdesign/lib/extend/x-form'; +import { FormItemType, IFormItem } from 'knowdesign/es/extend/x-form'; import moment from 'moment'; import React from 'react'; import { timeFormat, getRandomStr } from '@src/constants/common'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/index.tsx index ed7dbf82..5be8338d 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/index.tsx @@ -3,7 +3,7 @@ import * as React from 'react'; import ProduceClientTest from './Produce'; import './index.less'; import TaskTabs from '../TestingConsumer/component/TaskTabs'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import { useParams } from 'react-router-dom'; const Produce = () => { diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDashboard/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDashboard/index.tsx index 32b6a4aa..72797227 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDashboard/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDashboard/index.tsx @@ -3,7 +3,7 @@ import { MetricType } from '@src/api'; import TopicHealthCheck from '@src/components/CardBar/TopicHealthCheck'; import DashboardDragChart from '@src/components/DashboardDragChart'; import { AppContainer } from 'knowdesign'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; const TopicDashboard = () => { const [global] = AppContainer.useGlobalValue(); diff --git 
a/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx index 787eaf12..02db801e 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx @@ -10,7 +10,7 @@ import TopicHealthCheck from '@src/components/CardBar/TopicHealthCheck'; import TopicDetail from '../TopicDetail'; import Delete from './Delete'; import { ClustersPermissionMap } from '../CommonConfig'; -import DBreadcrumb from 'knowdesign/lib/extend/d-breadcrumb'; +import DBreadcrumb from 'knowdesign/es/extend/d-breadcrumb'; import ReplicaChange from '@src/components/TopicJob/ReplicaChange'; import SmallChart from '@src/components/SmallChart'; import ReplicaMove from '@src/components/TopicJob/ReplicaMove'; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/cache/CollectedMetricsLocalCache.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/cache/CollectedMetricsLocalCache.java index c0469fb6..bc5b1c34 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/cache/CollectedMetricsLocalCache.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/cache/CollectedMetricsLocalCache.java @@ -10,13 +10,13 @@ import java.util.concurrent.TimeUnit; public class CollectedMetricsLocalCache { private static final Cache brokerMetricsCache = Caffeine.newBuilder() - .expireAfterWrite(60, TimeUnit.SECONDS) - .maximumSize(2000) + .expireAfterWrite(90, TimeUnit.SECONDS) + .maximumSize(10000) .build(); private static final Cache> topicMetricsCache = Caffeine.newBuilder() .expireAfterWrite(90, TimeUnit.SECONDS) - .maximumSize(5000) + .maximumSize(10000) .build(); private static final Cache> partitionMetricsCache = Caffeine.newBuilder() @@ -29,63 +29,64 @@ public class CollectedMetricsLocalCache { .maximumSize(20000) .build(); - public static Float getBrokerMetrics(Long clusterPhyId, Integer brokerId, String metricName) { - return brokerMetricsCache.getIfPresent(CollectedMetricsLocalCache.genBrokerMetricKey(clusterPhyId, brokerId, metricName)); + public static Float getBrokerMetrics(String brokerMetricKey) { + return brokerMetricsCache.getIfPresent(brokerMetricKey); } - public static void putBrokerMetrics(Long clusterPhyId, Integer brokerId, String metricName, Float value) { + public static void putBrokerMetrics(String brokerMetricKey, Float value) { if (value == null) { return; } - brokerMetricsCache.put(CollectedMetricsLocalCache.genBrokerMetricKey(clusterPhyId, brokerId, metricName), value); + + brokerMetricsCache.put(brokerMetricKey, value); } - public static List getTopicMetrics(Long clusterPhyId, String topicName, String metricName) { - return topicMetricsCache.getIfPresent(CollectedMetricsLocalCache.genClusterTopicMetricKey(clusterPhyId, topicName, metricName)); + public static List getTopicMetrics(String topicMetricKey) { + return topicMetricsCache.getIfPresent(topicMetricKey); } - public static void putTopicMetrics(Long clusterPhyId, String topicName, String metricName, List metricsList) { + public static void putTopicMetrics(String topicMetricKey, List metricsList) { if (metricsList == null) { return; } - topicMetricsCache.put(CollectedMetricsLocalCache.genClusterTopicMetricKey(clusterPhyId, topicName, metricName), metricsList); + + topicMetricsCache.put(topicMetricKey, metricsList); } - public static List getPartitionMetricsList(Long clusterPhyId, String topicName, String metricName) { - return 
partitionMetricsCache.getIfPresent(CollectedMetricsLocalCache.genClusterTopicMetricKey(clusterPhyId, topicName, metricName)); + public static List getPartitionMetricsList(String partitionMetricKey) { + return partitionMetricsCache.getIfPresent(partitionMetricKey); } - public static void putPartitionMetricsList(Long clusterPhyId, String topicName, String metricName, List metricsList) { + public static void putPartitionMetricsList(String partitionMetricsKey, List metricsList) { if (metricsList == null) { return; } - partitionMetricsCache.put(CollectedMetricsLocalCache.genClusterTopicMetricKey(clusterPhyId, topicName, metricName), metricsList); + partitionMetricsCache.put(partitionMetricsKey, metricsList); } - public static Float getReplicaMetrics(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, String metricName) { - return replicaMetricsValueCache.getIfPresent(CollectedMetricsLocalCache.genReplicaMetricCacheKey(clusterPhyId, brokerId, topicName, partitionId, metricName)); + public static Float getReplicaMetrics(String replicaMetricsKey) { + return replicaMetricsValueCache.getIfPresent(replicaMetricsKey); } - public static void putReplicaMetrics(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, String metricName, Float value) { + public static void putReplicaMetrics(String replicaMetricsKey, Float value) { if (value == null) { return; } - replicaMetricsValueCache.put(CollectedMetricsLocalCache.genReplicaMetricCacheKey(clusterPhyId, brokerId, topicName, partitionId, metricName), value); + replicaMetricsValueCache.put(replicaMetricsKey, value); } - - /**************************************************** private method ****************************************************/ - - - private static String genBrokerMetricKey(Long clusterPhyId, Integer brokerId, String metricName) { + public static String genBrokerMetricKey(Long clusterPhyId, Integer brokerId, String metricName) { return clusterPhyId + "@" + brokerId + "@" + metricName; } - private static String genClusterTopicMetricKey(Long clusterPhyId, String topicName, String metricName) { + public static String genClusterTopicMetricKey(Long clusterPhyId, String topicName, String metricName) { return clusterPhyId + "@" + topicName + "@" + metricName; } - private static String genReplicaMetricCacheKey(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, String metricName) { + public static String genReplicaMetricCacheKey(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, String metricName) { return clusterPhyId + "@" + brokerId + "@" + topicName + "@" + partitionId + "@" + metricName; } + + /**************************************************** private method ****************************************************/ + } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java index 5c52af61..836c7d56 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java @@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.core.service.acl.impl; import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; import com.didiglobal.logi.log.ILog; import com.didiglobal.logi.log.LogFactory; +import 
com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; @@ -10,10 +11,12 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; +import com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService; +import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; @@ -58,6 +61,9 @@ public class KafkaAclServiceImpl extends BaseVersionControlService implements Ka @Autowired private KafkaAdminZKClient kafkaAdminZKClient; + @Autowired + private ClusterPhyService clusterPhyService; + @Override protected VersionItemTypeEnum getVersionItemType() { return VersionItemTypeEnum.SERVICE_OP_ACL; @@ -175,6 +181,18 @@ public class KafkaAclServiceImpl extends BaseVersionControlService implements Ka private Result> getAclByKafkaClient(VersionItemParam itemParam) { ClusterPhyParam param = (ClusterPhyParam) itemParam; try { + // 获取集群 + ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(param.getClusterPhyId()); + if (clusterPhy == null) { + return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(param.getClusterPhyId())); + } + + // 判断是否开启认证 + if (!ClusterAuthTypeEnum.enableAuth(clusterPhy.getAuthType())) { + log.warn("method=getAclByKafkaClient||clusterPhyId={}||msg=not open auth and ignore get acls", clusterPhy.getId()); + return Result.buildSuc(new ArrayList<>()); + } + AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId()); DescribeAclsResult describeAclsResult = diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/BrokerService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/BrokerService.java index d1f181b4..62f03e65 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/BrokerService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/BrokerService.java @@ -44,6 +44,7 @@ public interface BrokerService { * 获取具体Broker */ Broker getBroker(Long clusterPhyId, Integer brokerId); + Broker getBrokerFromCacheFirst(Long clusterPhyId, Integer brokerId); /** * 获取BrokerLog-Dir信息 diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerMetricServiceImpl.java index d47aa2ea..93c343ff 100644 --- 
a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerMetricServiceImpl.java @@ -110,9 +110,10 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker } @Override - public Result collectBrokerMetricsFromKafkaWithCacheFirst(Long clusterId, Integer brokerId, String metric){ + public Result collectBrokerMetricsFromKafkaWithCacheFirst(Long clusterId, Integer brokerId, String metric) { + String brokerMetricKey = CollectedMetricsLocalCache.genBrokerMetricKey(clusterId, brokerId, metric); - Float keyValue = CollectedMetricsLocalCache.getBrokerMetrics(clusterId, brokerId, metric); + Float keyValue = CollectedMetricsLocalCache.getBrokerMetrics(brokerMetricKey); if(null != keyValue) { BrokerMetrics brokerMetrics = new BrokerMetrics(clusterId, brokerId); brokerMetrics.putMetric(metric, keyValue); @@ -124,7 +125,7 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker Map metricsMap = ret.getData().getMetrics(); for(Map.Entry metricNameAndValueEntry : metricsMap.entrySet()){ - CollectedMetricsLocalCache.putBrokerMetrics(clusterId, brokerId, metricNameAndValueEntry.getKey(), metricNameAndValueEntry.getValue()); + CollectedMetricsLocalCache.putBrokerMetrics(brokerMetricKey, metricNameAndValueEntry.getValue()); } return ret; @@ -178,11 +179,16 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker @Override public Result> getMetricPointsFromES(Long clusterPhyId, Integer brokerId, MetricDTO dto) { - Map metricPointMap = brokerMetricESDAO.getBrokerMetricsPoint(clusterPhyId, brokerId, - dto.getMetricsNames(), dto.getAggType(), dto.getStartTime(), dto.getEndTime()); + Map metricPointMap = brokerMetricESDAO.getBrokerMetricsPoint( + clusterPhyId, + brokerId, + dto.getMetricsNames(), + dto.getAggType(), + dto.getStartTime(), + dto.getEndTime() + ); - List metricPoints = new ArrayList<>(metricPointMap.values()); - return Result.buildSuc(metricPoints); + return Result.buildSuc(new ArrayList<>(metricPointMap.values())); } @Override @@ -199,8 +205,10 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker brokerMetrics.add(ConvertUtil.obj2Obj(brokerMetricPO, BrokerMetrics.class)); } catch (Exception e) { - LOGGER.error("method=getLatestMetricsFromES||clusterPhyId={}||brokerId={}||errMsg=exception", - clusterPhyId, brokerId, e); + LOGGER.error( + "method=getLatestMetricsFromES||clusterPhyId={}||brokerId={}||errMsg=exception", + clusterPhyId, brokerId, e + ); } } @@ -219,6 +227,7 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker } /**************************************************** private method ****************************************************/ + private List listTopNBrokerIds(Long clusterId, Integer topN){ List brokers = brokerService.listAliveBrokersFromDB(clusterId); if(CollectionUtils.isEmpty(brokers)){return new ArrayList<>();} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java index 3ab9f3fa..dc702388 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java @@ -130,6 +130,9 @@ 
public class BrokerServiceImpl extends BaseVersionControlService implements Brok // 如果当前Broker还存活,则更新DB信息 BrokerPO newBrokerPO = ConvertUtil.obj2Obj(presentAliveBroker, BrokerPO.class); + if (presentAliveBroker.getEndpointMap() != null) { + newBrokerPO.setEndpointMap(ConvertUtil.obj2Json(presentAliveBroker.getEndpointMap())); + } newBrokerPO.setId(inDBBrokerPO.getId()); newBrokerPO.setStatus(Constant.ALIVE); newBrokerPO.setCreateTime(inDBBrokerPO.getCreateTime()); @@ -203,7 +206,23 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok lambdaQueryWrapper.eq(BrokerPO::getClusterPhyId, clusterPhyId); lambdaQueryWrapper.eq(BrokerPO::getBrokerId, brokerId); - return ConvertUtil.obj2Obj(brokerDAO.selectOne(lambdaQueryWrapper), Broker.class); + return Broker.buildFrom(brokerDAO.selectOne(lambdaQueryWrapper)); + } + + @Override + public Broker getBrokerFromCacheFirst(Long clusterPhyId, Integer brokerId) { + List brokerList = this.listAliveBrokersFromCacheFirst(clusterPhyId); + if (brokerList == null) { + return null; + } + + for (Broker broker: brokerList) { + if (brokerId.equals(broker.getBrokerId())) { + return broker; + } + } + + return null; } @Override @@ -256,9 +275,8 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok /**************************************************** private method ****************************************************/ private List listAllBrokersAndUpdateCache(Long clusterPhyId) { - List allBrokerList = ConvertUtil.list2List(this.getAllBrokerPOsFromDB(clusterPhyId), Broker.class); + List allBrokerList = getAllBrokerPOsFromDB(clusterPhyId).stream().map(elem -> Broker.buildFrom(elem)).collect(Collectors.toList()); brokersCache.put(clusterPhyId, allBrokerList); - return allBrokerList; } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/change/record/impl/KafkaChangeRecordServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/change/record/impl/KafkaChangeRecordServiceImpl.java index 0c4eefe7..40265a8f 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/change/record/impl/KafkaChangeRecordServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/change/record/impl/KafkaChangeRecordServiceImpl.java @@ -5,14 +5,19 @@ import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.didiglobal.logi.log.ILog; import com.didiglobal.logi.log.LogFactory; +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO; import com.xiaojukeji.know.streaming.km.common.bean.po.changerecord.KafkaChangeRecordPO; import com.xiaojukeji.know.streaming.km.core.service.change.record.KafkaChangeRecordService; import com.xiaojukeji.know.streaming.km.persistence.mysql.changerecord.KafkaChangeRecordDAO; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.dao.DuplicateKeyException; import org.springframework.stereotype.Service; +import java.util.List; +import java.util.concurrent.TimeUnit; + + @Service public class KafkaChangeRecordServiceImpl implements KafkaChangeRecordService { private static final ILog log = LogFactory.getLog(KafkaChangeRecordServiceImpl.class); @@ -20,11 +25,24 @@ public class KafkaChangeRecordServiceImpl implements KafkaChangeRecordService { @Autowired private 
KafkaChangeRecordDAO kafkaChangeRecordDAO; + private static final Cache recordCache = Caffeine.newBuilder() + .expireAfterWrite(12, TimeUnit.HOURS) + .maximumSize(1000) + .build(); + @Override public int insertAndIgnoreDuplicate(KafkaChangeRecordPO recordPO) { try { + String cacheData = recordCache.getIfPresent(recordPO.getUniqueField()); + if (cacheData != null || this.checkExistInDB(recordPO.getUniqueField())) { + // 已存在时,则直接返回 + return 0; + } + + recordCache.put(recordPO.getUniqueField(), recordPO.getUniqueField()); + return kafkaChangeRecordDAO.insert(recordPO); - } catch (DuplicateKeyException dke) { + } catch (Exception e) { return 0; } } @@ -40,4 +58,12 @@ public class KafkaChangeRecordServiceImpl implements KafkaChangeRecordService { /**************************************************** private method ****************************************************/ + private boolean checkExistInDB(String uniqueField) { + LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); + lambdaQueryWrapper.eq(KafkaChangeRecordPO::getUniqueField, uniqueField); + + List poList = kafkaChangeRecordDAO.selectList(lambdaQueryWrapper); + + return poList != null && !poList.isEmpty(); + } } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/ClusterPhyService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/ClusterPhyService.java index b55594b1..56b6640b 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/ClusterPhyService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/ClusterPhyService.java @@ -73,5 +73,5 @@ public interface ClusterPhyService { * 获取系统已存在的kafka版本列表 * @return */ - Set getClusterVersionSet(); + List getClusterVersionList(); } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java index fee8fb0e..075c53c2 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java @@ -126,7 +126,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust private TopicMetricService topicMetricService; @Autowired - private TopicService topicService; + private TopicService topicService; @Autowired private PartitionService partitionService; @@ -728,13 +728,10 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust Long clusterId = param.getClusterId(); //1、获取jmx的属性信息 - VersionJmxInfo jmxInfo = getJMXInfo(clusterId, metric); - if(null == jmxInfo){return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);} - List brokers = brokerService.listAliveBrokersFromDB(clusterId); float metricVale = 0f; - for(Broker broker : brokers){ + for(Broker broker : brokers) { Result ret = brokerMetricService.collectBrokerMetricsFromKafkaWithCacheFirst(clusterId, broker.getBrokerId(), metric); if(null == ret || ret.failed() || null == ret.getData()){continue;} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java index 562645c0..2ba13738 100644 --- 
a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java @@ -24,8 +24,9 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.dao.DuplicateKeyException; import org.springframework.stereotype.Service; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; -import java.util.Set; import java.util.stream.Collectors; /** @@ -111,7 +112,7 @@ public class ClusterPhyServiceImpl implements ClusterPhyService { throw new DuplicateException(String.format("clusterName:%s duplicated", clusterPhyPO.getName())); } catch (Exception e) { - log.error("cmethod=addClusterPhy||clusterPhyId={}||operator={}||msg=add cluster failed||errMsg=exception!", clusterPhyPO.getId(), operator, e); + log.error("method=addClusterPhy||clusterPhyId={}||operator={}||msg=add cluster failed||errMsg=exception!", clusterPhyPO.getId(), operator, e); throw new AdminOperateException("add cluster failed", e, ResultStatus.MYSQL_OPERATE_FAILED); } @@ -205,9 +206,12 @@ public class ClusterPhyServiceImpl implements ClusterPhyService { } @Override - public Set getClusterVersionSet() { - List clusterPhyList = listAllClusters(); - Set versionSet = clusterPhyList.stream().map(elem -> elem.getKafkaVersion()).collect(Collectors.toSet()); - return versionSet; + public List getClusterVersionList() { + List clusterPhyList = this.listAllClusters(); + + List versionList = new ArrayList<>(clusterPhyList.stream().map(elem -> elem.getKafkaVersion()).collect(Collectors.toSet())); + Collections.sort(versionList); + + return versionList; } } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java index 184ea1dd..b029330c 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java @@ -102,7 +102,10 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId); try { - DescribeConsumerGroupsResult describeConsumerGroupsResult = adminClient.describeConsumerGroups(Arrays.asList(groupName), new DescribeConsumerGroupsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS).includeAuthorizedOperations(true)); + DescribeConsumerGroupsResult describeConsumerGroupsResult = adminClient.describeConsumerGroups( + Arrays.asList(groupName), + new DescribeConsumerGroupsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS).includeAuthorizedOperations(false) + ); return describeConsumerGroupsResult.all().get().get(groupName); } catch(Exception e){ @@ -151,12 +154,12 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId); lambdaQueryWrapper.eq(!ValidateUtils.isBlank(topicName), GroupMemberPO::getTopicName, topicName); lambdaQueryWrapper.eq(!ValidateUtils.isBlank(groupName), GroupMemberPO::getGroupName, groupName); - lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId); lambdaQueryWrapper.like(!ValidateUtils.isBlank(searchTopicKeyword), GroupMemberPO::getTopicName, searchTopicKeyword); 
lambdaQueryWrapper.like(!ValidateUtils.isBlank(searchGroupKeyword), GroupMemberPO::getGroupName, searchGroupKeyword); + lambdaQueryWrapper.orderByDesc(GroupMemberPO::getClusterPhyId, GroupMemberPO::getTopicName); IPage iPage = new Page<>(); - iPage.setPages(dto.getPageNo()); + iPage.setCurrent(dto.getPageNo()); iPage.setSize(dto.getPageSize()); iPage = groupMemberDAO.selectPage(iPage, lambdaQueryWrapper); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java index 42311eef..1fb3f488 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java @@ -56,7 +56,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService { @Override public int insertAndIgnoreDuplicateException(KafkaController kafkaController) { try { - Broker broker = brokerService.getBroker(kafkaController.getClusterPhyId(), kafkaController.getBrokerId()); + Broker broker = brokerService.getBrokerFromCacheFirst(kafkaController.getClusterPhyId(), kafkaController.getBrokerId()); KafkaControllerPO kafkaControllerPO = new KafkaControllerPO(); kafkaControllerPO.setClusterPhyId(kafkaController.getClusterPhyId()); @@ -136,34 +136,56 @@ public class KafkaControllerServiceImpl implements KafkaControllerService { /**************************************************** private method ****************************************************/ private Result getControllerFromAdminClient(ClusterPhy clusterPhy) { + AdminClient adminClient = null; try { - AdminClient adminClient = null; - try { - adminClient = kafkaAdminClient.getClient(clusterPhy.getId()); - } catch (Exception e) { - log.error("class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e); - - // 集群已经加载进来,但是创建admin-client失败,则设置无controller - return Result.buildSuc(); - } - - DescribeClusterResult describeClusterResult = adminClient.describeCluster(new DescribeClusterOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)); - - Node controllerNode = describeClusterResult.controller().get(); - if (controllerNode == null) { - return Result.buildSuc(); - } - - return Result.buildSuc(new KafkaController( - clusterPhy.getId(), - controllerNode.id(), - System.currentTimeMillis() - )); + adminClient = kafkaAdminClient.getClient(clusterPhy.getId()); } catch (Exception e) { log.error("class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e); - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage()); + // 集群已经加载进来,但是创建admin-client失败,则设置无controller + return Result.buildSuc(); } + + // 先从DB获取该集群controller + KafkaController dbKafkaController = null; + + for (int i = 1; i <= Constant.DEFAULT_RETRY_TIME; ++i) { + try { + if (i == 1) { + // 获取DB中的controller信息 + dbKafkaController = this.getKafkaControllerFromDB(clusterPhy.getId()); + } + + DescribeClusterResult describeClusterResult = adminClient.describeCluster( + new DescribeClusterOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS) + ); + + Node controllerNode = describeClusterResult.controller().get(); + if (controllerNode == null) 
{ + return Result.buildSuc(); + } + + if (dbKafkaController != null && controllerNode.id() == dbKafkaController.getBrokerId()) { + // ID没有变化,直接返回原先的 + return Result.buildSuc(dbKafkaController); + } + + // 发生了变化 + return Result.buildSuc(new KafkaController( + clusterPhy.getId(), + controllerNode.id(), + System.currentTimeMillis() + )); + } catch (Exception e) { + log.error( + "class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||tryTime={}||errMsg=exception", + clusterPhy.getId(), i, e + ); + } + } + + // 三次出错,则直接返回无controller + return Result.buildSuc(); } private Result getControllerFromZKClient(ClusterPhy clusterPhy) { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkauser/impl/KafkaUserServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkauser/impl/KafkaUserServiceImpl.java index 7e5fa91f..e939f00d 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkauser/impl/KafkaUserServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkauser/impl/KafkaUserServiceImpl.java @@ -7,6 +7,7 @@ import com.didiglobal.logi.log.LogFactory; import com.didiglobal.logi.security.common.dto.oplog.OplogDTO; import com.didiglobal.logi.security.util.PWEncryptUtil; import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO; +import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkauser.KafkaUser; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.kafkauser.KafkaUserParam; @@ -17,11 +18,13 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaUserPO; import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; +import com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum; import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum; import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; +import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService; import com.xiaojukeji.know.streaming.km.core.service.kafkauser.KafkaUserService; import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; @@ -32,7 +35,6 @@ import kafka.admin.ConfigCommand; import kafka.server.ConfigType; import kafka.zk.*; import org.apache.kafka.clients.admin.*; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.security.scram.ScramCredential; import org.apache.kafka.common.security.scram.internals.ScramCredentialUtils; import org.apache.kafka.common.security.scram.internals.ScramFormatter; @@ -71,6 +73,9 @@ public class KafkaUserServiceImpl extends BaseVersionControlService implements K @Autowired private OpLogWrapService opLogWrapService; + @Autowired + private ClusterPhyService clusterPhyService; + @Override protected VersionItemTypeEnum 
getVersionItemType() { return VersionItemTypeEnum.SERVICE_OP_KAFKA_USER; @@ -571,6 +576,18 @@ public class KafkaUserServiceImpl extends BaseVersionControlService implements K private Result> getKafkaUserByKafkaClient(VersionItemParam itemParam) { KafkaUserParam param = (KafkaUserParam) itemParam; try { + // 获取集群 + ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(param.getClusterPhyId()); + if (clusterPhy == null) { + return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(param.getClusterPhyId())); + } + + // 判断认证模式,如果是非scram模式,直接返回 + if (!ClusterAuthTypeEnum.isScram(clusterPhy.getAuthType())) { + log.warn("method=getKafkaUserByKafkaClient||clusterPhyId={}||msg=not scram auth type and ignore get users", clusterPhy.getId()); + return Result.buildSuc(new ArrayList<>()); + } + AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId()); // 查询集群kafka-user diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/OpPartitionService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/OpPartitionService.java new file mode 100644 index 00000000..6a3611f8 --- /dev/null +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/OpPartitionService.java @@ -0,0 +1,14 @@ +package com.xiaojukeji.know.streaming.km.core.service.partition; + +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; +import org.apache.kafka.common.TopicPartition; + +import java.util.List; + +public interface OpPartitionService { + + /** + * 优先副本选举 + */ + Result preferredReplicaElection(Long clusterPhyId, List tpList); +} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java new file mode 100644 index 00000000..0f1186ef --- /dev/null +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java @@ -0,0 +1,119 @@ +package com.xiaojukeji.know.streaming.km.core.service.partition.impl; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition.BatchPartitionParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; +import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; +import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; +import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException; +import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionService; +import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; +import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; +import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; +import kafka.zk.KafkaZkClient; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.ElectLeadersOptions; +import org.apache.kafka.clients.admin.ElectLeadersResult; +import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.TopicPartition; +import org.springframework.beans.factory.annotation.Autowired; 
+import org.springframework.stereotype.Service; +import scala.jdk.javaapi.CollectionConverters; + +import javax.annotation.PostConstruct; +import java.util.HashSet; +import java.util.List; + +import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.VC_HANDLE_NOT_EXIST; +import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*; +import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_PARTITION_LEADER; + + +/** + * @author didi + */ +@Service +public class OpPartitionServiceImpl extends BaseVersionControlService implements OpPartitionService { + private static final ILog LOGGER = LogFactory.getLog(OpPartitionServiceImpl.class); + + @Autowired + private KafkaAdminClient kafkaAdminClient; + + @Autowired + private KafkaAdminZKClient kafkaAdminZKClient; + + public static final String PREFERRED_REPLICA_ELECTION = "PreferredReplicaElection"; + + @Override + protected VersionItemTypeEnum getVersionItemType() { + return SERVICE_OP_PARTITION_LEADER; + } + + @PostConstruct + private void init() { + registerVCHandler(PREFERRED_REPLICA_ELECTION, V_0_10_0_0, V_2_8_0, "preferredReplicaElectionByZKClient", this::preferredReplicaElectionByZKClient); + registerVCHandler(PREFERRED_REPLICA_ELECTION, V_2_8_0, V_MAX, "preferredReplicaElectionByKafkaClient", this::preferredReplicaElectionByKafkaClient); + } + + @Override + public Result preferredReplicaElection(Long clusterPhyId, List tpList) { + try { + return (Result) doVCHandler( + clusterPhyId, + PREFERRED_REPLICA_ELECTION, + new BatchPartitionParam(clusterPhyId, tpList) + ); + } catch (VCHandlerNotExistException e) { + return Result.buildFailure(VC_HANDLE_NOT_EXIST); + } + } + + /**************************************************** private method ****************************************************/ + + private Result preferredReplicaElectionByZKClient(VersionItemParam itemParam) { + BatchPartitionParam partitionParam = (BatchPartitionParam) itemParam; + + try { + KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(partitionParam.getClusterPhyId()); + + kafkaZkClient.createPreferredReplicaElection(CollectionConverters.asScala(partitionParam.getTpList()).toSet()); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "class=OpPartitionServiceImpl||method=preferredReplicaElectionByZKClient||clusterPhyId={}||errMsg=exception", + partitionParam.getClusterPhyId(), e + ); + + return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, e.getMessage()); + } + } + + private Result preferredReplicaElectionByKafkaClient(VersionItemParam itemParam) { + BatchPartitionParam partitionParam = (BatchPartitionParam) itemParam; + + try { + AdminClient adminClient = kafkaAdminClient.getClient(partitionParam.getClusterPhyId()); + + ElectLeadersResult electLeadersResult = adminClient.electLeaders( + ElectionType.PREFERRED, + new HashSet<>(partitionParam.getTpList()), + new ElectLeadersOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS) + ); + + electLeadersResult.all().get(); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "class=OpPartitionServiceImpl||method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception", + partitionParam.getClusterPhyId(), e + ); + + return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, e.getMessage()); + } + } +} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionMetricServiceImpl.java 
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionMetricServiceImpl.java
index 9e354634..9104b398 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionMetricServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionMetricServiceImpl.java
@@ -75,7 +75,9 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
     @Override
     public Result<List<PartitionMetrics>> collectPartitionsMetricsFromKafkaWithCache(Long clusterPhyId, String topicName, String metricName) {
-        List<PartitionMetrics> metricsList = CollectedMetricsLocalCache.getPartitionMetricsList(clusterPhyId, topicName, metricName);
+        String partitionMetricsKey = CollectedMetricsLocalCache.genClusterTopicMetricKey(clusterPhyId, topicName, metricName);
+
+        List<PartitionMetrics> metricsList = CollectedMetricsLocalCache.getPartitionMetricsList(partitionMetricsKey);
         if(null != metricsList) {
             return Result.buildSuc(metricsList);
         }
@@ -88,12 +90,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
         // Update the cache
         PartitionMetrics metrics = metricsResult.getData().get(0);
         metrics.getMetrics().entrySet().forEach(
-                metricEntry -> CollectedMetricsLocalCache.putPartitionMetricsList(
-                        clusterPhyId,
-                        metrics.getTopic(),
-                        metricEntry.getKey(),
-                        metricsResult.getData()
-                )
+                metricEntry -> CollectedMetricsLocalCache.putPartitionMetricsList(partitionMetricsKey, metricsResult.getData())
         );
 
         return metricsResult;
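The refactor above computes the cache key once and reuses it for both the lookup and the write-back. `genClusterTopicMetricKey` is not shown in this patch; a plausible sketch of the key builder and store follows (the `@` separator and the plain `ConcurrentHashMap` are assumptions; the real local cache may well be TTL-bounded):

```java
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class CollectedMetricsLocalCacheSketch {
    private static final Map<String, List<PartitionMetrics>> PARTITION_METRICS = new ConcurrentHashMap<>();

    // One flat key per (cluster, topic, metric) triple, so callers compute it once.
    public static String genClusterTopicMetricKey(Long clusterPhyId, String topicName, String metricName) {
        return clusterPhyId + "@" + topicName + "@" + metricName;
    }

    public static List<PartitionMetrics> getPartitionMetricsList(String key) {
        return PARTITION_METRICS.get(key);
    }

    public static void putPartitionMetricsList(String key, List<PartitionMetrics> metricsList) {
        PARTITION_METRICS.put(key, metricsList);
    }
}
```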
log.error("method=getPartitionOffsetFromKafkaAdminClient||clusterPhyId={}||errMsg=exception!", offsetParam.getClusterPhyId(), e); + log.error( + "class=PartitionServiceImpl||method=getPartitionOffsetFromKafkaAdminClient||clusterPhyId={}||topicName={}||errMsg=exception!", + offsetParam.getClusterPhyId(), offsetParam.getTopicName(), e + ); return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage()); } @@ -355,7 +358,10 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P } catch (NotExistException nee) { return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(offsetParam.getClusterPhyId())); } catch (Exception e) { - log.error("method=getPartitionOffsetFromKafkaConsumerClient||clusterPhyId={}||errMsg=exception!", offsetParam.getClusterPhyId(), e); + log.error( + "class=PartitionServiceImpl||method=getPartitionOffsetFromKafkaConsumerClient||clusterPhyId={}||topicName={}||errMsg=exception!", + offsetParam.getClusterPhyId(), offsetParam.getTopicName(), e + ); return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage()); } finally { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/replica/impl/ReplicaMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/replica/impl/ReplicaMetricServiceImpl.java index 5240e8b9..460e6520 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/replica/impl/ReplicaMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/replica/impl/ReplicaMetricServiceImpl.java @@ -77,9 +77,14 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli } @Override - public Result collectReplicaMetricsFromKafkaWithCache(Long clusterPhyId, String topic, - Integer brokerId, Integer partitionId, String metric){ - Float keyValue = CollectedMetricsLocalCache.getReplicaMetrics(clusterPhyId, brokerId, topic, partitionId, metric); + public Result collectReplicaMetricsFromKafkaWithCache(Long clusterPhyId, + String topic, + Integer brokerId, + Integer partitionId, + String metric) { + String replicaMetricsKey = CollectedMetricsLocalCache.genReplicaMetricCacheKey(clusterPhyId, brokerId, topic, partitionId, metric); + + Float keyValue = CollectedMetricsLocalCache.getReplicaMetrics(replicaMetricsKey); if(null != keyValue){ ReplicationMetrics replicationMetrics = new ReplicationMetrics(clusterPhyId, topic, partitionId, brokerId); replicationMetrics.putMetric(metric, keyValue); @@ -92,11 +97,7 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli // 更新cache ret.getData().getMetrics().entrySet().stream().forEach( metricNameAndValueEntry -> CollectedMetricsLocalCache.putReplicaMetrics( - clusterPhyId, - brokerId, - topic, - partitionId, - metricNameAndValueEntry.getKey(), + replicaMetricsKey, metricNameAndValueEntry.getValue() ) ); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicMetricServiceImpl.java index d7cca017..478c142b 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicMetricServiceImpl.java @@ -120,7 +120,9 @@ public class TopicMetricServiceImpl extends BaseMetricService implements 
TopicMe @Override public Result> collectTopicMetricsFromKafkaWithCacheFirst(Long clusterPhyId, String topicName, String metricName) { - List metricsList = CollectedMetricsLocalCache.getTopicMetrics(clusterPhyId, topicName, metricName); + String topicMetricsKey = CollectedMetricsLocalCache.genClusterTopicMetricKey(clusterPhyId, topicName, metricName); + + List metricsList = CollectedMetricsLocalCache.getTopicMetrics(topicMetricsKey); if(null != metricsList) { return Result.buildSuc(metricsList); } @@ -133,12 +135,7 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe // 更新cache TopicMetrics metrics = metricsResult.getData().get(0); metrics.getMetrics().entrySet().forEach( - metricEntry -> CollectedMetricsLocalCache.putTopicMetrics( - clusterPhyId, - metrics.getTopic(), - metricEntry.getKey(), - metricsResult.getData() - ) + metricEntry -> CollectedMetricsLocalCache.putTopicMetrics(topicMetricsKey, metricsResult.getData()) ); return metricsResult; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java index d4c58d69..53b98479 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java @@ -35,6 +35,7 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric { public static final String CLUSTER_METRIC_HEALTH_SCORE_CLUSTER = "HealthScore_Cluster"; public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER = "HealthCheckPassed_Cluster"; public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER = "HealthCheckTotal_Cluster"; + public static final String CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE = "TotalRequestQueueSize"; public static final String CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE = "TotalResponseQueueSize"; public static final String CLUSTER_METRIC_EVENT_QUEUE_SIZE = "EventQueueSize"; @@ -63,11 +64,13 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric { public static final String CLUSTER_METRIC_BYTES_OUT = "BytesOut"; public static final String CLUSTER_METRIC_BYTES_OUT_5_MIN = "BytesOut_min_5"; public static final String CLUSTER_METRIC_BYTES_OUT_15_MIN = "BytesOut_min_15"; + public static final String CLUSTER_METRIC_GROUP = "Groups"; public static final String CLUSTER_METRIC_GROUP_ACTIVES = "GroupActives"; public static final String CLUSTER_METRIC_GROUP_EMPTYS = "GroupEmptys"; public static final String CLUSTER_METRIC_GROUP_REBALANCES = "GroupRebalances"; public static final String CLUSTER_METRIC_GROUP_DEADS = "GroupDeads"; + public static final String CLUSTER_METRIC_ALIVE = "Alive"; public static final String CLUSTER_METRIC_ACL_ENABLE = "AclEnable"; diff --git a/km-dist/init/sql/ddl-ks-km.sql b/km-dist/init/sql/ddl-ks-km.sql index 90f588a6..50696917 100644 --- a/km-dist/init/sql/ddl-ks-km.sql +++ b/km-dist/init/sql/ddl-ks-km.sql @@ -13,6 +13,7 @@ CREATE TABLE `ks_km_broker` ( `status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1存活,0未存活', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + `endpoint_map` varchar(1024) NOT NULL DEFAULT '' COMMENT '监听信息', PRIMARY KEY (`id`), UNIQUE KEY 
`uniq_cluster_phy_id_broker_id` (`cluster_phy_id`,`broker_id`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Broker信息表';
@@ -51,7 +52,6 @@ CREATE TABLE `ks_km_cluster_balance_job` (
     `total_reassign_size` double NOT NULL DEFAULT '0' COMMENT '总迁移大小',
     `total_reassign_replica_num` int(16) NOT NULL DEFAULT '0' COMMENT '总迁移副本数',
     `move_in_topic_list` varchar(4096) NOT NULL DEFAULT '' COMMENT '移入topic',
-    `move_broker_list` varchar(1024) NOT NULL DEFAULT '' COMMENT '移除节点',
     `broker_balance_detail` text COMMENT '节点均衡详情',
     `status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态 1:进行中,2:准备,3,成功,4:失败,5:取消',
     `creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
diff --git a/km-dist/init/sql/ddl-logi-security.sql b/km-dist/init/sql/ddl-logi-security.sql
index 3ac20657..69fcdc66 100644
--- a/km-dist/init/sql/ddl-logi-security.sql
+++ b/km-dist/init/sql/ddl-logi-security.sql
@@ -39,7 +39,7 @@ CREATE TABLE `logi_security_oplog`
     operate_type      varchar(16) not null comment '操作类型',
     target_type       varchar(16) not null comment '对象分类',
     target            varchar(20) not null comment '操作对象',
-    operation_methods varchar(20) not null comment '操作方式',
+    operation_methods varchar(20) not null default '' comment '操作方式',
     detail            text        null comment '日志详情',
     create_time       timestamp   default CURRENT_TIMESTAMP null,
    update_time       timestamp   default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
\"weight\": 20 } ','Broker-RequestQueueSize指标','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW','{ \"value\": 0.8, \"weight\": 20 } ','Broker-NetworkProcessorAvgIdlePercent指标','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_GROUP_RE_BALANCE_TOO_FREQUENTLY','{\n \"latestMinutes\": 10,\n \"detectedTimes\": 8,\n \"weight\": 10\n}\n','Group的re-balance频率','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_NO_LEADER','{ \"value\": 1, \"weight\": 10 } ','Topic 无Leader数','know-stream'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_UNDER_REPLICA_TOO_LONG','{ \"latestMinutes\": 10, \"detectedTimes\": 8, \"weight\": 10 } ','Topic 未同步持续时间','know-streaming'); diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/BaseESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/BaseESDAO.java index dff96236..62bc6a57 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/BaseESDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/BaseESDAO.java @@ -8,7 +8,7 @@ import org.springframework.beans.factory.annotation.Autowired; /** * 直接操作es集群的dao */ -public class BaseESDAO { +public abstract class BaseESDAO { protected static final ILog LOGGER = LogFactory.getLog("ES_LOGGER"); /** diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java index d0ca75e9..1200699a 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java @@ -11,12 +11,15 @@ import com.didiglobal.logi.elasticsearch.client.request.batch.ESBatchRequest; import com.didiglobal.logi.elasticsearch.client.request.query.query.ESQueryRequest; import com.didiglobal.logi.elasticsearch.client.response.batch.ESBatchResponse; import com.didiglobal.logi.elasticsearch.client.response.batch.IndexResultItemNode; +import com.didiglobal.logi.elasticsearch.client.response.indices.gettemplate.ESIndicesGetTemplateResponse; +import com.didiglobal.logi.elasticsearch.client.response.indices.putindex.ESIndicesPutIndexResponse; +import com.didiglobal.logi.elasticsearch.client.response.indices.puttemplate.ESIndicesPutTemplateResponse; import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse; +import com.didiglobal.logi.elasticsearch.client.response.setting.template.TemplateConfig; import com.didiglobal.logi.log.ILog; import com.didiglobal.logi.log.LogFactory; import com.google.common.collect.Lists; import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO; -import com.xiaojukeji.know.streaming.km.common.constant.ESConstant; import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; @@ -37,7 +40,6 @@ import java.util.function.Function; 
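Stepping back to the dml rows above: the `value` column holds small JSON blobs. A hedged sketch of the two shapes they presumably deserialize into (field names read off the JSON literals; the class names are invented for illustration):

```java
// Threshold-style checks: HC_CLUSTER_NO_CONTROLLER, HC_BROKER_REQUEST_QUEUE_FULL, ...
public class ValueWeightConfig {
    private Double value;     // threshold the metric is compared against
    private Integer weight;   // contribution of this check to the overall health score

    public Double getValue() { return value; }
    public Integer getWeight() { return weight; }
}

// Windowed checks: HC_GROUP_RE_BALANCE_TOO_FREQUENTLY, HC_TOPIC_UNDER_REPLICA_TOO_LONG
public class WindowedConfig {
    private Integer latestMinutes;   // look-back window
    private Integer detectedTimes;   // detections within the window that trip the check
    private Integer weight;

    public Integer getLatestMinutes() { return latestMinutes; }
    public Integer getDetectedTimes() { return detectedTimes; }
    public Integer getWeight() { return weight; }
}
```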
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java
index d0ca75e9..1200699a 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java
@@ -11,12 +11,15 @@ import com.didiglobal.logi.elasticsearch.client.request.batch.ESBatchRequest;
 import com.didiglobal.logi.elasticsearch.client.request.query.query.ESQueryRequest;
 import com.didiglobal.logi.elasticsearch.client.response.batch.ESBatchResponse;
 import com.didiglobal.logi.elasticsearch.client.response.batch.IndexResultItemNode;
+import com.didiglobal.logi.elasticsearch.client.response.indices.gettemplate.ESIndicesGetTemplateResponse;
+import com.didiglobal.logi.elasticsearch.client.response.indices.putindex.ESIndicesPutIndexResponse;
+import com.didiglobal.logi.elasticsearch.client.response.indices.puttemplate.ESIndicesPutTemplateResponse;
 import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
+import com.didiglobal.logi.elasticsearch.client.response.setting.template.TemplateConfig;
 import com.didiglobal.logi.log.ILog;
 import com.didiglobal.logi.log.LogFactory;
 import com.google.common.collect.Lists;
 import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
-import com.xiaojukeji.know.streaming.km.common.constant.ESConstant;
 import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -37,7 +40,6 @@ import java.util.function.Function;
 
 @Component
 public class ESOpClient {
-
     private static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
 
     /**
@@ -45,6 +47,7 @@ public class ESOpClient {
      */
     @Value("${es.client.address}")
     private String esAddress;
+
     /**
      * es access password
      */
@@ -54,22 +57,32 @@ public class ESOpClient {
     /**
      * number of clients
      */
-    private static final int ES_CLIENT_COUNT = 30;
+    @Value("${es.client.client-cnt:10}")
+    private Integer clientCnt;
 
-    private static final int MAX_RETRY_CNT = 5;
-
-    private static final int ES_IO_THREAD_COUNT = 4;
+    /**
+     * maximum number of retries
+     */
+    @Value("${es.client.max-retry-cnt:5}")
+    private Integer maxRetryCnt;
 
+    /**
+     * number of IO threads
+     */
+    @Value("${es.client.io-thread-cnt:2}")
+    private Integer ioThreadCnt;
 
     /**
      * queue of client connections used to update es data
      */
-    private LinkedBlockingQueue<ESClient> esClientPool = new LinkedBlockingQueue<>( ES_CLIENT_COUNT );
+    private LinkedBlockingQueue<ESClient> esClientPool;
 
     @PostConstruct
     public void init(){
-        for (int i = 0; i < ES_CLIENT_COUNT; ++i) {
-            ESClient esClient = buildEsClient(esAddress, esPass, "", "");
+        esClientPool = new LinkedBlockingQueue<>( clientCnt );
+
+        for (int i = 0; i < clientCnt; ++i) {
+            ESClient esClient = this.buildEsClient(esAddress, esPass, "", "");
             if (esClient != null) {
                 this.esClientPool.add(esClient);
                 LOGGER.info("class=ESOpClient||method=init||msg=add new es client {}", esAddress);
@@ -245,7 +258,7 @@ public class ESOpClient {
         esIndexRequest.source(source);
         esIndexRequest.id(id);
 
-        for (int i = 0; i < MAX_RETRY_CNT; ++i) {
+        for (int i = 0; i < this.maxRetryCnt; ++i) {
             response = esClient.index(esIndexRequest).actionGet(10, TimeUnit.SECONDS);
             if (response == null) {
                 continue;
@@ -307,7 +320,7 @@ public class ESOpClient {
             batchRequest.addNode(BatchType.INDEX, indexName, null, po.getKey(), JSON.toJSONString(po));
         }
 
-        for (int i = 0; i < MAX_RETRY_CNT; ++i) {
+        for (int i = 0; i < this.maxRetryCnt; ++i) {
             response = esClient.batch(batchRequest).actionGet(2, TimeUnit.MINUTES);
             if (response == null) {continue;}
@@ -331,7 +344,94 @@ public class ESOpClient {
         return false;
     }
 
+    /**
+     * Check whether the index already exists
+     */
+    public boolean indexExist(String indexName) {
+        ESClient esClient = null;
+        try {
+            esClient = this.getESClientFromPool();
+            if (esClient == null) {
+                return false;
+            }
+
+            // check whether the index exists
+            return esClient.admin().indices().prepareExists(indexName).execute().actionGet(30, TimeUnit.SECONDS).isExists();
+        } catch (Exception e){
+            LOGGER.warn("class=ESOpClient||method=indexExist||indexName={}||msg=exception!", indexName, e);
+        } finally {
+            if (esClient != null) {
+                returnESClientToPool(esClient);
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Create the index
+     */
+    public boolean createIndex(String indexName) {
+        if (indexExist(indexName)) {
+            return true;
+        }
+
+        ESClient client = getESClientFromPool();
+        if (client != null) {
+            try {
+                ESIndicesPutIndexResponse response = client.admin().indices().preparePutIndex(indexName).execute()
+                        .actionGet(30, TimeUnit.SECONDS);
+                return response.getAcknowledged();
+            } catch (Exception e){
+                LOGGER.warn( "msg=create index fail||indexName={}", indexName, e);
+            } finally {
+                returnESClientToPool(client);
+            }
+        }
+
+        return false;
+    }
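A hedged sketch of how these two helpers combine for a daily metric index (the naming scheme here is an assumption; the real format comes from `IndexNameUtils`, which is not shown in this patch):

```java
// Ensure today's daily index exists before writing metrics into it.
String indexName = "ks_kafka_cluster_metric";                    // hypothetical template-level name
String realIndex = indexName + "_" + java.time.LocalDate.now();  // e.g. ks_kafka_cluster_metric_2022-08-01

if (!esOpClient.indexExist(realIndex)) {
    esOpClient.createIndex(realIndex);
}
```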
+    /**
+     * Create the index template
+     */
+    public boolean createIndexTemplateIfNotExist(String indexTemplateName, String config) {
+        ESClient esClient = null;
+
+        try {
+            esClient = this.getESClientFromPool();
+
+            // fetch the index template config that already exists in es
+            ESIndicesGetTemplateResponse getTemplateResponse =
+                    esClient.admin().indices().prepareGetTemplate( indexTemplateName ).execute().actionGet( 30, TimeUnit.SECONDS );
+
+            TemplateConfig templateConfig = getTemplateResponse.getMultiTemplatesConfig().getSingleConfig();
+
+            if (null != templateConfig) {
+                return true;
+            }
+
+            // create the new template
+            ESIndicesPutTemplateResponse response = esClient.admin().indices().preparePutTemplate( indexTemplateName )
+                    .setTemplateConfig( config ).execute().actionGet( 30, TimeUnit.SECONDS );
+
+            return response.getAcknowledged();
+        } catch (Exception e) {
+            LOGGER.warn(
+                    "class=ESOpClient||method=createIndexTemplateIfNotExist||indexTemplateName={}||config={}||msg=exception!",
+                    indexTemplateName, config, e
+            );
+        } finally {
+            if (esClient != null) {
+                this.returnESClientToPool(esClient);
+            }
+        }
+
+        return false;
+    }
+
     /**************************************************** private method ****************************************************/
+
     /**
      * Execute the query
      * @param request
@@ -428,8 +528,8 @@ public class ESOpClient {
         if(StringUtils.isNotBlank(password)){
             esClient.setPassword(password);
         }
-        if(ES_IO_THREAD_COUNT > 0) {
-            esClient.setIoThreadCount( ES_IO_THREAD_COUNT );
+        if(this.ioThreadCnt > 0) {
+            esClient.setIoThreadCount( this.ioThreadCnt );
         }
 
         // configure http timeouts
@@ -439,11 +539,13 @@ public class ESOpClient {
 
             return esClient;
         } catch (Exception e) {
-            esClient.close();
-
-            LOGGER.error("class=ESESOpClient||method=buildEsClient||errMsg={}||address={}", e.getMessage(), address,
-                    e);
+            try {
+                esClient.close();
+            } catch (Exception innerE) {
+                // ignore
+            }
 
+            LOGGER.error("class=ESOpClient||method=buildEsClient||errMsg={}||address={}", e.getMessage(), address, e);
             return null;
         }
     }
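The `BaseMetricESDAO` change that follows adds `optimizeMetricPoints()`, which pads a gap of more than one minute between consecutive points with a single synthetic point carrying the earlier value, so charts step flat instead of plunging to the baseline when a sample is missing. A self-contained sketch of the same idea, using plain timestamp/value pairs instead of `MetricPointVO`:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class GapFillDemo {
    static final long ONE_MIN = 60 * 1000L;

    static class Point {
        final long ts; final double value;
        Point(long ts, double value) { this.ts = ts; this.value = value; }
        @Override public String toString() { return "(" + ts + "," + value + ")"; }
    }

    // Same padding rule as optimizeMetricPoints(): one synthetic point per hole.
    static List<Point> pad(List<Point> points) {
        if (points.size() < 2) { return points; }
        points.sort(Comparator.comparingLong(p -> p.ts));

        List<Point> out = new ArrayList<>();
        for (int i = 0; i + 1 < points.size(); i++) {
            Point a = points.get(i);
            Point b = points.get(i + 1);
            out.add(a);
            if (b.ts - a.ts > ONE_MIN) {
                out.add(new Point(a.ts + ONE_MIN, a.value));   // flat step with the earlier value
            }
            if (i + 1 == points.size() - 1) {
                out.add(b);   // keep the final point
            }
        }
        return out;
    }

    public static void main(String[] args) {
        List<Point> in = new ArrayList<>();
        in.add(new Point(0L, 5.0));
        in.add(new Point(3 * ONE_MIN, 7.0));
        // prints [(0,5.0), (60000,5.0), (180000,7.0)]
        System.out.println(pad(in));
    }
}
```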
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java
index 8a0f96a9..fe04e4d1 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java
@@ -8,11 +8,12 @@ import com.google.common.collect.Maps;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.search.*;
 import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
 import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BaseMetricESPO;
-import com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum;
+import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
 import com.xiaojukeji.know.streaming.km.common.utils.IndexNameUtils;
 import com.xiaojukeji.know.streaming.km.persistence.es.BaseESDAO;
 import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
 import lombok.NoArgsConstructor;
+import org.springframework.scheduling.annotation.Scheduled;
 import org.springframework.util.CollectionUtils;
 
 import java.util.*;
@@ -25,7 +26,8 @@ public class BaseMetricESDAO extends BaseESDAO {
     /**
      * name of the index being operated on
      */
-    protected String indexName;
+    protected String indexName;
+    protected String indexTemplate;
 
     protected static final Long ONE_MIN  = 60 * 1000L;
     protected static final Long FIVE_MIN = 5 * ONE_MIN;
@@ -35,10 +37,24 @@ public class BaseMetricESDAO extends BaseESDAO {
     /**
      * kafka monitoring data for the different dimensions
      */
-    private static Map<KafkaMetricIndexEnum, BaseMetricESDAO> ariusStatsEsDaoMap = Maps
+    private static Map<String, BaseMetricESDAO> ariusStatsEsDaoMap = Maps
             .newConcurrentMap();
 
-    public static BaseMetricESDAO getByStatsType(KafkaMetricIndexEnum statsType) {
+    /**
+     * Check whether the es index exists; create it if it does not
+     */
+    @Scheduled(cron = "0 3/5 * * * ?")
+    public void checkCurrentDayIndexExist(){
+        String realIndex = IndexNameUtils.genCurrentDailyIndexName(indexName);
+
+        if(esOpClient.indexExist(realIndex)){return;}
+
+        if(esOpClient.createIndexTemplateIfNotExist(indexName, indexTemplate)){
+            esOpClient.createIndex(realIndex);
+        }
+    }
+
+    public static BaseMetricESDAO getByStatsType(String statsType) {
         return ariusStatsEsDaoMap.get(statsType);
     }
 
@@ -48,7 +64,7 @@ public class BaseMetricESDAO extends BaseESDAO {
      * @param statsType
      * @param baseAriusStatsEsDao
      */
-    public static void register(KafkaMetricIndexEnum statsType, BaseMetricESDAO baseAriusStatsEsDao) {
+    public static void register(String statsType, BaseMetricESDAO baseAriusStatsEsDao) {
         ariusStatsEsDaoMap.put(statsType, baseAriusStatsEsDao);
     }
 
@@ -358,7 +374,50 @@ public class BaseMetricESDAO extends BaseESDAO {
         String dsl = dslLoaderUtil.getFormatDslByFileName(DslsConstant.GET_LATEST_METRIC_TIME, startTime, endTime, appendQueryDsl);
 
         String realIndexName = IndexNameUtils.genDailyIndexName(indexName, startTime, endTime);
-        return esOpClient.performRequest(realIndexName, dsl, s -> s.getHits().getHits().isEmpty()
-                ? System.currentTimeMillis() : ((Map)s.getHits().getHits().get(0).getSource()).get(TIME_STAMP), 3);
+        return esOpClient.performRequest(
+                realIndexName,
+                dsl,
+                s -> s == null || s.getHits().getHits().isEmpty() ? System.currentTimeMillis() : ((Map)s.getHits().getHits().get(0).getSource()).get(TIME_STAMP),
+                3
+        );
+    }
+
+    /**
+     * Fill in missing points in metricPointVOS
+     */
+    protected List<MetricPointVO> optimizeMetricPoints(List<MetricPointVO> metricPointVOS){
+        if(CollectionUtils.isEmpty(metricPointVOS)){return metricPointVOS;}
+
+        int size = metricPointVOS.size();
+        if(size < 2){return metricPointVOS;}
+
+        Collections.sort(metricPointVOS);
+
+        List<MetricPointVO> rets = new ArrayList<>();
+        for(int first = 0, second = first + 1; second < size; first++, second++){
+            MetricPointVO firstPoint  = metricPointVOS.get(first);
+            MetricPointVO secondPoint = metricPointVOS.get(second);
+
+            if(null != firstPoint && null != secondPoint){
+                rets.add(firstPoint);
+
+                // there is a hole here, so add one extra point
+                if(secondPoint.getTimeStamp() - firstPoint.getTimeStamp() > ONE_MIN){
+                    MetricPointVO addPoint = new MetricPointVO();
+                    addPoint.setName(firstPoint.getName());
+                    addPoint.setAggType(firstPoint.getAggType());
+                    addPoint.setValue(firstPoint.getValue());
+                    addPoint.setTimeStamp(firstPoint.getTimeStamp() + ONE_MIN);
+
+                    rets.add(addPoint);
+                }
+
+                if(second == size - 1){
+                    rets.add(secondPoint);
+                }
+            }
+        }
+
+        return rets;
     }
 }
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BrokerMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BrokerMetricESDAO.java
index 1af1e357..edc186f4 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BrokerMetricESDAO.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BrokerMetricESDAO.java
@@ -18,14 +18,16 @@ import java.util.*;
 import java.util.stream.Collectors;
 
 import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
-import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.BROKER_INFO;
+import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
 
 @Component
 public class BrokerMetricESDAO extends BaseMetricESDAO {
     @PostConstruct
     public void init() {
-        super.indexName = BROKER_INFO.getIndex();
-        BaseMetricESDAO.register(BROKER_INFO, this);
+        super.indexName     = BROKER_INDEX;
+        super.indexTemplate = BROKER_TEMPLATE;
+        checkCurrentDayIndexExist();
+
BaseMetricESDAO.register(indexName, this); } protected FutureWaitUtil queryFuture = FutureWaitUtil.init("BrokerMetricESDAO", 4,8, 500); @@ -41,7 +43,11 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { DslsConstant.GET_BROKER_LATEST_METRICS, clusterId, brokerId, startTime, endTime); BrokerMetricPO brokerMetricPO = esOpClient.performRequestAndTakeFirst( - brokerId.toString(), realIndex(startTime, endTime), dsl, BrokerMetricPO.class); + brokerId.toString(), + realIndex(startTime, endTime), + dsl, + BrokerMetricPO.class + ); return (null == brokerMetricPO) ? new BrokerMetricPO(clusterId, brokerId) : brokerMetricPO; } @@ -49,8 +55,12 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { /** * 获取集群 clusterPhyId 中每个 metric 的指定 broker 在指定时间[startTime、endTime]区间内聚合计算(avg、max)之后的统计值 */ - public Map getBrokerMetricsPoint(Long clusterPhyId, Integer brokerId, List metrics, - String aggType, Long startTime, Long endTime){ + public Map getBrokerMetricsPoint(Long clusterPhyId, + Integer brokerId, + List metrics, + String aggType, + Long startTime, + Long endTime) { //1、获取需要查下的索引 String realIndex = realIndex(startTime, endTime); @@ -60,8 +70,13 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { String dsl = dslLoaderUtil.getFormatDslByFileName( DslsConstant.GET_BROKER_AGG_SINGLE_METRICS, clusterPhyId, brokerId, startTime, endTime, aggDsl); - return esOpClient.performRequestWithRouting(String.valueOf(brokerId), realIndex, dsl, - s -> handleSingleESQueryResponse(s, metrics, aggType), 3); + return esOpClient.performRequestWithRouting( + String.valueOf(brokerId), + realIndex, + dsl, + s -> handleSingleESQueryResponse(s, metrics, aggType), + 3 + ); } /** @@ -75,10 +90,19 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { Map> metricBrokerIds = getTopNBrokerIds(clusterPhyId, metrics, aggType, topN, startTime, endTime); Table> table = HashBasedTable.create(); + //2、查询指标 for(String metric : metricBrokerIds.keySet()){ - table.putAll(listBrokerMetricsByBrokerIds(clusterPhyId, Arrays.asList(metric), - aggType, metricBrokerIds.getOrDefault(metric, brokerIds), startTime, endTime)); + table.putAll( + this.listBrokerMetricsByBrokerIds( + clusterPhyId, + Arrays.asList(metric), + aggType, + metricBrokerIds.getOrDefault(metric, brokerIds), + startTime, + endTime + ) + ); } return table; @@ -87,9 +111,12 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { /** * 获取集群 clusterPhyId 中每个 metric 的指定 brokers 在指定时间[startTime、endTime]区间内所有的指标 */ - public Table> listBrokerMetricsByBrokerIds(Long clusterPhyId, List metrics, - String aggType, List brokerIds, - Long startTime, Long endTime){ + public Table> listBrokerMetricsByBrokerIds(Long clusterPhyId, + List metrics, + String aggType, + List brokerIds, + Long startTime, + Long endTime){ //1、获取需要查下的索引 String realIndex = realIndex(startTime, endTime); @@ -105,22 +132,34 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { for(Long brokerId : brokerIds){ try { String dsl = dslLoaderUtil.getFormatDslByFileName( - DslsConstant.GET_BROKER_AGG_LIST_METRICS, clusterPhyId, brokerId, startTime, endTime, interval, aggDsl); + DslsConstant.GET_BROKER_AGG_LIST_METRICS, + clusterPhyId, + brokerId, + startTime, + endTime, + interval, + aggDsl + ); queryFuture.runnableTask( String.format("class=BrokerMetricESDAO||method=listBrokerMetricsByBrokerIds||ClusterPhyId=%d", clusterPhyId), 5000, () -> { - Map> metricMap = esOpClient.performRequestWithRouting(String.valueOf(brokerId), realIndex, dsl, - s -> handleListESQueryResponse(s, metrics, aggType), 3); + 
Map> metricMap = esOpClient.performRequestWithRouting( + String.valueOf(brokerId), + realIndex, + dsl, + s -> handleListESQueryResponse(s, metrics, aggType), + 3 + ); - synchronized (table){ + synchronized (table) { for(String metric : metricMap.keySet()){ table.put(metric, brokerId, metricMap.get(metric)); } } }); - }catch (Exception e){ + } catch (Exception e){ LOGGER.error("method=listBrokerMetricsByBrokerIds||clusterPhyId={}||brokerId{}||errMsg=exception!", clusterPhyId, brokerId, e); } } @@ -221,7 +260,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO { } } ); - metricMap.put(metric, metricPoints); + metricMap.put(metric, optimizeMetricPoints(metricPoints)); } return metricMap; diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ClusterMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ClusterMetricESDAO.java index 63a9f3f1..82a86253 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ClusterMetricESDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ClusterMetricESDAO.java @@ -23,15 +23,17 @@ import java.util.List; import java.util.Map; import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*; -import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.CLUSTER_INFO; +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*; @Component public class ClusterMetricESDAO extends BaseMetricESDAO { @PostConstruct public void init() { - super.indexName = CLUSTER_INFO.getIndex(); - BaseMetricESDAO.register(CLUSTER_INFO, this); + super.indexName = CLUSTER_INDEX; + super.indexTemplate = CLUSTER_TEMPLATE; + checkCurrentDayIndexExist(); + BaseMetricESDAO.register(indexName, this); } protected FutureWaitUtil queryFuture = FutureWaitUtil.init("ClusterMetricESDAO", 4,8, 500); @@ -207,7 +209,7 @@ public class ClusterMetricESDAO extends BaseMetricESDAO { } } ); - metricMap.put(metric, metricPoints); + metricMap.put(metric, optimizeMetricPoints(metricPoints)); } return metricMap; diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/GroupMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/GroupMetricESDAO.java index 42ae0ace..cf65e6ef 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/GroupMetricESDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/GroupMetricESDAO.java @@ -23,16 +23,17 @@ import java.util.stream.Collectors; import static com.xiaojukeji.know.streaming.km.common.constant.Constant.ZERO; import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*; -import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.KEY; -import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.GROUP_INFO; +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*; @Component public class GroupMetricESDAO extends BaseMetricESDAO { @PostConstruct public void init() { - super.indexName = GROUP_INFO.getIndex(); - BaseMetricESDAO.register(GROUP_INFO, this); + super.indexName = GROUP_INDEX; + super.indexTemplate = GROUP_TEMPLATE; + checkCurrentDayIndexExist(); + BaseMetricESDAO.register(indexName, this); } protected FutureWaitUtil queryFuture = FutureWaitUtil.init("GroupMetricESDAO", 4,8, 500); @@ -206,7 +207,7 
@@ public class GroupMetricESDAO extends BaseMetricESDAO { } } ); - metricMap.put(metric, metricPoints); + metricMap.put(metric, optimizeMetricPoints(metricPoints)); } return metricMap; diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/PartitionMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/PartitionMetricESDAO.java index 85dc55df..4f86852b 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/PartitionMetricESDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/PartitionMetricESDAO.java @@ -8,7 +8,7 @@ import javax.annotation.PostConstruct; import java.util.List; -import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.PARTITION_INFO; +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*; /** * @author didi @@ -18,8 +18,10 @@ public class PartitionMetricESDAO extends BaseMetricESDAO { @PostConstruct public void init() { - super.indexName = PARTITION_INFO.getIndex(); - BaseMetricESDAO.register(PARTITION_INFO, this); + super.indexName = PARTITION_INDEX; + super.indexTemplate = PARTITION_TEMPLATE; + checkCurrentDayIndexExist(); + BaseMetricESDAO.register(indexName, this); } public PartitionMetricPO getPartitionLatestMetrics(Long clusterPhyId, String topic, diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ReplicationMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ReplicationMetricESDAO.java index e5f9f164..1f604cc0 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ReplicationMetricESDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/ReplicationMetricESDAO.java @@ -14,7 +14,7 @@ import java.util.List; import java.util.Map; import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.VALUE; -import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.REPLICATION_INFO; +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*; /** * @author didi @@ -24,8 +24,10 @@ public class ReplicationMetricESDAO extends BaseMetricESDAO { @PostConstruct public void init() { - super.indexName = REPLICATION_INFO.getIndex(); - BaseMetricESDAO.register(REPLICATION_INFO, this); + super.indexName = REPLICATION_INDEX; + super.indexTemplate = REPLICATION_TEMPLATE; + checkCurrentDayIndexExist(); + BaseMetricESDAO.register(indexName, this); } /** diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/TopicMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/TopicMetricESDAO.java index 402333ee..e9089c17 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/TopicMetricESDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/TopicMetricESDAO.java @@ -22,15 +22,17 @@ import java.util.*; import java.util.stream.Collectors; import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*; -import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.TOPIC_INFO; +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*; @Component public class TopicMetricESDAO extends BaseMetricESDAO { @PostConstruct public void init() 
{
-        super.indexName = TOPIC_INFO.getIndex();
-        BaseMetricESDAO.register(TOPIC_INFO, this);
+        super.indexName     = TOPIC_INDEX;
+        super.indexTemplate = TOPIC_TEMPLATE;
+        checkCurrentDayIndexExist();
+        BaseMetricESDAO.register(indexName, this);
     }
 
     protected FutureWaitUtil queryFuture = FutureWaitUtil.init("TopicMetricESDAO", 4,8, 500);
@@ -352,7 +354,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
                 }
             }
         );
-        metricMap.put(metric, metricPoints);
+        metricMap.put(metric, optimizeMetricPoints(metricPoints));
     }
 
     return metricMap;
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java
index afa904af..68d1011e 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java
@@ -165,8 +165,8 @@ public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
                 clusterPhy.getId(),
                 brokerId,
                 broker.getStartTimestamp(),
-                broker.getHost(),
-                broker.getJmxPort() != null? broker.getJmxPort(): jmxConfig.getJmxPort(),
+                jmxConfig != null ? broker.getJmxHost(jmxConfig.getUseWhichEndpoint()) : broker.getHost(),
+                broker.getJmxPort() != null ? broker.getJmxPort() : jmxConfig.getJmxPort(),
                 jmxConfig
         );
 
@@ -191,6 +191,6 @@ public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
         lambdaQueryWrapper.eq(BrokerPO::getStatus, Constant.ALIVE);
 
         BrokerPO brokerPO = brokerDAO.selectOne(lambdaQueryWrapper);
-        return ConvertUtil.obj2Obj(brokerPO, Broker.class);
+        return Broker.buildFrom(brokerPO);
     }
 }
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/mysql/broker/BrokerDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/mysql/broker/BrokerDAO.java
index c05a66ad..5169bbad 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/mysql/broker/BrokerDAO.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/mysql/broker/BrokerDAO.java
@@ -6,5 +6,4 @@ import org.springframework.stereotype.Repository;
 
 @Repository
 public interface BrokerDAO extends BaseMapper {
-    int replace(BrokerPO brokerPO);
 }
diff --git a/km-persistence/src/main/resources/mybatis/BrokerMapper.xml b/km-persistence/src/main/resources/mybatis/BrokerMapper.xml
index 360fe9c8..3d9f5d8f 100644
--- a/km-persistence/src/main/resources/mybatis/BrokerMapper.xml
+++ b/km-persistence/src/main/resources/mybatis/BrokerMapper.xml
@@ -14,12 +14,7 @@
+
-
-    REPLACE ks_km_broker
-    (cluster_phy_id, broker_id, host, port, jmx_port, start_timestamp, status, update_time)
-    VALUES
-    (#{clusterPhyId}, #{brokerId}, #{host}, #{port}, #{jmxPort}, #{startTimestamp}, #{status}, #{updateTime})
-
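The KafkaJMXClient change above picks the JMX host from a configurable listener endpoint instead of always using the broker's default host; the `endpoint_map` column added to `ks_km_broker` earlier in this patch presumably feeds it. `Broker.getJmxHost(...)` is not shown, so the following is a purely hypothetical sketch:

```java
import java.util.Map;

// Hypothetical excerpt of the Broker entity; field and method shapes are guesses.
public class BrokerSketch {
    private String host;                        // default broker host
    private Map<String, String> endpointMap;    // listener name -> host, parsed from endpoint_map

    public String getJmxHost(String useWhichEndpoint) {
        if (useWhichEndpoint == null || endpointMap == null) {
            return host;                        // fall back to the default host
        }
        // e.g. endpointMap = {"INTERNAL": "10.0.0.12", "EXTERNAL": "b-1.example.com"}
        return endpointMap.getOrDefault(useWhichEndpoint, host);
    }
}
```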
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/MultiClusterPhyController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/MultiClusterPhyController.java
index d443bcac..34b907a8 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/MultiClusterPhyController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/MultiClusterPhyController.java
@@ -16,7 +16,7 @@ import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.validation.annotation.Validated;
 import org.springframework.web.bind.annotation.*;
 
-import java.util.Set;
+import java.util.List;
 
 /**
@@ -49,7 +49,7 @@ public class MultiClusterPhyController {
 
     @ApiOperation(value = "多物理集群-已存在kafka版本", notes = "")
     @GetMapping(value = "physical-clusters/exist-version")
-    public Result<Set<String>> getClusterPhysVersion() {
-        return Result.buildSuc(clusterPhyService.getClusterVersionSet());
+    public Result<List<String>> getClusterPhysVersion() {
+        return Result.buildSuc(clusterPhyService.getClusterVersionList());
     }
 }
diff --git a/km-rest/src/main/resources/application.yml b/km-rest/src/main/resources/application.yml
index a6417157..4b0831c7 100644
--- a/km-rest/src/main/resources/application.yml
+++ b/km-rest/src/main/resources/application.yml
@@ -73,8 +73,13 @@ client-pool:
   borrow-timeout-unit-ms: 5000   # borrow timeout, in milliseconds
 
-# es client address
-es.client.address: 127.0.0.1:8060
+# ES client configuration
+es:
+  client:
+    address: 127.0.0.1:8091,127.0.0.1:8061,127.0.0.1:8061
+    client-cnt: 10
+    io-thread-cnt: 2
+    max-retry-cnt: 5
 
 # Prometheus metrics export configuration
 management:
diff --git a/km-rest/src/test/java/com/xiaojukeji/know/streaming/km/persistence/es/ClusterMetricESDAOTest.java b/km-rest/src/test/java/com/xiaojukeji/know/streaming/km/persistence/es/ClusterMetricESDAOTest.java
index 2cdb895e..c69f7129 100644
--- a/km-rest/src/test/java/com/xiaojukeji/know/streaming/km/persistence/es/ClusterMetricESDAOTest.java
+++ b/km-rest/src/test/java/com/xiaojukeji/know/streaming/km/persistence/es/ClusterMetricESDAOTest.java
@@ -20,8 +20,8 @@ public class ClusterMetricESDAOTest extends KnowStreamApplicationTest {
 
     @Test
     public void listClusterMetricsByClusterIdsTest(){
-        List<String> metrics   = Arrays.asList("BytesIn_min_1", "BytesOut_min_1");
-        List<Long> clusterIds  = Arrays.asList(123L);
+        List<String> metrics   = Arrays.asList("MessagesIn");
+        List<Long> clusterIds  = Arrays.asList(293L);
 
         Long endTime   = System.currentTimeMillis();
         Long startTime = endTime - 4 * 60 * 60 * 1000;
*", + autoRegister = true, + consensual = ConsensualEnum.BROADCAST, + timeout = 2 * 60) public class HealthCheckTask extends AbstractClusterPhyDispatchTask { private static final ILog log = LogFactory.getLog(HealthCheckTask.class); + @Autowired + private TaskThreadPoolService taskThreadPoolService; + @Autowired private HealthCheckResultService healthCheckResultService; @@ -38,6 +46,16 @@ public class HealthCheckTask extends AbstractClusterPhyDispatchTask { @Override public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + taskThreadPoolService.submitHeavenTask( + String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()), + 100000, + () -> this.calAndUpdateHealthCheckResult(clusterPhy, triggerTimeUnitMs) + ); + + return TaskResult.SUCCESS; + } + + private void calAndUpdateHealthCheckResult(ClusterPhy clusterPhy, long triggerTimeUnitMs) { // 获取配置,<配置名,配置信息> Map healthConfigMap = healthCheckResultService.getClusterHealthConfig(clusterPhy.getId()); @@ -73,8 +91,6 @@ public class HealthCheckTask extends AbstractClusterPhyDispatchTask { } catch (Exception e) { log.error("method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); } - - return TaskResult.SUCCESS; } private List getNoResResult(Long clusterPhyId, AbstractHealthCheckService healthCheckService, Map healthConfigMap) { diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/TaskThreadPoolService.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/TaskThreadPoolService.java index 5e7d222a..884a572d 100644 --- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/TaskThreadPoolService.java +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/TaskThreadPoolService.java @@ -10,7 +10,7 @@ import javax.annotation.PostConstruct; /** * 为了尽量避免大任务的执行,由LogIJob的线程执行, * 因此,在Task模块,需要有自己的线程池来执行相关任务, - * 而 FutureUtilsService 的职责就是负责任务的执行。 + * 而 TaskThreadPoolService 的职责就是负责任务的执行。 */ @Service @NoArgsConstructor diff --git a/pom.xml b/pom.xml index d1dd7544..5d0052d8 100644 --- a/pom.xml +++ b/pom.xml @@ -15,7 +15,7 @@ - 3.0.0-beta + 3.0.0-beta.1 8 8
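The HealthCheckTask refactor above is the pattern the Task module uses to keep slow work off the scheduler threads: `processSubTask` only enqueues and returns, and the heavy lifting runs on `TaskThreadPoolService`. A hedged sketch of applying the same pattern to another dispatch task (`submitHeavenTask`'s signature is copied from the call above; the timeout unit is assumed to be milliseconds):

```java
public class SomeOtherTask extends AbstractClusterPhyDispatchTask {

    @Autowired
    private TaskThreadPoolService taskThreadPoolService;

    @Override
    public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
        // Return to the scheduler immediately; do the real work asynchronously.
        taskThreadPoolService.submitHeavenTask(
                String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
                100000,
                () -> doHeavyWork(clusterPhy, triggerTimeUnitMs)
        );
        return TaskResult.SUCCESS;
    }

    private void doHeavyWork(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
        // collect, compute, persist results here
    }
}
```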