diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ef4ad6a7..8a8b38a9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,7 +13,7 @@ Before sending pull request to this project, please read and follow guidelines b Add device mode, API version, related log, screenshots and other related information in your pull request if possible. -NOTE: We assume all your contribution can be licensed under the [Apache License 2.0](LICENSE). +NOTE: We assume all your contribution can be licensed under the [AGPL-3.0](LICENSE). ## Issues diff --git a/README.md b/README.md index 70585679..1ae6ca59 100644 --- a/README.md +++ b/README.md @@ -133,6 +133,8 @@ PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况 **`2、微信群`** 微信加群:添加`mike_zhangliang`、`PenceXie`的微信号备注KnowStreaming加群。 +
+wx ## Star History diff --git a/Releases_Notes.md b/Releases_Notes.md index 616d1eab..f453b582 100644 --- a/Releases_Notes.md +++ b/Releases_Notes.md @@ -1,5 +1,66 @@ +## v3.0.0 + +**Bug修复** +- 修复 Group 指标防重复采集不生效问题 +- 修复自动创建 ES 索引模版失败问题 +- 修复 Group+Topic 列表中存在已删除Topic的问题 +- 修复使用 MySQL-8 ,因兼容问题, start_time 信息为 NULL 时,会导致创建任务失败的问题 +- 修复 Group 信息表更新时,出现死锁的问题 +- 修复图表补点逻辑与图表时间范围不适配的问题 + + +**体验优化** +- 按照资源类别,拆分健康巡检任务 +- 优化 Group 详情页的指标为实时获取 +- 图表拖拽排序支持用户级存储 +- 多集群列表 ZK 信息展示兼容无 ZK 情况 +- Topic 详情消息预览支持复制功能 +- 部分内容大数字支持千位分割符展示 + + +**新增** +- 集群信息中,新增 Zookeeper 客户端配置字段 +- 集群信息中,新增 Kafka 集群运行模式字段 +- 新增 docker-compose 的部署方式 + + + +## v3.0.0-beta.3 + +**文档** +- FAQ 补充权限识别失败问题的说明 +- 同步更新文档,保持与官网一致 + + +**Bug修复** +- Offset 信息获取时,过滤掉无 Leader 的分区 +- 升级 oshi-core 版本至 5.6.1 版本,修复 Windows 系统获取系统指标失败问题 +- 修复 JMX 连接被关闭后,未进行重建的问题 +- 修复因 DB 中 Broker 信息不存在导致 TotalLogSize 指标获取时抛空指针问题 +- 修复 dml-logi.sql 中,SQL 注释错误的问题 +- 修复 startup.sh 中,识别操作系统类型错误的问题 +- 修复配置管理页面删除配置失败的问题 +- 修复系统管理应用文件引用路径 +- 修复 Topic Messages 详情提示信息点击跳转 404 的问题 +- 修复扩副本时,当前副本数不显示问题 + + +**体验优化** +- Topic-Messages 页面,增加返回数据的排序以及按照Earliest/Latest的获取方式 +- 优化 GroupOffsetResetEnum 类名为 OffsetTypeEnum,使得类名含义更准确 +- 移动 KafkaZKDAO 类,及 Kafka Znode 实体类的位置,使得 Kafka Zookeeper DAO 更加内聚及便于识别 +- 后端补充 Overview 页面指标排序的功能 +- 前端 Webpack 配置优化 +- Cluster Overview 图表取消放大展示功能 +- 列表页增加手动刷新功能 +- 接入/编辑集群,优化 JMX-PORT,Version 信息的回显,优化JMX信息的展示 +- 提高登录页面图片展示清晰度 +- 部分样式和文案优化 + +--- + ## v3.0.0-beta.2 **文档** diff --git a/docs/install_guide/单机部署手册.md b/docs/install_guide/单机部署手册.md index f9f5ad1a..c42e6318 100644 --- a/docs/install_guide/单机部署手册.md +++ b/docs/install_guide/单机部署手册.md @@ -59,6 +59,8 @@ sh deploy_KnowStreaming-offline.sh ### 2.1.3、容器部署 +#### 2.1.3.1、Helm + **环境依赖** - Kubernetes >= 1.14 ,Helm >= 2.17.0 @@ -72,11 +74,11 @@ sh deploy_KnowStreaming-offline.sh ```bash # 相关镜像在Docker Hub都可以下载 # 快速安装(NAMESPACE需要更改为已存在的,安装启动需要几分钟初始化请稍等~) -helm install -n [NAMESPACE] [NAME] http://download.knowstreaming.com/charts/knowstreaming-manager-0.1.3.tgz +helm 
install -n [NAMESPACE] [NAME] http://download.knowstreaming.com/charts/knowstreaming-manager-0.1.5.tgz # 获取KnowStreaming前端ui的service. 默认nodeport方式. # (http://nodeIP:nodeport,默认用户名密码:admin/admin2022_) -# `v3.0.0-beta.2`版本开始,默认账号密码为`admin` / `admin`; +# `v3.0.0-beta.2`版本开始(helm chart包版本0.1.4开始),默认账号密码为`admin` / `admin`; # 添加仓库 helm repo add knowstreaming http://download.knowstreaming.com/charts @@ -87,6 +89,156 @@ helm pull knowstreaming/knowstreaming-manager   +#### 2.1.3.2、Docker Compose +**环境依赖** + +- [Docker](https://docs.docker.com/engine/install/) +- [Docker Compose](https://docs.docker.com/compose/install/) + + +**安装命令** +```bash +# `v3.0.0-beta.2`版本开始(docker镜像为0.2.0版本开始),默认账号密码为`admin` / `admin`; +# https://hub.docker.com/u/knowstreaming 在此处寻找最新镜像版本 +# mysql与es可以使用自己搭建的服务,调整对应配置即可 + +# 复制docker-compose.yml到指定位置后执行下方命令即可启动 +docker-compose up -d +``` + +**验证安装** +```shell +docker-compose ps +# 验证启动 - 状态为 UP 则表示成功 + Name Command State Ports +---------------------------------------------------------------------------------------------------- +elasticsearch-single /usr/local/bin/docker-entr ... Up 9200/tcp, 9300/tcp +knowstreaming-init /bin/bash /es_template_cre ... Up +knowstreaming-manager /bin/sh /ks-start.sh Up 80/tcp +knowstreaming-mysql /entrypoint.sh mysqld Up (health: starting) 3306/tcp, 33060/tcp +knowstreaming-ui /docker-entrypoint.sh ngin ... Up 0.0.0.0:80->80/tcp + +# 稍等一分钟左右 knowstreaming-init 会退出,表示es初始化完成,可以访问页面 + Name Command State Ports +------------------------------------------------------------------------------------------- +knowstreaming-init /bin/bash /es_template_cre ... 
Exit 0 +knowstreaming-mysql /entrypoint.sh mysqld Up (healthy) 3306/tcp, 33060/tcp +``` + +**访问** +```http request +http://127.0.0.1:80/ +``` + + +**docker-compose.yml** +```yml +version: "2" +services: + # *不要调整knowstreaming-manager服务名称,ui中会用到 + knowstreaming-manager: + image: knowstreaming/knowstreaming-manager:latest + container_name: knowstreaming-manager + privileged: true + restart: always + depends_on: + - elasticsearch-single + - knowstreaming-mysql + expose: + - 80 + command: + - /bin/sh + - /ks-start.sh + environment: + TZ: Asia/Shanghai + # mysql服务地址 + SERVER_MYSQL_ADDRESS: knowstreaming-mysql:3306 + # mysql数据库名 + SERVER_MYSQL_DB: know_streaming + # mysql用户名 + SERVER_MYSQL_USER: root + # mysql用户密码 + SERVER_MYSQL_PASSWORD: admin2022_ + # es服务地址 + SERVER_ES_ADDRESS: elasticsearch-single:9200 + # 服务JVM参数 + JAVA_OPTS: -Xmx1g -Xms1g + # 对于kafka中ADVERTISED_LISTENERS填写的hostname可以通过该方式完成 +# extra_hosts: +# - "hostname:x.x.x.x" + # 服务日志路径 +# volumes: +# - /ks/manage/log:/logs + knowstreaming-ui: + image: knowstreaming/knowstreaming-ui:latest + container_name: knowstreaming-ui + restart: always + ports: + - '80:80' + environment: + TZ: Asia/Shanghai + depends_on: + - knowstreaming-manager +# extra_hosts: +# - "hostname:x.x.x.x" + elasticsearch-single: + image: docker.io/library/elasticsearch:7.6.2 + container_name: elasticsearch-single + restart: always + expose: + - 9200 + - 9300 +# ports: +# - '9200:9200' +# - '9300:9300' + environment: + TZ: Asia/Shanghai + # es的JVM参数 + ES_JAVA_OPTS: -Xms512m -Xmx512m + # 单节点配置,多节点集群参考 https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file + discovery.type: single-node + # 数据持久化路径 +# volumes: +# - /ks/es/data:/usr/share/elasticsearch/data + + # es初始化服务,与manager使用同一镜像 + # 首次启动es需初始化模版和索引,后续会自动创建 + knowstreaming-init: + image: knowstreaming/knowstreaming-manager:latest + container_name: knowstreaming-init + depends_on: + - elasticsearch-single + command: + - /bin/bash + - 
/es_template_create.sh + environment: + TZ: Asia/Shanghai + # es服务地址 + SERVER_ES_ADDRESS: elasticsearch-single:9200 + + knowstreaming-mysql: + image: knowstreaming/knowstreaming-mysql:latest + container_name: knowstreaming-mysql + restart: always + environment: + TZ: Asia/Shanghai + # root 用户密码 + MYSQL_ROOT_PASSWORD: admin2022_ + # 初始化时创建的数据库名称 + MYSQL_DATABASE: know_streaming + # 通配所有host,可以访问远程 + MYSQL_ROOT_HOST: '%' + expose: + - 3306 +# ports: +# - '3306:3306' + # 数据持久化路径 +# volumes: +# - /ks/mysql/data:/data/mysql +``` + +  + ### 2.1.4、手动部署 **部署流程** diff --git a/docs/install_guide/版本升级手册.md b/docs/install_guide/版本升级手册.md index 2af3f69a..a75f71fd 100644 --- a/docs/install_guide/版本升级手册.md +++ b/docs/install_guide/版本升级手册.md @@ -1,12 +1,28 @@ ## 6.2、版本升级手册 -注意:如果想升级至具体版本,需要将你当前版本至你期望使用版本的变更统统执行一遍,然后才能正常使用。 +注意: +- 如果想升级至具体版本,需要将你当前版本至你期望使用版本的变更统统执行一遍,然后才能正常使用。 +- 如果中间某个版本没有升级信息,则表示该版本直接替换安装包即可从前一个版本升级至当前版本。 + ### 6.2.0、升级至 `master` 版本 暂无 -### 6.2.1、升级至 `v3.0.0-beta.2`版本 + +### 6.2.1、升级至 `v3.0.0` 版本 + +**SQL 变更** + +```sql +ALTER TABLE `ks_km_physical_cluster` +ADD COLUMN `zk_properties` TEXT NULL COMMENT 'ZK配置' AFTER `jmx_properties`; +``` + +--- + + +### 6.2.2、升级至 `v3.0.0-beta.2`版本 **配置变更** @@ -77,7 +93,7 @@ ALTER TABLE `logi_security_oplog` --- -### 6.2.2、升级至 `v3.0.0-beta.1`版本 +### 6.2.3、升级至 `v3.0.0-beta.1`版本 **SQL 变更** @@ -96,7 +112,7 @@ ALTER COLUMN `operation_methods` set default ''; --- -### 6.2.3、`2.x`版本 升级至 `v3.0.0-beta.0`版本 +### 6.2.4、`2.x`版本 升级至 `v3.0.0-beta.0`版本 **升级步骤:** diff --git a/docs/user_guide/faq.md b/docs/user_guide/faq.md index 764c58b9..98dfbf83 100644 --- a/docs/user_guide/faq.md +++ b/docs/user_guide/faq.md @@ -166,3 +166,19 @@ Node 版本: v12.22.12 需要到具体的应用中执行 `npm run start`,例如 `cd packages/layout-clusters-fe` 后,执行 `npm run start`。 应用启动后需要到基座应用中查看(需要启动基座应用,即 layout-clusters-fe)。 + + +## 8.12、权限识别失败问题 +1、使用admin账号登陆KnowStreaming时,点击系统管理-用户管理-角色管理-新增角色,查看页面是否正常。 + + + +2、查看'/logi-security/api/v1/permission/tree'接口返回值,出现如下图所示乱码现象。 
+![接口返回值](http://img-ys011.didistatic.com/static/dc2img/do1_jTxBkwNGU9vZuYQQbdNw) + +3、查看logi_security_permission表,看看是否出现了中文乱码现象。 + +根据以上几点,我们可以确定是由于数据库乱码造成的权限识别失败问题。 + ++ 原因:由于数据库编码和我们提供的脚本不一致,数据库里的数据发生了乱码,因此出现权限识别失败问题。 ++ 解决方案:清空数据库数据,将数据库字符集调整为utf8,最后重新执行[dml-logi.sql](https://github.com/didi/KnowStreaming/blob/master/km-dist/init/sql/dml-logi.sql)脚本导入数据即可。 diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java index 1095d5ee..5ccc3e98 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java @@ -272,15 +272,11 @@ public class GroupManagerImpl implements GroupManager { // 获取Group指标信息 - Result> groupMetricsResult = groupMetricService.listPartitionLatestMetricsFromES( - clusterPhyId, - groupName, - topicName, - latestMetricNames == null? Arrays.asList(): latestMetricNames - ); + Result> groupMetricsResult = groupMetricService.collectGroupMetricsFromKafka(clusterPhyId, groupName, latestMetricNames == null ? Arrays.asList() : latestMetricNames); + // 转换Group指标 - List esGroupMetricsList = groupMetricsResult.hasData()? groupMetricsResult.getData(): new ArrayList<>(); + List esGroupMetricsList = groupMetricsResult.hasData() ? 
groupMetricsResult.getData().stream().filter(elem -> topicName.equals(elem.getTopic())).collect(Collectors.toList()) : new ArrayList<>(); Map esMetricsMap = new HashMap<>(); for (GroupMetrics groupMetrics: esGroupMetricsList) { esMetricsMap.put(groupMetrics.getPartitionId(), groupMetrics); diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java index 994b2c8a..52a91520 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java @@ -7,12 +7,14 @@ import com.didiglobal.logi.log.LogFactory; import com.didiglobal.logi.security.common.dto.config.ConfigDTO; import com.didiglobal.logi.security.service.ConfigService; import com.xiaojukeji.know.streaming.km.biz.version.VersionControlManager; +import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDetailDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.UserMetricConfigDTO; import com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric.UserMetricConfig; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionControlItem; import com.xiaojukeji.know.streaming.km.common.bean.vo.config.metric.UserMetricConfigVO; import com.xiaojukeji.know.streaming.km.common.bean.vo.version.VersionItemVO; +import com.xiaojukeji.know.streaming.km.common.constant.Constant; import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; import com.xiaojukeji.know.streaming.km.common.utils.VersionUtil; @@ -47,29 +49,29 @@ public class VersionControlManagerImpl implements VersionControlManager { @PostConstruct public void 
init(){ defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_SCORE, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_TOTAL_PRODUCE_REQUESTS, true)); defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_FETCH_REQ, true)); defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_PRODUCE_REQ, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true)); defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_UNDER_REPLICA_PARTITIONS, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_TOTAL_PRODUCE_REQUESTS, true)); defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_IN, true)); defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_OUT, true)); defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_REJECTED, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_SCORE, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_PRODUCE_REQ, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_LOG_SIZE, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_CONNECTIONS, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_MESSAGES_IN, 
true)); defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_IN, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_OUT, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_GROUP_REBALANCES, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_JOB_RUNNING, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_CONNECTIONS, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_MESSAGES_IN, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITIONS_NO_LEADER, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITION_URP, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_LOG_SIZE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_PRODUCE_REQ, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_GROUP_REBALANCES, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_JOB_RUNNING, true)); defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_OFFSET_CONSUMED, true)); defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_LAG, true)); @@ -77,18 +79,18 @@ public class VersionControlManagerImpl implements VersionControlManager { defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_SCORE, true)); defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_SCORE, true)); - 
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_REQ_QUEUE, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_RES_QUEUE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true)); defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_MESSAGE_IN, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_PRODUCE_REQ, true)); defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_NETWORK_RPO_AVG_IDLE, true)); defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_REQ_AVG_IDLE, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_IN, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_OUT, true)); - defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_PARTITIONS_SKEW, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_PRODUCE_REQ, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_REQ_QUEUE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_RES_QUEUE, true)); defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_LEADERS_SKEW, true)); defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_UNDER_REPLICATE_PARTITION, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_PARTITIONS_SKEW, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_IN, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_OUT, true)); } @Autowired 
@@ -159,6 +161,9 @@ public class VersionControlManagerImpl implements VersionControlManager { UserMetricConfig umc = userMetricConfigMap.get(itemType + "@" + metric); userMetricConfigVO.setSet(null != umc && umc.isSet()); + if (umc != null) { + userMetricConfigVO.setRank(umc.getRank()); + } userMetricConfigVO.setName(itemVO.getName()); userMetricConfigVO.setType(itemVO.getType()); userMetricConfigVO.setDesc(itemVO.getDesc()); @@ -178,13 +183,29 @@ public class VersionControlManagerImpl implements VersionControlManager { @Override public Result updateUserMetricItem(Long clusterId, Integer type, UserMetricConfigDTO dto, String operator) { Map metricsSetMap = dto.getMetricsSet(); - if(null == metricsSetMap || metricsSetMap.isEmpty()){ + + //转换metricDetailDTOList + List metricDetailDTOList = dto.getMetricDetailDTOList(); + Map metricDetailMap = new HashMap<>(); + if (metricDetailDTOList != null && !metricDetailDTOList.isEmpty()) { + metricDetailMap = metricDetailDTOList.stream().collect(Collectors.toMap(MetricDetailDTO::getMetric, Function.identity())); + } + + //转换metricsSetMap + if (metricsSetMap != null && !metricsSetMap.isEmpty()) { + for (Map.Entry metricAndShowEntry : metricsSetMap.entrySet()) { + if (metricDetailMap.containsKey(metricAndShowEntry.getKey())) continue; + metricDetailMap.put(metricAndShowEntry.getKey(), new MetricDetailDTO(metricAndShowEntry.getKey(), metricAndShowEntry.getValue(), null)); + } + } + + if (metricDetailMap.isEmpty()) { return Result.buildSuc(); } Set userMetricConfigs = getUserMetricConfig(operator); - for(Map.Entry metricAndShowEntry : metricsSetMap.entrySet()){ - UserMetricConfig userMetricConfig = new UserMetricConfig(type, metricAndShowEntry.getKey(), metricAndShowEntry.getValue()); + for (MetricDetailDTO metricDetailDTO : metricDetailMap.values()) { + UserMetricConfig userMetricConfig = new UserMetricConfig(type, metricDetailDTO.getMetric(), metricDetailDTO.getSet(), metricDetailDTO.getRank()); 
userMetricConfigs.remove(userMetricConfig); userMetricConfigs.add(userMetricConfig); } @@ -228,7 +249,7 @@ public class VersionControlManagerImpl implements VersionControlManager { return defaultMetrics; } - return JSON.parseObject(value, new TypeReference>(){}); + return JSON.parseObject(value, new TypeReference>() {}); } public static void main(String[] args){ diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java deleted file mode 100644 index a94a377d..00000000 --- a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/MetricESSender.java +++ /dev/null @@ -1,121 +0,0 @@ -package com.xiaojukeji.know.streaming.km.collector.metric; - -import com.didiglobal.logi.log.ILog; -import com.didiglobal.logi.log.LogFactory; -import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*; -import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO; -import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.*; -import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; -import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil; -import com.xiaojukeji.know.streaming.km.common.utils.NamedThreadFactory; -import com.xiaojukeji.know.streaming.km.persistence.es.dao.BaseMetricESDAO; -import org.apache.commons.collections.CollectionUtils; -import org.springframework.context.ApplicationListener; -import org.springframework.stereotype.Component; - -import javax.annotation.PostConstruct; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*; - -@Component -public class MetricESSender implements ApplicationListener { - protected static final ILog LOGGER = 
LogFactory.getLog("METRIC_LOGGER"); - - private static final int THRESHOLD = 100; - - private ThreadPoolExecutor esExecutor = new ThreadPoolExecutor(10, 20, 6000, TimeUnit.MILLISECONDS, - new LinkedBlockingDeque<>(1000), - new NamedThreadFactory("KM-Collect-MetricESSender-ES"), - (r, e) -> LOGGER.warn("class=MetricESSender||msg=KM-Collect-MetricESSender-ES Deque is blocked, taskCount:{}" + e.getTaskCount())); - - @PostConstruct - public void init(){ - LOGGER.info("class=MetricESSender||method=init||msg=init finished"); - } - - @Override - public void onApplicationEvent(BaseMetricEvent event) { - if(event instanceof BrokerMetricEvent) { - BrokerMetricEvent brokerMetricEvent = (BrokerMetricEvent)event; - send2es(BROKER_INDEX, - ConvertUtil.list2List(brokerMetricEvent.getBrokerMetrics(), BrokerMetricPO.class) - ); - - } else if(event instanceof ClusterMetricEvent) { - ClusterMetricEvent clusterMetricEvent = (ClusterMetricEvent)event; - send2es(CLUSTER_INDEX, - ConvertUtil.list2List(clusterMetricEvent.getClusterMetrics(), ClusterMetricPO.class) - ); - - } else if(event instanceof TopicMetricEvent) { - TopicMetricEvent topicMetricEvent = (TopicMetricEvent)event; - send2es(TOPIC_INDEX, - ConvertUtil.list2List(topicMetricEvent.getTopicMetrics(), TopicMetricPO.class) - ); - - } else if(event instanceof PartitionMetricEvent) { - PartitionMetricEvent partitionMetricEvent = (PartitionMetricEvent)event; - send2es(PARTITION_INDEX, - ConvertUtil.list2List(partitionMetricEvent.getPartitionMetrics(), PartitionMetricPO.class) - ); - - } else if(event instanceof GroupMetricEvent) { - GroupMetricEvent groupMetricEvent = (GroupMetricEvent)event; - send2es(GROUP_INDEX, - ConvertUtil.list2List(groupMetricEvent.getGroupMetrics(), GroupMetricPO.class) - ); - - } else if(event instanceof ReplicaMetricEvent) { - ReplicaMetricEvent replicaMetricEvent = (ReplicaMetricEvent)event; - send2es(REPLICATION_INDEX, - ConvertUtil.list2List(replicaMetricEvent.getReplicationMetrics(), 
ReplicationMetricPO.class) - ); - } - } - - /** - * 根据不同监控维度来发送 - */ - private boolean send2es(String index, List statsList){ - if (CollectionUtils.isEmpty(statsList)) { - return true; - } - - if (!EnvUtil.isOnline()) { - LOGGER.info("class=MetricESSender||method=send2es||ariusStats={}||size={}", - index, statsList.size()); - } - - BaseMetricESDAO baseMetricESDao = BaseMetricESDAO.getByStatsType(index); - if (Objects.isNull( baseMetricESDao )) { - LOGGER.error("class=MetricESSender||method=send2es||errMsg=fail to find {}", index); - return false; - } - - int size = statsList.size(); - int num = (size) % THRESHOLD == 0 ? (size / THRESHOLD) : (size / THRESHOLD + 1); - - if (size < THRESHOLD) { - esExecutor.execute( - () -> baseMetricESDao.batchInsertStats(statsList) - ); - return true; - } - - for (int i = 1; i < num + 1; i++) { - int end = (i * THRESHOLD) > size ? size : (i * THRESHOLD); - int start = (i - 1) * THRESHOLD; - - esExecutor.execute( - () -> baseMetricESDao.batchInsertStats(statsList.subList(start, end)) - ); - } - - return true; - } -} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/AbstractMetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/AbstractMetricESSender.java new file mode 100644 index 00000000..d3192f1f --- /dev/null +++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/AbstractMetricESSender.java @@ -0,0 +1,72 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO; +import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil; +import com.xiaojukeji.know.streaming.km.common.utils.NamedThreadFactory; +import com.xiaojukeji.know.streaming.km.persistence.es.dao.BaseMetricESDAO; +import org.apache.commons.collections.CollectionUtils; + +import java.util.List; +import 
java.util.Objects; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +public abstract class AbstractMetricESSender { + protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER"); + + private static final int THRESHOLD = 100; + + private static final ThreadPoolExecutor esExecutor = new ThreadPoolExecutor( + 10, + 20, + 6000, + TimeUnit.MILLISECONDS, + new LinkedBlockingDeque<>(1000), + new NamedThreadFactory("KM-Collect-MetricESSender-ES"), + (r, e) -> LOGGER.warn("class=MetricESSender||msg=KM-Collect-MetricESSender-ES Deque is blocked, taskCount:{}" + e.getTaskCount()) + ); + + /** + * 根据不同监控维度来发送 + */ + protected boolean send2es(String index, List statsList){ + if (CollectionUtils.isEmpty(statsList)) { + return true; + } + + if (!EnvUtil.isOnline()) { + LOGGER.info("class=MetricESSender||method=send2es||ariusStats={}||size={}", + index, statsList.size()); + } + + BaseMetricESDAO baseMetricESDao = BaseMetricESDAO.getByStatsType(index); + if (Objects.isNull( baseMetricESDao )) { + LOGGER.error("class=MetricESSender||method=send2es||errMsg=fail to find {}", index); + return false; + } + + int size = statsList.size(); + int num = (size) % THRESHOLD == 0 ? (size / THRESHOLD) : (size / THRESHOLD + 1); + + if (size < THRESHOLD) { + esExecutor.execute( + () -> baseMetricESDao.batchInsertStats(statsList) + ); + return true; + } + + for (int i = 1; i < num + 1; i++) { + int end = (i * THRESHOLD) > size ? 
size : (i * THRESHOLD); + int start = (i - 1) * THRESHOLD; + + esExecutor.execute( + () -> baseMetricESDao.batchInsertStats(statsList.subList(start, end)) + ); + } + + return true; + } +} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/BrokerMetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/BrokerMetricESSender.java new file mode 100644 index 00000000..6708ba38 --- /dev/null +++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/BrokerMetricESSender.java @@ -0,0 +1,28 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.event.metric.BrokerMetricEvent; +import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.BROKER_INDEX; + +@Component +public class BrokerMetricESSender extends AbstractMetricESSender implements ApplicationListener { + protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER"); + + @PostConstruct + public void init(){ + LOGGER.info("class=BrokerMetricESSender||method=init||msg=init finished"); + } + + @Override + public void onApplicationEvent(BrokerMetricEvent event) { + send2es(BROKER_INDEX, ConvertUtil.list2List(event.getBrokerMetrics(), BrokerMetricPO.class)); + } +} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/ClusterMetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/ClusterMetricESSender.java new file mode 100644 index 00000000..94091748 --- /dev/null +++ 
b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/ClusterMetricESSender.java @@ -0,0 +1,29 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ClusterMetricEvent; +import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ClusterMetricPO; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + + +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.CLUSTER_INDEX; + +@Component +public class ClusterMetricESSender extends AbstractMetricESSender implements ApplicationListener { + protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER"); + + @PostConstruct + public void init(){ + LOGGER.info("class=ClusterMetricESSender||method=init||msg=init finished"); + } + + @Override + public void onApplicationEvent(ClusterMetricEvent event) { + send2es(CLUSTER_INDEX, ConvertUtil.list2List(event.getClusterMetrics(), ClusterMetricPO.class)); + } +} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/GroupMetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/GroupMetricESSender.java new file mode 100644 index 00000000..cd7a2242 --- /dev/null +++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/GroupMetricESSender.java @@ -0,0 +1,29 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.event.metric.GroupMetricEvent; +import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.GroupMetricPO; +import 
com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + + +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.GROUP_INDEX; + +@Component +public class GroupMetricESSender extends AbstractMetricESSender implements ApplicationListener { + protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER"); + + @PostConstruct + public void init(){ + LOGGER.info("class=GroupMetricESSender||method=init||msg=init finished"); + } + + @Override + public void onApplicationEvent(GroupMetricEvent event) { + send2es(GROUP_INDEX, ConvertUtil.list2List(event.getGroupMetrics(), GroupMetricPO.class)); + } +} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/PartitionMetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/PartitionMetricESSender.java new file mode 100644 index 00000000..ce108835 --- /dev/null +++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/PartitionMetricESSender.java @@ -0,0 +1,28 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.event.metric.PartitionMetricEvent; +import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.PartitionMetricPO; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.PARTITION_INDEX; + +@Component +public class PartitionMetricESSender extends AbstractMetricESSender implements ApplicationListener { + protected static final ILog LOGGER = 
LogFactory.getLog("METRIC_LOGGER"); + + @PostConstruct + public void init(){ + LOGGER.info("class=PartitionMetricESSender||method=init||msg=init finished"); + } + + @Override + public void onApplicationEvent(PartitionMetricEvent event) { + send2es(PARTITION_INDEX, ConvertUtil.list2List(event.getPartitionMetrics(), PartitionMetricPO.class)); + } +} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/ReplicaMetricESSender.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/ReplicaMetricESSender.java new file mode 100644 index 00000000..76b2aa2a --- /dev/null +++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/ReplicaMetricESSender.java @@ -0,0 +1,28 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.event.metric.ReplicaMetricEvent; +import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ReplicationMetricPO; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.REPLICATION_INDEX; + +@Component +public class ReplicaMetricESSender extends AbstractMetricESSender implements ApplicationListener { + protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER"); + + @PostConstruct + public void init(){ + LOGGER.info("class=GroupMetricESSender||method=init||msg=init finished"); + } + + @Override + public void onApplicationEvent(ReplicaMetricEvent event) { + send2es(REPLICATION_INDEX, ConvertUtil.list2List(event.getReplicationMetrics(), ReplicationMetricPO.class)); + } +} diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/TopicMetricESSender.java 
b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/TopicMetricESSender.java new file mode 100644 index 00000000..eebd82aa --- /dev/null +++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/sink/TopicMetricESSender.java @@ -0,0 +1,29 @@ +package com.xiaojukeji.know.streaming.km.collector.sink; + +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*; +import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.*; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + + +import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.TOPIC_INDEX; + +@Component +public class TopicMetricESSender extends AbstractMetricESSender implements ApplicationListener { + protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER"); + + @PostConstruct + public void init(){ + LOGGER.info("class=TopicMetricESSender||method=init||msg=init finished"); + } + + @Override + public void onApplicationEvent(TopicMetricEvent event) { + send2es(TOPIC_INDEX, ConvertUtil.list2List(event.getTopicMetrics(), TopicMetricPO.class)); + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/cluster/ClusterPhyBaseDTO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/cluster/ClusterPhyBaseDTO.java index 863e0b29..a9a3587f 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/cluster/ClusterPhyBaseDTO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/cluster/ClusterPhyBaseDTO.java @@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.dto.cluster; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import 
com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO; import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig; +import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; import lombok.Data; @@ -34,4 +35,8 @@ public class ClusterPhyBaseDTO extends BaseDTO { @NotNull(message = "jmxProperties不允许为空") @ApiModelProperty(value="Jmx配置") protected JmxConfig jmxProperties; + + // TODO 前端页面增加时,需要加一个不为空的限制 + @ApiModelProperty(value="ZK配置") + protected ZKConfig zkProperties; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/MetricDetailDTO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/MetricDetailDTO.java new file mode 100644 index 00000000..04f18a03 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/MetricDetailDTO.java @@ -0,0 +1,32 @@ +package com.xiaojukeji.know.streaming.km.common.bean.dto.metrices; + +import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO; +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +import javax.validation.constraints.NotNull; + + +/** + * @author didi + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +@ApiModel(description = "指标详细属性信息") +public class MetricDetailDTO extends BaseDTO { + + @ApiModelProperty("指标名称") + private String metric; + + @ApiModelProperty("指标是否显示") + private Boolean set; + + @NotNull(message = "MetricDetailDTO的rank字段应不为空") + @ApiModelProperty("指标优先级") + private Integer rank; + +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/UserMetricConfigDTO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/UserMetricConfigDTO.java index 
02bb1d2a..b743ec73 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/UserMetricConfigDTO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/metrices/UserMetricConfigDTO.java @@ -7,6 +7,8 @@ import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; +import javax.validation.Valid; +import java.util.List; import java.util.Map; @@ -17,4 +19,8 @@ import java.util.Map; public class UserMetricConfigDTO extends BaseDTO { @ApiModelProperty("指标展示设置项,key:指标名;value:是否展现(true展现/false不展现)") private Map metricsSet; + + @Valid + @ApiModelProperty("指标自定义属性列表") + private List metricDetailDTOList; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java index fa67cac5..752aade0 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java @@ -5,7 +5,6 @@ import com.alibaba.fastjson.TypeReference; import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData; import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.BrokerMetadata; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @@ -79,20 +78,6 @@ public class Broker implements Serializable { return metadata; } - public static Broker buildFrom(Long clusterPhyId, Integer brokerId, BrokerMetadata brokerMetadata) { - Broker metadata = new Broker(); - metadata.setClusterPhyId(clusterPhyId); - metadata.setBrokerId(brokerId); - metadata.setHost(brokerMetadata.getHost()); - metadata.setPort(brokerMetadata.getPort()); - 
metadata.setJmxPort(brokerMetadata.getJmxPort()); - metadata.setStartTimestamp(brokerMetadata.getTimestamp()); - metadata.setRack(brokerMetadata.getRack()); - metadata.setStatus(1); - metadata.setEndpointMap(brokerMetadata.getEndpointMap()); - return metadata; - } - public static Broker buildFrom(BrokerPO brokerPO) { Broker broker = ConvertUtil.obj2Obj(brokerPO, Broker.class); String endpointMapStr = brokerPO.getEndpointMap(); diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/cluster/ClusterPhy.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/cluster/ClusterPhy.java index 5a0ee86a..823ec67d 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/cluster/ClusterPhy.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/cluster/ClusterPhy.java @@ -53,9 +53,16 @@ public class ClusterPhy implements Comparable, EntifyIdInterface { /** * jmx配置 + * @see com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig */ private String jmxProperties; + /** + * zk配置 + * @see com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig + */ + private String zkProperties; + /** * 开启ACL * @see com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/ZKConfig.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/ZKConfig.java new file mode 100644 index 00000000..39e6fdf5 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/ZKConfig.java @@ -0,0 +1,31 @@ +package com.xiaojukeji.know.streaming.km.common.bean.entity.config; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import lombok.Data; + +import java.io.Serializable; +import java.util.Properties; + +/** + * @author zengqiao + * @date 
22/02/24 + */ +@Data +@ApiModel(description = "ZK配置") +public class ZKConfig implements Serializable { + @ApiModelProperty(value="ZK的jmx配置") + private JmxConfig jmxConfig; + + @ApiModelProperty(value="ZK是否开启secure", example = "false") + private Boolean openSecure = false; + + @ApiModelProperty(value="ZK的Session超时时间", example = "15000") + private Long sessionTimeoutUnitMs = 15000L; + + @ApiModelProperty(value="ZK的Request超时时间", example = "5000") + private Long requestTimeoutUnitMs = 5000L; + + @ApiModelProperty(value="ZK的Request超时时间") + private Properties otherProps = new Properties(); +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/metric/UserMetricConfig.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/metric/UserMetricConfig.java index 6895fb40..e244181a 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/metric/UserMetricConfig.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/metric/UserMetricConfig.java @@ -1,12 +1,12 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric; +import com.xiaojukeji.know.streaming.km.common.constant.Constant; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @Data @NoArgsConstructor -@AllArgsConstructor public class UserMetricConfig { private int type; @@ -15,6 +15,22 @@ public class UserMetricConfig { private boolean set; + private Integer rank; + + public UserMetricConfig(int type, String metric, boolean set, Integer rank) { + this.type = type; + this.metric = metric; + this.set = set; + this.rank = rank; + } + + public UserMetricConfig(int type, String metric, boolean set) { + this.type = type; + this.metric = metric; + this.set = set; + this.rank = null; + } + @Override public int hashCode(){ return metric.hashCode() << 1 + type; diff --git 
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/cluster/ClusterPhyPO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/cluster/ClusterPhyPO.java index a7632057..0a9bba41 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/cluster/ClusterPhyPO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/cluster/ClusterPhyPO.java @@ -41,6 +41,11 @@ public class ClusterPhyPO extends BasePO { */ private String jmxProperties; + /** + * zk配置 + */ + private String zkProperties; + /** * 认证类型 * @see com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/ClusterPhyBaseVO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/ClusterPhyBaseVO.java index 3541dc38..e0707e20 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/ClusterPhyBaseVO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/ClusterPhyBaseVO.java @@ -31,9 +31,15 @@ public class ClusterPhyBaseVO extends BaseTimeVO { @ApiModelProperty(value="Jmx配置", example = "{}") protected String jmxProperties; + @ApiModelProperty(value="ZK配置", example = "{}") + protected String zkProperties; + @ApiModelProperty(value="描述", example = "测试") protected String description; @ApiModelProperty(value="集群的kafka版本", example = "2.5.1") protected String kafkaVersion; + + @ApiModelProperty(value="集群的运行模式", example = "2:raft模式,其他是ZK模式") + private Integer runState; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/config/metric/UserMetricConfigVO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/config/metric/UserMetricConfigVO.java index e50fc6e7..2b4e76b3 100644 --- 
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/config/metric/UserMetricConfigVO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/config/metric/UserMetricConfigVO.java @@ -14,4 +14,7 @@ import lombok.NoArgsConstructor; public class UserMetricConfigVO extends VersionItemVO { @ApiModelProperty(value = "该指标用户是否设置展现", example = "true") private Boolean set; + + @ApiModelProperty(value = "该指标展示优先级", example = "1") + private Integer rank; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java index 36575938..edd897ff 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/Constant.java @@ -42,6 +42,7 @@ public class Constant { */ public static final Integer DEFAULT_CLUSTER_HEALTH_SCORE = 90; + public static final String DEFAULT_USER_NAME = "know-streaming-app"; public static final int INVALID_CODE = -1; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/MsgConstant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/MsgConstant.java index 3d0b6a5c..1be8dadf 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/MsgConstant.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/MsgConstant.java @@ -52,6 +52,10 @@ public class MsgConstant { /**************************************************** Partition ****************************************************/ + public static String getPartitionNoLeader(Long clusterPhyId, String topicName) { + return String.format("集群ID:[%d] Topic名称:[%s] 所有分区NoLeader", clusterPhyId, topicName); + } + public static String getPartitionNotExist(Long clusterPhyId, String topicName) { return String.format("集群ID:[%d] 
Topic名称:[%s] 存在非法的分区ID", clusterPhyId, topicName); } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ClusterConverter.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ClusterConverter.java index b0089729..bf848c8e 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ClusterConverter.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ClusterConverter.java @@ -19,6 +19,11 @@ public class ClusterConverter { ClusterPhyPO clusterPhyPO = ConvertUtil.obj2Obj(dto, ClusterPhyPO.class); clusterPhyPO.setClientProperties(ConvertUtil.obj2Json(dto.getClientProperties())); clusterPhyPO.setJmxProperties(ConvertUtil.obj2Json(dto.getJmxProperties())); + if (ValidateUtils.isNull(dto.getZkProperties())) { + clusterPhyPO.setZkProperties(""); + } else { + clusterPhyPO.setZkProperties(ConvertUtil.obj2Json(dto.getZkProperties())); + } clusterPhyPO.setRunState( ValidateUtils.isBlank(dto.getZookeeper())? ClusterRunStateEnum.RUN_RAFT.getRunState() : @@ -32,6 +37,11 @@ public class ClusterConverter { ClusterPhyPO clusterPhyPO = ConvertUtil.obj2Obj(dto, ClusterPhyPO.class); clusterPhyPO.setClientProperties(ConvertUtil.obj2Json(dto.getClientProperties())); clusterPhyPO.setJmxProperties(ConvertUtil.obj2Json(dto.getJmxProperties())); + if (ValidateUtils.isNull(dto.getZkProperties())) { + clusterPhyPO.setZkProperties(""); + } else { + clusterPhyPO.setZkProperties(ConvertUtil.obj2Json(dto.getZkProperties())); + } clusterPhyPO.setRunState( ValidateUtils.isBlank(dto.getZookeeper())? 
ClusterRunStateEnum.RUN_RAFT.getRunState() : diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthStateEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthStateEnum.java new file mode 100644 index 00000000..a9490fb6 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/health/HealthStateEnum.java @@ -0,0 +1,31 @@ +package com.xiaojukeji.know.streaming.km.common.enums.health; + +import lombok.Getter; + + +/** + * 健康状态 + */ +@Getter +public enum HealthStateEnum { + UNKNOWN(-1, "未知"), + + GOOD(0, "好"), + + MEDIUM(1, "中"), + + POOR(2, "差"), + + DEAD(3, "宕机"), + + ; + + private final int dimension; + + private final String message; + + HealthStateEnum(int dimension, String message) { + this.dimension = dimension; + this.message = message; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java index 0fb65589..ca7c01c4 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java @@ -90,6 +90,8 @@ public class JmxConnectorWrap { } try { jmxConnector.close(); + + jmxConnector = null; } catch (IOException e) { LOGGER.warn("close JmxConnector exception, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e); } @@ -105,6 +107,11 @@ public class JmxConnectorWrap { acquire(); MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); return mBeanServerConnection.getAttribute(name, attribute); + } catch (IOException ioe) { + // 如果是因为连接断开,则进行重新连接,并抛出异常 + reInitDueIOException(); + + throw ioe; } finally { atomicInteger.incrementAndGet(); } @@ -120,6 +127,11 @@ public class JmxConnectorWrap { acquire(); 
MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); return mBeanServerConnection.getAttributes(name, attributes); + } catch (IOException ioe) { + // 如果是因为连接断开,则进行重新连接,并抛出异常 + reInitDueIOException(); + + throw ioe; } finally { atomicInteger.incrementAndGet(); } @@ -131,6 +143,11 @@ public class JmxConnectorWrap { acquire(); MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); return mBeanServerConnection.queryNames(name, query); + } catch (IOException ioe) { + // 如果是因为连接断开,则进行重新连接,并抛出异常 + reInitDueIOException(); + + throw ioe; } finally { atomicInteger.incrementAndGet(); } @@ -186,4 +203,26 @@ public class JmxConnectorWrap { } } } + + private synchronized void reInitDueIOException() { + try { + if (jmxConnector == null) { + return; + } + + // 检查是否正常 + jmxConnector.getConnectionId(); + + // 如果正常则直接返回 + return; + } catch (Exception e) { + // ignore + } + + // 关闭旧的 + this.close(); + + // 重新创建 + this.checkJmxConnectionAndInitIfNeed(); + } } diff --git a/km-console/package-lock.json b/km-console/package-lock.json index e120e9eb..cb07da96 100644 --- a/km-console/package-lock.json +++ b/km-console/package-lock.json @@ -5100,9 +5100,9 @@ } }, "is-callable": { - "version": "1.2.5", - "resolved": "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.5.tgz", - "integrity": "sha512-ZIWRujF6MvYGkEuHMYtFRkL2wAtFw89EHfKlXrkPkjQZZRWeh9L1q3SV13NIfHnqxugjLvAOkEHx9mb1zcMnEw==", + "version": "1.2.6", + "resolved": "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.6.tgz", + "integrity": "sha512-krO72EO2NptOGAX2KYyqbP9vYMlNAXdB53rq6f8LXY6RY7JdSR/3BD6wLUlPHSAesmY9vstNrjvqGaCiRK/91Q==", "dev": true }, "is-ci": { diff --git a/km-console/packages/config-manager-fe/config/d1-webpack.base.js b/km-console/packages/config-manager-fe/config/d1-webpack.base.js deleted file mode 100644 index 95f0bc70..00000000 --- a/km-console/packages/config-manager-fe/config/d1-webpack.base.js +++ /dev/null @@ -1,205 +0,0 @@ 
-/* eslint-disable */ -const MiniCssExtractPlugin = require('mini-css-extract-plugin'); -const ProgressBarPlugin = require('progress-bar-webpack-plugin'); -const CaseSensitivePathsPlugin = require('case-sensitive-paths-webpack-plugin'); -const StatsPlugin = require('stats-webpack-plugin'); -const { CleanWebpackPlugin } = require('clean-webpack-plugin'); -const TerserJSPlugin = require('terser-webpack-plugin'); -const OptimizeCSSAssetsPlugin = require('optimize-css-assets-webpack-plugin'); -const HappyPack = require('happypack'); -const os = require('os'); -const happyThreadPool = HappyPack.ThreadPool({ size: os.cpus().length }); -const ReactRefreshWebpackPlugin = require('@pmmmwh/react-refresh-webpack-plugin'); -const theme = require('./theme'); -var cwd = process.cwd(); - -const path = require('path'); -const isProd = process.env.NODE_ENV === 'production'; -const babelOptions = { - cacheDirectory: true, - babelrc: false, - presets: [require.resolve('@babel/preset-env'), require.resolve('@babel/preset-typescript'), require.resolve('@babel/preset-react')], - plugins: [ - [require.resolve('@babel/plugin-proposal-decorators'), { legacy: true }], - [require.resolve('@babel/plugin-proposal-class-properties'), { loose: true }], - [require.resolve('@babel/plugin-proposal-private-methods'), { loose: true }], - require.resolve('@babel/plugin-proposal-export-default-from'), - require.resolve('@babel/plugin-proposal-export-namespace-from'), - require.resolve('@babel/plugin-proposal-object-rest-spread'), - require.resolve('@babel/plugin-transform-runtime'), - require.resolve('@babel/plugin-proposal-optional-chaining'), // - require.resolve('@babel/plugin-proposal-nullish-coalescing-operator'), // 解决 ?? 
无法转义问题 - require.resolve('@babel/plugin-proposal-numeric-separator'), // 转义 1_000_000 - !isProd && require.resolve('react-refresh/babel'), - ] - .filter(Boolean) - .concat([ - [ - 'babel-plugin-import', - { - libraryName: 'antd', - style: true, - }, - ], - '@babel/plugin-transform-object-assign', - ]), -}; -module.exports = () => { - const manifestName = `manifest.json`; - const cssFileName = isProd ? '[name]-[chunkhash].css' : '[name].css'; - - const plugins = [ - new ProgressBarPlugin(), - new CaseSensitivePathsPlugin(), - new MiniCssExtractPlugin({ - filename: cssFileName, - }), - new StatsPlugin(manifestName, { - chunkModules: false, - source: true, - chunks: false, - modules: false, - assets: true, - children: false, - exclude: [/node_modules/], - }), - new HappyPack({ - id: 'babel', - loaders: [ - 'cache-loader', - { - loader: 'babel-loader', - options: babelOptions, - }, - ], - threadPool: happyThreadPool, - }), - !isProd && - new ReactRefreshWebpackPlugin({ - overlay: false, - }), - // new BundleAnalyzerPlugin({ - // analyzerPort: 8889 - // }), - ].filter(Boolean); - if (isProd) { - plugins.push(new CleanWebpackPlugin()); - } - return { - externals: isProd - ? 
[ - /^react$/, - /^react\/lib.*/, - /^react-dom$/, - /.*react-dom.*/, - /^single-spa$/, - /^single-spa-react$/, - /^moment$/, - /^antd$/, - /^lodash$/, - /^react-router$/, - /^react-router-dom$/, - ] - : [], - resolve: { - symlinks: false, - extensions: ['.web.jsx', '.web.js', '.ts', '.tsx', '.js', '.jsx', '.json'], - alias: { - // '@pkgs': path.resolve(cwd, 'src/packages'), - '@pkgs': path.resolve(cwd, './node_modules/@didi/d1-packages'), - '@cpts': path.resolve(cwd, 'src/components'), - '@interface': path.resolve(cwd, 'src/interface'), - '@apis': path.resolve(cwd, 'src/api'), - react: path.resolve('./node_modules/react'), - actions: path.resolve(cwd, 'src/actions'), - lib: path.resolve(cwd, 'src/lib'), - constants: path.resolve(cwd, 'src/constants'), - components: path.resolve(cwd, 'src/components'), - container: path.resolve(cwd, 'src/container'), - api: path.resolve(cwd, 'src/api'), - assets: path.resolve(cwd, 'src/assets'), - mobxStore: path.resolve(cwd, 'src/mobxStore'), - }, - }, - plugins, - module: { - rules: [ - { - parser: { system: false }, - }, - { - test: /\.(js|jsx|ts|tsx)$/, - exclude: /node_modules\/(?!react-intl|@didi\/dcloud-design)/, - use: [ - { - loader: 'happypack/loader?id=babel', - }, - ], - }, - { - test: /\.(png|svg|jpeg|jpg|gif|ttf|woff|woff2|eot|pdf)$/, - use: [ - { - loader: 'file-loader', - options: { - name: '[name].[ext]', - outputPath: './assets/image/', - esModule: false, - }, - }, - ], - }, - { - test: /\.(css|less)$/, - use: [ - { - loader: MiniCssExtractPlugin.loader, - }, - 'css-loader', - { - loader: 'less-loader', - options: { - javascriptEnabled: true, - modifyVars: theme, - }, - }, - ], - }, - ], - }, - optimization: Object.assign( - { - splitChunks: { - cacheGroups: { - vendor: { - test: /[\\/]node_modules[\\/]/, - chunks: 'all', - name: 'vendor', - priority: 10, - enforce: true, - minChunks: 1, - maxSize: 3500000, - }, - }, - }, - }, - isProd - ? 
{ - minimizer: [ - new TerserJSPlugin({ - cache: true, - sourceMap: true, - }), - new OptimizeCSSAssetsPlugin({}), - ], - } - : {} - ), - devtool: isProd ? 'cheap-module-source-map' : 'source-map', - node: { - fs: 'empty', - net: 'empty', - tls: 'empty', - }, - }; -}; diff --git a/km-console/packages/config-manager-fe/config/webpack.common.js b/km-console/packages/config-manager-fe/config/webpack.common.js new file mode 100644 index 00000000..cef88e61 --- /dev/null +++ b/km-console/packages/config-manager-fe/config/webpack.common.js @@ -0,0 +1,132 @@ +const path = require('path'); +const webpack = require('webpack'); +const HtmlWebpackPlugin = require('html-webpack-plugin'); +const MiniCssExtractPlugin = require('mini-css-extract-plugin'); +const ProgressBarPlugin = require('progress-bar-webpack-plugin'); +const CaseSensitivePathsPlugin = require('case-sensitive-paths-webpack-plugin'); +const StatsPlugin = require('stats-webpack-plugin'); +const HappyPack = require('happypack'); +const os = require('os'); +const happyThreadPool = HappyPack.ThreadPool({ size: os.cpus().length }); +const theme = require('./theme'); +const pkgJson = require('../package'); + +const devMode = process.env.NODE_ENV === 'development'; +const babelOptions = { + cacheDirectory: true, + babelrc: false, + presets: [require.resolve('@babel/preset-env'), require.resolve('@babel/preset-typescript'), require.resolve('@babel/preset-react')], + plugins: [ + [require.resolve('@babel/plugin-proposal-decorators'), { legacy: true }], + [require.resolve('@babel/plugin-proposal-class-properties'), { loose: true }], + [require.resolve('@babel/plugin-proposal-private-methods'), { loose: true }], + [require.resolve('@babel/plugin-proposal-private-property-in-object'), { loose: true }], + require.resolve('@babel/plugin-proposal-export-default-from'), + require.resolve('@babel/plugin-proposal-export-namespace-from'), + require.resolve('@babel/plugin-proposal-object-rest-spread'), + 
require.resolve('@babel/plugin-transform-runtime'), + require.resolve('@babel/plugin-proposal-optional-chaining'), // + require.resolve('@babel/plugin-proposal-nullish-coalescing-operator'), // 解决 ?? 无法转义问题 + require.resolve('@babel/plugin-proposal-numeric-separator'), // 转义 1_000_000 + devMode && require.resolve('react-refresh/babel'), + ].filter(Boolean), +}; + +module.exports = { + entry: { + [pkgJson.ident]: ['./src/index.tsx'], + }, + resolve: { + symlinks: false, + extensions: ['.web.jsx', '.web.js', '.ts', '.tsx', '.js', '.jsx', '.json'], + alias: { + '@src': path.resolve(process.cwd(), 'src'), + }, + }, + plugins: [ + new ProgressBarPlugin(), + new CaseSensitivePathsPlugin(), + new StatsPlugin('manifest.json', { + chunkModules: false, + source: true, + chunks: false, + modules: false, + assets: true, + children: false, + exclude: [/node_modules/], + }), + new HappyPack({ + id: 'babel', + loaders: [ + 'cache-loader', + { + loader: 'babel-loader', + options: babelOptions, + }, + ], + threadPool: happyThreadPool, + }), + new webpack.DefinePlugin({ + 'process.env': { + NODE_ENV: JSON.stringify(process.env.NODE_ENV), + RUN_ENV: JSON.stringify(process.env.RUN_ENV), + }, + }), + new HtmlWebpackPlugin({ + meta: { + manifest: 'manifest.json', + }, + template: './src/index.html', + inject: 'body', + }), + ].filter(Boolean), + module: { + rules: [ + { + parser: { system: false }, + }, + { + test: /\.(js|jsx|ts|tsx)$/, + exclude: /node_modules\/(?!react-intl|@didi\/dcloud-design)/, + use: [ + { + loader: 'happypack/loader?id=babel', + }, + ], + }, + { + test: /\.(png|svg|jpeg|jpg|gif|ttf|woff|woff2|eot|pdf)$/, + use: [ + { + loader: 'file-loader', + options: { + name: '[name].[ext]', + outputPath: './assets/image/', + esModule: false, + }, + }, + ], + }, + { + test: /\.(css|less)$/, + use: [ + MiniCssExtractPlugin.loader, + 'css-loader', + { + loader: 'less-loader', + options: { + javascriptEnabled: true, + modifyVars: theme, + }, + }, + ], + }, + ], + }, + node: { + 
fs: 'empty', + net: 'empty', + tls: 'empty', + }, + stats: 'errors-warnings', +}; diff --git a/km-console/packages/config-manager-fe/config/webpack.dev.js b/km-console/packages/config-manager-fe/config/webpack.dev.js new file mode 100644 index 00000000..1b60f703 --- /dev/null +++ b/km-console/packages/config-manager-fe/config/webpack.dev.js @@ -0,0 +1,35 @@ +const MiniCssExtractPlugin = require('mini-css-extract-plugin'); +const ReactRefreshWebpackPlugin = require('@pmmmwh/react-refresh-webpack-plugin'); +const pkgJson = require('../package'); + +module.exports = { + mode: 'development', + plugins: [ + new MiniCssExtractPlugin(), + new ReactRefreshWebpackPlugin({ + overlay: false, + }), + ], + devServer: { + host: '127.0.0.1', + port: pkgJson.port, + hot: true, + open: false, + publicPath: `http://localhost:${pkgJson.port}/${pkgJson.ident}/`, + inline: true, + disableHostCheck: true, + historyApiFallback: true, + headers: { + 'Access-Control-Allow-Origin': '*', + }, + }, + output: { + path: '/', + publicPath: `http://localhost:${pkgJson.port}/${pkgJson.ident}/`, + library: pkgJson.ident, + libraryTarget: 'amd', + filename: '[name].js', + chunkFilename: '[name].js', + }, + devtool: 'cheap-module-eval-source-map', +}; diff --git a/km-console/packages/config-manager-fe/config/webpack.prod.js b/km-console/packages/config-manager-fe/config/webpack.prod.js new file mode 100644 index 00000000..9dd9ee66 --- /dev/null +++ b/km-console/packages/config-manager-fe/config/webpack.prod.js @@ -0,0 +1,59 @@ +const path = require('path'); +const MiniCssExtractPlugin = require('mini-css-extract-plugin'); +const { CleanWebpackPlugin } = require('clean-webpack-plugin'); +const TerserJSPlugin = require('terser-webpack-plugin'); +const OptimizeCSSAssetsPlugin = require('optimize-css-assets-webpack-plugin'); +const pkgJson = require('../package'); + +module.exports = { + mode: 'production', + externals: [ + /^react$/, + /^react\/lib.*/, + /^react-dom$/, + /.*react-dom.*/, + 
/^single-spa$/, + /^single-spa-react$/, + /^moment$/, + /^lodash$/, + /^react-router$/, + /^react-router-dom$/, + ], + plugins: [ + new CleanWebpackPlugin(), + new MiniCssExtractPlugin({ + filename: '[name]-[chunkhash].css', + }), + ], + output: { + path: path.resolve(process.cwd(), `../../../km-rest/src/main/resources/templates/${pkgJson.ident}`), + publicPath: `${process.env.PUBLIC_PATH}/${pkgJson.ident}/`, + library: pkgJson.ident, + libraryTarget: 'amd', + filename: '[name]-[chunkhash].js', + chunkFilename: '[name]-[chunkhash].js', + }, + optimization: { + splitChunks: { + cacheGroups: { + vendor: { + test: /[\\/]node_modules[\\/]/, + chunks: 'all', + name: 'vendor', + priority: 10, + enforce: true, + minChunks: 1, + maxSize: 3500000, + }, + }, + }, + minimizer: [ + new TerserJSPlugin({ + cache: true, + sourceMap: true, + }), + new OptimizeCSSAssetsPlugin({}), + ], + }, + devtool: 'none', +}; diff --git a/km-console/packages/config-manager-fe/package-lock.json b/km-console/packages/config-manager-fe/package-lock.json index 6c32859e..bf0a4f18 100644 --- a/km-console/packages/config-manager-fe/package-lock.json +++ b/km-console/packages/config-manager-fe/package-lock.json @@ -1344,6 +1344,16 @@ "@jridgewell/sourcemap-codec": "^1.4.10" } }, + "@knowdesign/icons": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/@knowdesign/icons/-/icons-1.0.0.tgz", + "integrity": "sha512-7c+h2TSbh2ihTkXIivuO+DddNC5wG7hVv9SS4ccmkvTKls2ZTLitPu+U0wpufDxPhkPMaKEQfsECsVJ+7jLMiw==", + "requires": { + "@ant-design/colors": "^6.0.0", + "@ant-design/icons": "^4.7.0", + "react": "16.12.0" + } + }, "@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -6815,9 +6825,9 @@ "dev": true }, "is-callable": { - "version": "1.2.5", - "resolved": "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.5.tgz", - "integrity": 
"sha512-ZIWRujF6MvYGkEuHMYtFRkL2wAtFw89EHfKlXrkPkjQZZRWeh9L1q3SV13NIfHnqxugjLvAOkEHx9mb1zcMnEw==", + "version": "1.2.6", + "resolved": "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.6.tgz", + "integrity": "sha512-krO72EO2NptOGAX2KYyqbP9vYMlNAXdB53rq6f8LXY6RY7JdSR/3BD6wLUlPHSAesmY9vstNrjvqGaCiRK/91Q==", "dev": true }, "is-color-stop": { diff --git a/km-console/packages/config-manager-fe/package.json b/km-console/packages/config-manager-fe/package.json index 2b820594..945e8b5c 100644 --- a/km-console/packages/config-manager-fe/package.json +++ b/km-console/packages/config-manager-fe/package.json @@ -21,9 +21,11 @@ "build": "cross-env NODE_ENV=production webpack --max_old_space_size=8000" }, "dependencies": { + "@knowdesign/icons": "^1.0.0", "babel-preset-react-app": "^10.0.0", "classnames": "^2.2.6", "dotenv": "^16.0.1", + "knowdesign": "1.3.7", "less": "^3.9.0", "lodash": "^4.17.11", "mobx": "4.15.7", @@ -36,8 +38,7 @@ "react-intl": "^3.2.1", "react-router-cache-route": "^1.11.1", "single-spa": "^5.8.0", - "single-spa-react": "^2.14.0", - "knowdesign": "1.3.7" + "single-spa-react": "^2.14.0" }, "devDependencies": { "@ant-design/icons": "^4.6.2", diff --git a/km-console/packages/config-manager-fe/src/components/TypicalListCard/index.less b/km-console/packages/config-manager-fe/src/components/TypicalListCard/index.less index 06516afa..9a48d134 100644 --- a/km-console/packages/config-manager-fe/src/components/TypicalListCard/index.less +++ b/km-console/packages/config-manager-fe/src/components/TypicalListCard/index.less @@ -22,6 +22,20 @@ display: flex; justify-content: space-between; margin-bottom: 12px; + .left, + .right { + display: flex; + align-items: center; + } + .left .refresh-icon { + font-size: 20px; + color: #74788d; + cursor: pointer; + } + .right .search-input { + width: 248px; + margin-right: 8px; + } } } } diff --git a/km-console/packages/config-manager-fe/src/constants/axiosConfig.ts 
b/km-console/packages/config-manager-fe/src/constants/axiosConfig.ts index f08ebc77..3e05e105 100644 --- a/km-console/packages/config-manager-fe/src/constants/axiosConfig.ts +++ b/km-console/packages/config-manager-fe/src/constants/axiosConfig.ts @@ -47,8 +47,8 @@ serviceInstance.interceptors.response.use( return res; }, (err: any) => { - const config = err.config; - if (!config || !config.retryTimes) return dealResponse(err, config.customNotification); + const config = err?.config; + if (!config || !config.retryTimes) return dealResponse(err); const { __retryCount = 0, retryDelay = 300, retryTimes } = config; config.__retryCount = __retryCount; if (__retryCount >= retryTimes) { diff --git a/km-console/packages/config-manager-fe/src/pages/CommonConfig.tsx b/km-console/packages/config-manager-fe/src/pages/CommonConfig.tsx index 2a9a9a74..835abf51 100644 --- a/km-console/packages/config-manager-fe/src/pages/CommonConfig.tsx +++ b/km-console/packages/config-manager-fe/src/pages/CommonConfig.tsx @@ -1,6 +1,6 @@ import React, { useLayoutEffect } from 'react'; import { Utils, AppContainer } from 'knowdesign'; -import { goLogin } from 'constants/axiosConfig'; +import { goLogin } from '@src/constants/axiosConfig'; // 权限对应表 export enum ConfigPermissionMap { diff --git a/km-console/packages/config-manager-fe/src/pages/ConfigManage/index.tsx b/km-console/packages/config-manager-fe/src/pages/ConfigManage/index.tsx index 84b5d2f6..1430fbb6 100644 --- a/km-console/packages/config-manager-fe/src/pages/ConfigManage/index.tsx +++ b/km-console/packages/config-manager-fe/src/pages/ConfigManage/index.tsx @@ -15,6 +15,7 @@ import { AppContainer, Utils, } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import { PlusOutlined } from '@ant-design/icons'; import moment from 'moment'; // 引入代码编辑器 @@ -26,8 +27,8 @@ import 'codemirror/addon/selection/active-line'; import 'codemirror/addon/edit/closebrackets'; require('codemirror/mode/xml/xml'); 
require('codemirror/mode/javascript/javascript'); -import api from 'api'; -import { defaultPagination } from 'constants/common'; +import api from '@src/api'; +import { defaultPagination } from '@src/constants/common'; import TypicalListCard from '../../components/TypicalListCard'; import { ConfigPermissionMap } from '../CommonConfig'; import { ConfigOperate, ConfigProps } from './config'; @@ -384,7 +385,7 @@ export default () => { const onDelete = (record: ConfigProps) => { confirm({ title: '确定删除配置吗?', - content: `配置⌈${record.valueName}⌋${record.status === 1 ? '为启用状态,无法删除' : ''}`, + content: `配置 [${record.valueName}] ${record.status === 1 ? '为启用状态,无法删除' : ''}`, centered: true, okText: '删除', okType: 'primary', @@ -398,9 +399,11 @@ export default () => { }, maskClosable: true, onOk() { - return request(api.editConfig, { - method: 'POST', - data: record.id, + return request(api.delConfig, { + method: 'DELETE', + params: { + id: record.id, + }, }).then((_) => { message.success('删除成功'); getConfigList(); @@ -431,22 +434,28 @@ export default () => {
-
getConfigList({ page: 1 })}> - - - - - - - - - -
+
+
getConfigList()}> + +
+ +
getConfigList({ page: 1 })}> + + + + + + + + + +
+
{global.hasPermission && global.hasPermission(ConfigPermissionMap.CONFIG_ADD) ? ( - - +
+
getData()}> + +
+ + +
getData({ page: 1 })}> + + + + + + + + + + + + +
+
{ return ( <> -
- { - setSearchKeywords(searchKeywordsInput); - }} - style={{ fontSize: '16px' }} - /> - } - placeholder="请输入角色名称" - value={searchKeywordsInput} - onPressEnter={(_) => { - setSearchKeywords(searchKeywordsInput); - }} - onChange={(e) => { - setSearchKeywordsInput(e.target.value); - }} - /> - {global.hasPermission && global.hasPermission(ConfigPermissionMap.ROLE_ADD) ? ( - - ) : ( - <> - )} +
+
+
getRoleList()}> + +
+
+
+ { + setSearchKeywords(searchKeywordsInput); + }} + style={{ fontSize: '16px' }} + /> + } + placeholder="请输入角色名称" + value={searchKeywordsInput} + onPressEnter={(_) => { + setSearchKeywords(searchKeywordsInput); + }} + onChange={(e) => { + setSearchKeywordsInput(e.target.value); + }} + /> + {global.hasPermission && global.hasPermission(ConfigPermissionMap.ROLE_ADD) ? ( + + ) : ( + <> + )} +
{ return ( <>
-
getUserList({ page: 1 })}> - - - - - - - - + + + + + + { }} onPressEnter={searchFn} /> - - { }} onPressEnter={searchFn} /> - +
{/* */}
-
+ {/*
*/}
)} {/* diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/RebalancePlan.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/RebalancePlan.tsx index 4b11b2e7..024fced3 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/RebalancePlan.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/RebalancePlan.tsx @@ -13,8 +13,8 @@ interface PropsType { } const typeObj: any = { - 1: '周期均衡', - 2: '立即均衡', + 1: '立即均衡', + 2: '周期均衡', }; const { request, post } = Utils; diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/TeskDetails.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/TeskDetails.tsx index d3d710cb..fddbc808 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/TeskDetails.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/TeskDetails.tsx @@ -1,6 +1,7 @@ /* eslint-disable react/display-name */ import React, { useState, useEffect } from 'react'; -import { Alert, Badge, Dropdown, IconFont, ProTable, Space, Table, Utils } from 'knowdesign'; +import { Alert, Badge, Dropdown, ProTable, Space, Table, Utils } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import { useParams } from 'react-router-dom'; import Api from '@src/api'; import { getTaskDetailsColumns, getMoveBalanceColumns } from './config'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/ViewJobsProgress.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/ViewJobsProgress.tsx index 38cff3a6..b052cb92 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/ViewJobsProgress.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/ViewJobsProgress.tsx @@ -1,7 +1,8 @@ import React, { useState, useEffect } from 'react'; import moment from 'moment'; import { useParams } from 'react-router-dom'; -import { Button, Drawer, Utils, Descriptions, Tabs, Input, IconFont, message, Spin, InputNumber } from 'knowdesign'; +import { Button, Drawer, Utils, Descriptions, 
Tabs, Input, message, Spin, InputNumber } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import TaskDetails from './TeskDetails'; import NodeTraffic from './NodeTraffic'; import RebalancePlan from './RebalancePlan'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx index 5dd24ce9..d4322f40 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/config.tsx @@ -89,12 +89,9 @@ export const getJobsListColumns = (arg?: any) => { title: '任务执行对象', dataIndex: 'target', key: 'target', + width: 232, render(t: any, r: any) { - return ( -
- `共有${num}个`} /> -
- ); + return `共有${num}个`} />; }, }, { diff --git a/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx index 84e70298..3e08d3a5 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Jobs/index.tsx @@ -1,6 +1,7 @@ import React, { useState, useEffect, memo } from 'react'; import { useParams, useHistory, useLocation } from 'react-router-dom'; -import { ProTable, Drawer, Utils, AppContainer, Form, Select, Input, Button, message, Modal } from 'knowdesign'; +import { ProTable, Drawer, Utils, AppContainer, Form, Select, Input, Button, message, Modal, Divider } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import API from '../../api'; import { getJobsListColumns, defaultPagination, runningStatus, jobType } from './config'; import JobsCheck from '@src/components/CardBar/JobsCheck'; @@ -10,6 +11,7 @@ import './index.less'; import ReplicaChange from '@src/components/TopicJob/ReplicaChange'; import ReplicaMove from '@src/components/TopicJob/ReplicaMove'; import BalanceDrawer from '../LoadRebalance/BalanceDrawer'; +import { tableHeaderPrefix } from '@src/constants/common'; const { request } = Utils; const JobsList: React.FC = (props: any) => { @@ -171,35 +173,44 @@ const JobsList: React.FC = (props: any) => { {/* */}
-
- - - - - - + + + + + + @@ -354,8 +375,7 @@ const LoadBalance: React.FC = (props: any) => { */} - -
+
{ const intl = useIntl(); const [form] = Form.useForm(); const [loading, setLoading] = React.useState(false); + const [confirmLoading, setConfirmLoading] = React.useState(false); const [curClusterInfo, setCurClusterInfo] = React.useState({}); - const [security, setSecurity] = React.useState(curClusterInfo?.security || 'None'); const [extra, setExtra] = React.useState({ versionExtra: '', zooKeeperExtra: '', bootstrapExtra: '', jmxExtra: '', }); - const [isLowVersion, setIsLowVersion] = React.useState(false); - const [zookeeperErrorStatus, setZookeeperErrorStatus] = React.useState(false); const lastFormItemValue = React.useRef({ - bootstrap: curClusterInfo?.bootstrapServers || '', + bootstrapServers: curClusterInfo?.bootstrapServers || '', zookeeper: curClusterInfo?.zookeeper || '', clientProperties: curClusterInfo?.clientProperties || {}, }); - const onHandleValuesChange = (value: any, allValues: any) => { - Object.keys(value).forEach((key) => { + const onHandleValuesChange = (changedValue: string[]) => { + Object.keys(changedValue).forEach((key) => { switch (key) { - case 'security': - setSecurity(value.security); - break; case 'zookeeper': - setExtra({ - ...extra, - zooKeeperExtra: '', - bootstrapExtra: '', - jmxExtra: '', - }); - break; case 'bootstrapServers': setExtra({ ...extra, @@ -78,21 +65,19 @@ const AccessClusters = (props: any): JSX.Element => { const onCancel = () => { form.resetFields(); setLoading(false); - setZookeeperErrorStatus(false); - setIsLowVersion(false); - setSecurity('None'); setExtra({ versionExtra: '', zooKeeperExtra: '', bootstrapExtra: '', jmxExtra: '', }); - lastFormItemValue.current = { bootstrap: '', zookeeper: '', clientProperties: {} }; + lastFormItemValue.current = { bootstrapServers: '', zookeeper: '', clientProperties: {} }; props.setVisible && props.setVisible(false); }; const onSubmit = () => { form.validateFields().then((res) => { + setConfirmLoading(true); let clientProperties = null; try { clientProperties = 
res.clientProperties && JSON.parse(res.clientProperties); @@ -107,7 +92,7 @@ const AccessClusters = (props: any): JSX.Element => { jmxProperties: { jmxPort: res.jmxPort, maxConn: res.maxConn, - openSSL: res.security === 'Password', + openSSL: res.openSSL || false, token: res.token, username: res.username, }, @@ -115,7 +100,7 @@ const AccessClusters = (props: any): JSX.Element => { name: res.name, zookeeper: res.zookeeper || '', }; - setLoading(true); + if (!isNaN(curClusterInfo?.id)) { Utils.put(api.phyCluster, { ...params, @@ -127,7 +112,7 @@ const AccessClusters = (props: any): JSX.Element => { onCancel(); }) .finally(() => { - setLoading(false); + setConfirmLoading(false); }); } else { Utils.post(api.phyCluster, params) @@ -137,7 +122,7 @@ const AccessClusters = (props: any): JSX.Element => { onCancel(); }) .finally(() => { - setLoading(false); + setConfirmLoading(false); }); } }); @@ -154,125 +139,224 @@ const AccessClusters = (props: any): JSX.Element => { } setLoading(true); - setIsLowVersion(false); - setZookeeperErrorStatus(false); return Utils.post(api.kafkaValidator, { bootstrapServers: bootstrapServers || '', zookeeper: zookeeper || '', clientProperties, }) - .then((res: any) => { - form.setFieldsValue({ - jmxPort: res.jmxPort, - }); + .then( + (res: { + errList: { code: number; message: string; data: any }[]; + jmxPort: number | null; + kafkaVersion: string | null; + zookeeper: string | null; + }) => { + const changedValue: { jmxPort?: number; kafkaVersion?: string; zookeeper: string } = { + zookeeper: zookeeper || res.zookeeper, + }; + if (res.kafkaVersion && props.kafkaVersion.includes(res.kafkaVersion)) { + changedValue.kafkaVersion = res.kafkaVersion; + } + if (res.jmxPort) { + changedValue.jmxPort = res.jmxPort; + } + form.setFieldsValue(changedValue); - if (props.kafkaVersion.indexOf(res.kafkaVersion) > -1) { - form.setFieldsValue({ - kafkaVersion: res.kafkaVersion, - }); - } else { - form.setFieldsValue({ - kafkaVersion: undefined, + const 
extraMsg = { + ...extra, + // 重置默认信息为连接成功 + bootstrapExtra: bootstrapServers ? '连接成功' : '', + zooKeeperExtra: zookeeper ? '连接成功' : '', + }; + + const errList = res.errList || []; + // 处理错误信息 + errList.forEach((item: any) => { + const { code, message } = item; + let modifyKey: 'bootstrapExtra' | 'zooKeeperExtra' | 'jmxExtra' | undefined; + if (bootstrapServersErrCodes.includes(code)) { + modifyKey = 'bootstrapExtra'; + } else if (zkErrCodes.includes(code)) { + modifyKey = 'zooKeeperExtra'; + } else if (jmxErrCodes.includes(code)) { + modifyKey = 'jmxExtra'; + } + + if (modifyKey) { + extraMsg[modifyKey] = message; + } }); + + setExtra(extraMsg); + return res; } - - form.setFieldsValue({ - zookeeper: zookeeper || res.zookeeper, - }); - - const errList = res.errList || []; - - const extraMsg = extra; - - // 初始化信息为连接成功 - extraMsg.bootstrapExtra = bootstrapServers ? '连接成功' : ''; - extraMsg.zooKeeperExtra = zookeeper ? '连接成功' : ''; - - // 处理错误信息 - errList.forEach((item: any) => { - const { code, message } = item; - let modifyKey: 'bootstrapExtra' | 'zooKeeperExtra' | 'jmxExtra' | undefined; - if (bootstrapServersErrCodes.includes(code)) { - modifyKey = 'bootstrapExtra'; - } else if (zkErrCodes.includes(code)) { - modifyKey = 'zooKeeperExtra'; - } else if (jmxErrCodes.includes(code)) { - modifyKey = 'jmxExtra'; - } - - if (modifyKey) { - extraMsg[modifyKey] = `连接失败。${message}`; - } - }); - - // 如果kafkaVersion小于最低版本则提示 - const showLowVersion = !( - curClusterInfo?.zookeeper || - !curClusterInfo?.kafkaVersion || - curClusterInfo?.kafkaVersion >= lowKafkaVersion - ); - setIsLowVersion(showLowVersion); - setExtra({ - ...extraMsg, - versionExtra: showLowVersion ? 
intl.formatMessage({ id: 'access.cluster.low.version.tip' }) : '', - }); - return res; - }) + ) .finally(() => { setLoading(false); }); }; + // 更新表单状态 React.useEffect(() => { - const showLowVersion = !(curClusterInfo?.zookeeper || !curClusterInfo?.kafkaVersion || curClusterInfo?.kafkaVersion >= lowKafkaVersion); lastFormItemValue.current = { - bootstrap: curClusterInfo?.bootstrapServers || '', + bootstrapServers: curClusterInfo?.bootstrapServers || '', zookeeper: curClusterInfo?.zookeeper || '', clientProperties: curClusterInfo?.clientProperties || {}, }; - setIsLowVersion(showLowVersion); - setExtra({ - ...extra, - versionExtra: showLowVersion ? intl.formatMessage({ id: 'access.cluster.low.version.tip' }) : '', - }); form.setFieldsValue({ ...curClusterInfo }); + if (curClusterInfo?.kafkaVersion) { + form.validateFields(['kafkaVersion']); + } }, [curClusterInfo]); + // 获取集群详情数据 React.useEffect(() => { if (visible) { if (clusterInfo?.id) { setLoading(true); + + const resolveJmxProperties = (obj: any) => { + const res = { ...obj }; + try { + const originValue = obj?.jmxProperties; + if (originValue) { + const jmxProperties = JSON.parse(originValue); + typeof jmxProperties === 'object' && jmxProperties !== null && Object.assign(res, jmxProperties); + } + } catch (err) { + console.error('jmxProperties not JSON: ', err); + } + return res; + }; + Utils.request(api.getPhyClusterBasic(clusterInfo.id)) .then((res: any) => { - let jmxProperties = null; - try { - jmxProperties = JSON.parse(res?.jmxProperties); - } catch (err) { - console.error(err); - } - - // 转化值对应成表单值 - if (jmxProperties?.openSSL) { - jmxProperties.security = 'Password'; - } - - if (jmxProperties) { - res = Object.assign({}, res || {}, jmxProperties); - } - setCurClusterInfo(res); - setLoading(false); + setCurClusterInfo(resolveJmxProperties(res)); }) .catch((err) => { - setCurClusterInfo(clusterInfo); + setCurClusterInfo(resolveJmxProperties(clusterInfo)); + }) + .finally(() => { setLoading(false); }); } 
else { - setCurClusterInfo(clusterInfo); + setCurClusterInfo({}); } } }, [visible, clusterInfo]); + const validators = { + name: async (_: any, value: string) => { + if (!value) { + return Promise.reject('集群名称不能为空'); + } + if (value === curClusterInfo?.name) { + return Promise.resolve(); + } + if (value?.length > 128) { + return Promise.reject('集群名称长度限制在1~128字符'); + } + if (!new RegExp(regClusterName).test(value)) { + return Promise.reject('集群名称支持中英文、数字、特殊字符 ! " # $ % & \' ( ) * + , - . / : ; < = > ? @ [ ] ^ _ ` { | } ~'); + } + return Utils.request(api.getClusterBasicExit(value)) + .then((res: any) => { + const data = res || {}; + return data?.exist ? Promise.reject('集群名称重复') : Promise.resolve(); + }) + .catch(() => Promise.reject('连接超时! 请重试或检查服务')); + }, + bootstrapServers: async (_: any, value: string) => { + if (!value) { + return Promise.reject('Bootstrap Servers不能为空'); + } + if (value.length > 2000) { + return Promise.reject('Bootstrap Servers长度限制在2000字符'); + } + if (value && value !== lastFormItemValue.current.bootstrapServers) { + lastFormItemValue.current.bootstrapServers = value; + return connectTest().catch(() => (lastFormItemValue.current.bootstrapServers = '')); + } + return Promise.resolve(''); + }, + zookeeper: async (_: any, value: string) => { + if (!value) { + return Promise.resolve(''); + } + + if (value.length > 2000) { + return Promise.reject('Zookeeper长度限制在2000字符'); + } + + if (value && value !== lastFormItemValue.current.zookeeper) { + lastFormItemValue.current.zookeeper = value; + return connectTest().catch(() => (lastFormItemValue.current.zookeeper = '')); + } + return Promise.resolve(''); + }, + securityUserName: async (_: any, value: string) => { + if (!value) { + return Promise.reject('用户名不能为空'); + } + if (!new RegExp(regUsername).test(value)) { + return Promise.reject('仅支持大小写、下划线、短划线(-)'); + } + if (value.length > 128) { + return Promise.reject('用户名长度限制在1~128字符'); + } + return Promise.resolve(); + }, + securityToken: async (_: any, 
value: string) => { + if (!value) { + return Promise.reject('密码不能为空'); + } + if (!new RegExp(regUsername).test(value)) { + return Promise.reject('密码只能由大小写、下划线、短划线(-)组成'); + } + if (value.length < 6 || value.length > 32) { + return Promise.reject('密码长度限制在6~32字符'); + } + return Promise.resolve(); + }, + kafkaVersion: async (_: any, value: any) => { + if (!value) { + return Promise.reject('版本号不能为空'); + } + // 检测版本号小于2.8.0,如果没有填zookeeper信息,才会提示 + const zookeeper = form.getFieldValue('zookeeper'); + let versionExtra = ''; + if (value < LOW_KAFKA_VERSION && !zookeeper) { + versionExtra = intl.formatMessage({ id: 'access.cluster.low.version.tip' }); + } + setExtra({ + ...extra, + versionExtra, + }); + return Promise.resolve(); + }, + clientProperties: async (_: any, value: string) => { + try { + if (value) { + JSON.parse(value); + } + + return Promise.resolve(); + } catch (e) { + return Promise.reject(new Error('输入内容必须为 JSON')); + } + }, + description: async (_: any, value: string) => { + if (!value) { + return Promise.resolve(''); + } + if (value && value.length > 200) { + return Promise.reject('集群描述长度限制在200字符'); + } + return Promise.resolve(); + }, + }; + return ( <> { -
} - title={intl.formatMessage({ id: props.title || 'access.cluster' })} + title={intl.formatMessage({ id: props.title || clusterInfo?.id ? 'edit.cluster' : 'access.cluster' })} visible={props.visible} placement="right" width={480} @@ -306,30 +390,7 @@ const AccessClusters = (props: any): JSX.Element => { rules={[ { required: true, - validator: async (rule: any, value: string) => { - if (!value) { - return Promise.reject('集群名称不能为空'); - } - if (value === curClusterInfo?.name) { - return Promise.resolve(); - } - if (value?.length > 128) { - return Promise.reject('集群名称长度限制在1~128字符'); - } - if (!new RegExp(regClusterName).test(value)) { - return Promise.reject( - '集群名称支持中英文、数字、特殊字符 ! " # $ % & \' ( ) * + , - . / : ; < = > ? @ [ ] ^ _ ` { | } ~' - ); - } - return Utils.request(api.getClusterBasicExit(value)).then((res: any) => { - const data = res || {}; - if (data?.exist) { - return Promise.reject('集群名称重复'); - } else { - return Promise.resolve(); - } - }); - }, + validator: validators.name, }, ]} > @@ -338,31 +399,12 @@ const AccessClusters = (props: any): JSX.Element => { {extra.bootstrapExtra}} + extra={{extra.bootstrapExtra}} validateTrigger={'onBlur'} rules={[ { required: true, - validator: async (rule: any, value: string) => { - if (!value) { - return Promise.reject('Bootstrap Servers不能为空'); - } - if (value.length > 2000) { - return Promise.reject('Bootstrap Servers长度限制在2000字符'); - } - if (value && value !== lastFormItemValue.current.bootstrap) { - return connectTest() - .then((res: any) => { - lastFormItemValue.current.bootstrap = value; - - return Promise.resolve(''); - }) - .catch((err) => { - return Promise.reject('连接失败'); - }); - } - return Promise.resolve(''); - }, + validator: validators.bootstrapServers, }, ]} > @@ -374,36 +416,11 @@ const AccessClusters = (props: any): JSX.Element => { {extra.zooKeeperExtra}} - validateStatus={zookeeperErrorStatus ? 
'error' : 'success'} + extra={{extra.zooKeeperExtra}} validateTrigger={'onBlur'} rules={[ { - required: false, - validator: async (rule: any, value: string) => { - if (!value) { - setZookeeperErrorStatus(false); - return Promise.resolve(''); - } - - if (value.length > 2000) { - return Promise.reject('Zookeeper长度限制在2000字符'); - } - - if (value && value !== lastFormItemValue.current.zookeeper) { - return connectTest() - .then((res: any) => { - lastFormItemValue.current.zookeeper = value; - setZookeeperErrorStatus(false); - return Promise.resolve(''); - }) - .catch((err) => { - setZookeeperErrorStatus(true); - return Promise.reject('连接失败'); - }); - } - return Promise.resolve(''); - }, + validator: validators.zookeeper, }, ]} > @@ -412,142 +429,65 @@ const AccessClusters = (props: any): JSX.Element => { placeholder="请输入Zookeeper地址,例如:192.168.0.1:2181,192.168.0.2:2181,192.168.0.2:2181/ks-kafka" /> - - <> - - - - - - - - - - None - Password Authentication - - - {security === 'Password' ? ( - <> - { - if (!value) { - return Promise.reject('用户名不能为空'); - } - if (!new RegExp(regUsername).test(value)) { - return Promise.reject('仅支持大小写、下划线、短划线(-)'); - } - if (value.length > 128) { - return Promise.reject('用户名长度限制在1~128字符'); - } - return Promise.resolve(); - }, - }, - ]} - > - + +
+
+ + + + + + +
+ + + None + Password Authentication + - { - if (!value) { - return Promise.reject('密码不能为空'); - } - if (!new RegExp(regUsername).test(value)) { - return Promise.reject('密码只能由大小写、下划线、短划线(-)组成'); - } - if (value.length < 6 || value.length > 32) { - return Promise.reject('密码长度限制在6~32字符'); - } - return Promise.resolve(); - }, - }, - ]} - > - + + {({ getFieldValue }) => { + return getFieldValue('openSSL') ? ( +
+ +
+ + + + + + +
+
+ ) : null; + }}
- - ) : null} +
+
{extra.versionExtra}} - validateStatus={isLowVersion ? 'error' : 'success'} rules={[ { required: true, - validator: async (rule: any, value: any) => { - if (!value) { - setIsLowVersion(true); - return Promise.reject('版本号不能为空'); - } - // 检测版本号小于2.8.0,如果没有填zookeeper信息,才会提示 - const zookeeper = form.getFieldValue('zookeeper'); - if (value < lowKafkaVersion && !zookeeper) { - setIsLowVersion(true); - setExtra({ - ...extra, - versionExtra: intl.formatMessage({ id: 'access.cluster.low.version.tip' }), - }); - return Promise.resolve(); - } - setIsLowVersion(false); - return Promise.resolve(); - }, + validator: validators.kafkaVersion, }, ]} > @@ -565,29 +505,15 @@ const AccessClusters = (props: any): JSX.Element => { label="集群配置" rules={[ { - required: false, - message: '请输入集群配置', + validator: validators.clientProperties, }, - () => ({ - validator(_, value) { - try { - if (value) { - JSON.parse(value); - } - - return Promise.resolve(); - } catch (e) { - return Promise.reject(new Error('输入内容必须为 JSON')); - } - }, - }), ]} >
{ form.setFieldsValue({ clientProperties }); form.validateFields(['clientProperties']); @@ -621,20 +547,11 @@ const AccessClusters = (props: any): JSX.Element => { label="集群描述" rules={[ { - required: false, - validator: async (rule: any, value: string) => { - if (!value) { - return Promise.resolve(''); - } - if (value && value.length > 200) { - return Promise.reject('集群描述长度限制在200字符'); - } - return Promise.resolve(); - }, + validator: validators.description, }, ]} > - + diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx index 473bb2dc..6cebe36a 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/HomePage.tsx @@ -1,5 +1,6 @@ import React, { useEffect, useRef, useState } from 'react'; -import { Slider, Input, Select, Checkbox, Button, Utils, Spin, IconFont, AppContainer } from 'knowdesign'; +import { Slider, Input, Select, Checkbox, Button, Utils, Spin, AppContainer } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import API from '@src/api'; import TourGuide, { MultiPageSteps } from '@src/components/TourGuide'; import './index.less'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx index a65ac3fd..d15cffcb 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/List.tsx @@ -1,4 +1,5 @@ -import { AppContainer, Divider, Form, IconFont, Input, List, message, Modal, Progress, Spin, Tooltip, Utils } from 'knowdesign'; +import { AppContainer, Divider, Form, Input, List, message, Modal, Progress, Spin, Tooltip, Utils } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import moment from 
'moment'; import API from '@src/api'; import React, { useEffect, useImperativeHandle, useMemo, useRef, useState } from 'react'; @@ -16,6 +17,10 @@ import { SearchParams } from './HomePage'; const DEFAULT_PAGE_SIZE = 10; +enum ClusterRunState { + Raft = 2, +} + const DeleteCluster = React.forwardRef((_, ref) => { const intl = useIntl(); const [form] = Form.useForm(); @@ -245,6 +250,7 @@ const ClusterList = (props: { searchParams: SearchParams; showAccessCluster: any metricPoints.push(line); }); + const runState = itemData.runState; const { Brokers: brokers, Zookeepers: zks, @@ -345,18 +351,21 @@ const ClusterList = (props: { searchParams: SearchParams; showAccessCluster: any
{brokers}
-
-
- - ZK + {/* 2: raft 模式 无zk */} + {runState !== ClusterRunState.Raft && ( +
+
+ + ZK +
+
{zookeepersAvailable === -1 ? '-' : zks}
-
{zookeepersAvailable === -1 ? '-' : zks}
-
+ )}
{metricPoints.map((row, index) => { diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less index 3108c4c0..f459fd1c 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/index.less @@ -656,43 +656,37 @@ color: @error-color; } } - .inline-item.dcloud-form-item { - display: -webkit-inline-box; - margin-right: 16px; - - &.adjust-height-style { - .dcloud-form-item-label { - padding: 0; - label { - height: 36px; - } - } - .dcloud-form-item-control { - &-input { - height: 36px; - } - } + .horizontal-form-container { + padding-left: 16px; + .inline-items { + display: flex; + justify-content: space-between; } - - &.max-width-66 { - .dcloud-form-item-control { - max-width: 66%; - } - } - - .dcloud-form-item-label { - margin-right: 12px; - - label { + .dcloud-form-item { + flex-direction: row; + align-items: center; + &-label { + padding: 0 12px 0 0; + font-size: 13px; font-family: @font-family; + color: #74788d; } } - } - - .no-item-control { - margin-bottom: 8px !important; - .dcloud-form-item-control { - display: none; + .metrics-form-item { + margin-top: 8px; + } + .user-info-form-items { + display: flex; + align-items: flex-start; + .user-info-label { + padding-top: 4px; + } + .inline-items { + flex: 0 0 80%; + .token-form-item { + margin-left: 16px; + } + } } } } diff --git a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.less b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.less index 54f77499..1d962f93 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.less +++ b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.less @@ -10,11 +10,6 @@ box-shadow: 0 2px 4px 0 rgba(0, 0, 0, 0.01), 0 3px 6px 3px rgba(0, 0, 0, 0.01), 0 2px 6px 0 rgba(0, 0, 0, 0.03); // 
border-radius: 12px; } - .operate-bar { - display: flex; - justify-content: space-between; - margin-bottom: 12px; - } } .acls-edit-drawer { diff --git a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx index 3f51bc55..a98f6269 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx @@ -1,7 +1,9 @@ import React, { useEffect, useRef, useState } from 'react'; -import { Button, Form, Input, Select, Modal, message, ProTable, AppContainer, DKSBreadcrumb, Utils } from 'knowdesign'; +import { Button, Form, Input, Select, Modal, message, ProTable, AppContainer, DKSBreadcrumb, Utils, Divider } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import ACLsCardBar from '@src/components/CardBar/ACLsCardBar'; import api from '@src/api'; +import { tableHeaderPrefix } from '@src/constants/common'; import { useParams } from 'react-router-dom'; import AddACLDrawer, { ACL_OPERATION, @@ -205,37 +207,45 @@ const SecurityACLs = (): JSX.Element => {
-
-
getACLs({ page: 1 })}> - - - - - - - - - - - +
+
+
getACLs()}> + +
+ +
getACLs({ page: 1 })}> + + + + + + + + + + +
+
+ +
{ const maxPos = chars.length; @@ -426,34 +427,41 @@ const SecurityUsers = (): JSX.Element => { ]} />
-
- { - setSearchKeywords(searchKeywordsInput); - }} - style={{ fontSize: '16px' }} - /> - } - placeholder="请输入 Kafka User" - value={searchKeywordsInput} - onPressEnter={(_) => { - setSearchKeywords(searchKeywordsInput); - }} - onChange={(e) => { - setSearchKeywordsInput(e.target.value); - }} - /> - +
+
+
getKafkaUserList()}> + +
+
+
+ { + setSearchKeywords(searchKeywordsInput); + }} + style={{ fontSize: '16px' }} + /> + } + placeholder="请输入 Kafka User" + value={searchKeywordsInput} + onPressEnter={(_) => { + setSearchKeywords(searchKeywordsInput); + }} + onChange={(e) => { + setSearchKeywordsInput(e.target.value); + }} + /> + +
div { width: 100%; height: 100%; @@ -73,65 +40,64 @@ justify-content: center; align-items: center; } - - .chart-box { - position: relative; - width: 100%; - height: 244px; - background: #f8f9fa; - border-radius: 8px; - - .expand-icon-box { - position: absolute; - z-index: 1000; - top: 14px; - right: 16px; - width: 24px; - height: 24px; - cursor: pointer; - font-size: 16px; - text-align: center; - border-radius: 50%; - transition: background-color 0.3s ease; - - .expand-icon { - color: #adb5bc; - line-height: 24px; - } - - &:hover { - background: rgba(33, 37, 41, 0.06); - .expand-icon { - color: #74788d; - } - } - } - } } .config-change-records-container { width: 240px; height: 100%; margin-left: 12px; - .cluster-container-border(); + .cluster-detail-container-border(); } } } +} - .chart-box-title { - padding: 18px 0 0 20px; - font-family: @font-family-bold; - line-height: 16px; - .name { - font-size: 14px; - color: #212529; +.cluster-detail-chart-box { + position: relative; + width: 100%; + height: 244px; + background: #f8f9fa; + border-radius: 8px; + + .expand-icon-box { + position: absolute; + z-index: 1000; + top: 14px; + right: 16px; + width: 24px; + height: 24px; + cursor: pointer; + font-size: 16px; + text-align: center; + border-radius: 50%; + transition: background-color 0.3s ease; + + .expand-icon { + color: #adb5bc; + line-height: 24px; } - .unit { - font-size: 12px; - color: #495057; - } - > span { - cursor: pointer; + + &:hover { + background: rgba(33, 37, 41, 0.06); + .expand-icon { + color: #74788d; + } } } } +.cluster-detail-chart-box-title { + padding: 18px 0 0 20px; + font-family: @font-family-bold; + line-height: 16px; + .name { + font-size: 14px; + color: #212529; + } + .unit { + font-size: 12px; + color: #495057; + } + > span { + cursor: pointer; + } +} diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx 
b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx index 6ecb7604..3df6ee1e 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/DetailChart/index.tsx @@ -1,29 +1,26 @@ -import { Col, Row, SingleChart, IconFont, Utils, Modal, Spin, Empty, AppContainer, Tooltip } from 'knowdesign'; +import { Col, Row, SingleChart, Utils, Modal, Spin, Empty, AppContainer, Tooltip } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import React, { useEffect, useRef, useState } from 'react'; +import { arrayMoveImmutable } from 'array-move'; import api from '@src/api'; -import { getChartConfig } from './config'; -import './index.less'; import { useParams } from 'react-router-dom'; import { MetricDefaultChartDataType, MetricChartDataType, formatChartData, supplementaryPoints, -} from '@src/components/DashboardDragChart/config'; + resolveMetricsRank, + MetricInfo, +} from '@src/constants/chartConfig'; import { MetricType } from '@src/api'; import { getDataNumberUnit, getUnit } from '@src/constants/chartConfig'; import SingleChartHeader, { KsHeaderOptions } from '@src/components/SingleChartHeader'; -import { MAX_TIME_RANGE_WITH_SMALL_POINT_INTERVAL } from '@src/constants/common'; import RenderEmpty from '@src/components/RenderEmpty'; +import DragGroup from '@src/components/DragGroup'; +import { getChartConfig } from './config'; +import './index.less'; type ChartFilterOptions = Omit; -interface MetricInfo { - type: number; - name: string; - desc: string; - set: boolean; - support: boolean; -} interface MessagesInDefaultData { aggType: string | null; @@ -70,8 +67,7 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { const [curHeaderOptions, setCurHeaderOptions] = useState(); const [defaultChartLoading, setDefaultChartLoading] = useState(true); const [chartLoading, setChartLoading] = 
useState(true); - const [showChartDetailModal, setShowChartDetailModal] = useState(false); - const [chartDetail, setChartDetail] = useState(); + const metricRankList = useRef([]); const curFetchingTimestamp = useRef({ messagesIn: 0, other: 0, @@ -90,36 +86,53 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { }); }; + // 更新 rank + const updateRank = (metricList: MetricInfo[]) => { + const { list, listInfo, shouldUpdate } = resolveMetricsRank(metricList); + metricRankList.current = list; + if (shouldUpdate) { + updateMetricList(listInfo); + } + }; + // 获取指标列表 const getMetricList = () => { Utils.request(api.getDashboardMetricList(clusterId, MetricType.Cluster)).then((res: MetricInfo[] | null) => { if (!res) return; - const showMetrics = res.filter((metric) => metric.support); - const selectedMetrics = showMetrics.filter((metric) => metric.set).map((metric) => metric.name); + const supportMetrics = res.filter((metric) => metric.support); + const selectedMetrics = supportMetrics.filter((metric) => metric.set).map((metric) => metric.name); !selectedMetrics.includes(DEFAULT_METRIC) && selectedMetrics.push(DEFAULT_METRIC); - setMetricList(showMetrics); + updateRank([...supportMetrics]); + setMetricList(supportMetrics); setSelectedMetricNames(selectedMetrics); }); }; // 更新指标 - const updateMetricList = (metricsSet: { [name: string]: boolean }) => { + const updateMetricList = (metricDetailDTOList: { metric: string; rank: number; set: boolean }[]) => { return Utils.request(api.getDashboardMetricList(clusterId, MetricType.Cluster), { method: 'POST', data: { - metricsSet, + metricDetailDTOList, }, }); }; // 指标选中项更新回调 const indicatorChangeCallback = (newMetricNames: (string | number)[]) => { - const updateMetrics: { [name: string]: boolean } = {}; + const updateMetrics: { metric: string; set: boolean; rank: number }[] = []; // 需要选中的指标 - newMetricNames.forEach((name) => !selectedMetricNames.includes(name) && (updateMetrics[name] = true)); + 
newMetricNames.forEach( + (name) => + !selectedMetricNames.includes(name) && + updateMetrics.push({ metric: name as string, set: true, rank: metricList.find(({ name: metric }) => metric === name)?.rank }) + ); // 取消选中的指标 - selectedMetricNames.forEach((name) => !newMetricNames.includes(name) && (updateMetrics[name] = false)); - + selectedMetricNames.forEach( + (name) => + !newMetricNames.includes(name) && + updateMetrics.push({ metric: name as string, set: false, rank: metricList.find(({ name: metric }) => metric === name)?.rank }) + ); const requestPromise = Object.keys(updateMetrics).length ? updateMetricList(updateMetrics) : Promise.resolve(); requestPromise.then( () => getMetricList(), @@ -155,15 +168,16 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { return; } - const supplementaryInterval = (endTime - startTime > MAX_TIME_RANGE_WITH_SMALL_POINT_INTERVAL ? 10 : 1) * 60 * 1000; const formattedMetricData: MetricChartDataType[] = formatChartData( res, global.getMetricDefine || {}, MetricType.Cluster, - curHeaderOptions.rangeTime, - supplementaryInterval + curHeaderOptions.rangeTime ); formattedMetricData.forEach((data) => (data.metricLines[0].name = data.metricName)); + // 指标排序 + formattedMetricData.sort((a, b) => metricRankList.current.indexOf(a.metricName) - metricRankList.current.indexOf(b.metricName)); + setMetricDataList(formattedMetricData); setChartLoading(false); }, @@ -241,9 +255,7 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { ...info, value: 0, })); - const supplementaryInterval = - (curHeaderOptions.rangeTime[1] - curHeaderOptions.rangeTime[0] > MAX_TIME_RANGE_WITH_SMALL_POINT_INTERVAL ? 
10 : 1) * 60 * 1000; - supplementaryPoints([line], curHeaderOptions.rangeTime, supplementaryInterval, (point) => { + supplementaryPoints([line], curHeaderOptions.rangeTime, (point) => { point.push(extraMetrics as any); return point; }); @@ -262,6 +274,22 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { targetNode && targetNode.addEventListener('click', () => busInstance.emit('chartResize')); }; + // 拖拽开始回调,触发图表的 onDrag 事件( 设置为 true ),禁止同步展示图表的 tooltip + const dragStart = () => { + busInstance.emit('onDrag', true); + }; + + // 拖拽结束回调,更新图表顺序,并触发图表的 onDrag 事件( 设置为 false ),允许同步展示图表的 tooltip + const dragEnd = ({ oldIndex, newIndex }: { oldIndex: number; newIndex: number }) => { + busInstance.emit('onDrag', false); + const originFrom = metricRankList.current.indexOf(metricDataList[oldIndex].metricName); + const originTarget = metricRankList.current.indexOf(metricDataList[newIndex].metricName); + const newList = arrayMoveImmutable(metricRankList.current, originFrom, originTarget); + metricRankList.current = newList; + updateMetricList(newList.map((metric, rank) => ({ metric, rank, set: metricList.find(({ name }) => metric === name)?.set || false }))); + setMetricDataList(arrayMoveImmutable(metricDataList, oldIndex, newIndex)); + }; + useEffect(() => { getMetricData(); }, [selectedMetricNames]); @@ -279,7 +307,7 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => { }, []); return ( -
+
{ {messagesInMetricData.data && ( <> -
+
{ @@ -354,14 +382,25 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => {
- - {metricDataList.length ? ( - metricDataList.map((data: any, i: number) => { - const { metricName, metricUnit, metricLines } = data; - return ( -
-
-
+ {metricDataList.length ? ( +
+ + {metricDataList.map((data: any, i: number) => { + const { metricName, metricUnit, metricLines } = data; + return ( +
+
{ @@ -379,15 +418,6 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => {
-
{ - setChartDetail(data); - setShowChartDetailModal(true); - }} - > - -
{ })} />
- - ); - }) - ) : chartLoading ? ( - <> - ) : ( - - )} - + ); + })} +
+
+ ) : chartLoading ? ( + <> + ) : ( + + )}
@@ -421,35 +451,6 @@ const DetailChart = (props: { children: JSX.Element }): JSX.Element => {
{props.children}
- - {/* 图表详情 */} - setShowChartDetailModal(false)} - > -
-
setShowChartDetailModal(false)}> - -
- {chartDetail && ( - - )} -
-
); }; diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx index 5a428b5b..bd5989d9 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/LeftSider.tsx @@ -1,4 +1,5 @@ -import { AppContainer, Divider, IconFont, Progress, Tooltip, Utils } from 'knowdesign'; +import { AppContainer, Divider, Progress, Tooltip, Utils } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import React, { useEffect, useState } from 'react'; import AccessClusters from '../MutliClusterPage/AccessCluster'; import './index.less'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/config.tsx index b2d9eb0c..6ab6e317 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/config.tsx @@ -2,7 +2,8 @@ import moment from 'moment'; import React from 'react'; import { timeFormat } from '../../constants/common'; import TagsWithHide from '../../components/TagsWithHide/index'; -import { Form, IconFont, InputNumber, Tooltip } from 'knowdesign'; +import { Form, InputNumber, Tooltip } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import { Link } from 'react-router-dom'; import { systemKey } from '../../constants/menu'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.less b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.less index 7baae7b8..3fa013de 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.less +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.less @@ -231,9 +231,10 @@ 
} .chart-panel { - flex: 1; + flex: auto; margin-left: 12px; margin-right: 10px; + overflow: hidden; } .change-log-panel { diff --git a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx index e4b4b2c6..59e679b3 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SingleClusterDetail/index.tsx @@ -21,11 +21,9 @@ const SingleClusterDetail = (): JSX.Element => {
-
- - - -
+ + +
diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx index d74d723f..be43d454 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/Consume.tsx @@ -1,6 +1,7 @@ /* eslint-disable no-case-declarations */ import { DownloadOutlined } from '@ant-design/icons'; -import { AppContainer, Divider, IconFont, message, Tooltip, Utils } from 'knowdesign'; +import { AppContainer, Divider, message, Tooltip, Utils } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import * as React from 'react'; import moment from 'moment'; import { timeFormat } from '../../constants/common'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx index d27f8d4e..fc529e9d 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingConsumer/config.tsx @@ -268,6 +268,7 @@ export const getFormConfig = (topicMetaData: any, info = {} as any, partitionLis type: FormItemType.inputNumber, attrs: { min: 1, + max: 1000, }, invisible: !info?.needMsgNum, rules: [ diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/EditTable.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/EditTable.tsx index 49e2568d..a5300af2 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/EditTable.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/component/EditTable.tsx @@ -1,6 +1,7 @@ /* eslint-disable react/display-name */ import React, { useState } from 'react'; -import { Table, Input, InputNumber, Popconfirm, Form, Typography, Button, message, IconFont, Select } from 
'knowdesign'; +import { Table, Input, InputNumber, Popconfirm, Form, Typography, Button, message, Select } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import './style/edit-table.less'; import { CheckOutlined, CloseOutlined, PlusSquareOutlined } from '@ant-design/icons'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx index 81503271..3e34ad67 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TestingProduce/config.tsx @@ -1,5 +1,6 @@ import { QuestionCircleOutlined } from '@ant-design/icons'; -import { IconFont, Switch, Tooltip } from 'knowdesign'; +import { Switch, Tooltip } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import { FormItemType, IFormItem } from 'knowdesign/es/extend/x-form'; import moment from 'moment'; import React from 'react'; @@ -152,6 +153,7 @@ export const getFormConfig = (params: any) => { rules: [{ required: true, message: '请输入' }], attrs: { min: 0, + max: 1000, style: { width: 232 }, }, }, @@ -391,7 +393,7 @@ export const getTableColumns = () => { { title: 'time', dataIndex: 'costTimeUnitMs', - width: 60, + width: 100, }, ]; }; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx index 1c40cd66..9b3a12a0 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx @@ -1,6 +1,7 @@ import React, { useCallback } from 'react'; import { useEffect, useState } from 'react'; -import { AppContainer, Button, Empty, IconFont, List, Popover, ProTable, Radio, Spin, Utils } from 'knowdesign'; +import { AppContainer, Button, Empty, List, Popover, ProTable, Radio, Spin, Utils } 
from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import { CloseOutlined } from '@ant-design/icons'; import api, { MetricType } from '@src/api'; import { useParams } from 'react-router-dom'; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConfigurationEdit.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConfigurationEdit.tsx index a2638384..7fc4ab80 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConfigurationEdit.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConfigurationEdit.tsx @@ -1,5 +1,6 @@ import React from 'react'; -import { Drawer, Form, Input, Space, Button, Utils, Row, Col, IconFont, Divider, message } from 'knowdesign'; +import { Drawer, Form, Input, Space, Button, Utils, Row, Col, Divider, message } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import { useParams } from 'react-router-dom'; import Api from '@src/api'; export const ConfigurationEdit = (props: any) => { diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/Messages.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/Messages.tsx index eecf792a..450cb59b 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/Messages.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/Messages.tsx @@ -1,5 +1,6 @@ import React, { useState, useEffect } from 'react'; -import { Alert, Button, Checkbox, Form, IconFont, Input, ProTable, Select, Tooltip, Utils } from 'knowdesign'; +import { Alert, Button, Checkbox, Form, Input, ProTable, Select, Tooltip, Utils } from 'knowdesign'; +import { IconFont } from '@knowdesign/icons'; import Api from '@src/api'; import { useParams, useHistory } from 'react-router-dom'; import { getTopicMessagesColmns } from './config'; @@ -10,7 +11,7 @@ const defaultParams: any = { maxRecords: 100, pullTimeoutUnitMs: 5000, // filterPartitionId: 1, - filterOffsetReset: 0 + 
filterOffsetReset: 0, }; const defaultpaPagination = { current: 1, @@ -32,8 +33,8 @@ const TopicMessages = (props: any) => { // 获取消息开始位置 const offsetResetList = [ - { 'label': 'latest', value: 0 }, - { 'label': 'earliest', value: 1 } + { label: 'latest', value: 0 }, + { label: 'earliest', value: 1 }, ]; // 默认排序 @@ -99,10 +100,10 @@ const TopicMessages = (props: any) => { const onTableChange = (pagination: any, filters: any, sorter: any, extra: any) => { setPagination(pagination); // 只有排序事件时,触发重新请求后端数据 - if(extra.action === 'sort') { + if (extra.action === 'sort') { setSorter({ sortField: sorter.field || '', - sortType: sorter.order ? sorter.order.substring(0, sorter.order.indexOf('end')) : '' + sortType: sorter.order ? sorter.order.substring(0, sorter.order.indexOf('end')) : '', }); } // const asc = sorter?.order && sorter?.order === 'ascend' ? true : false; @@ -137,11 +138,11 @@ const TopicMessages = (props: any) => { - - - */} - {/* - - */} - {/*
*/} + +
getTopicsList()}> + +
+
{ 展示系统Topic
-
+
p), - output: { - path: outPath, - publicPath: isProd ? process.env.PUBLIC_PATH + '/layout/' : '/', - filename: jsFileName, - chunkFilename: jsFileName, - library: 'layout', - libraryTarget: 'amd', - }, - devServer: { - host: 'localhost', - port: 8000, - hot: true, - open: true, - openPage: 'http://localhost:8000/', - inline: true, - historyApiFallback: true, - publicPath: `http://localhost:8000/`, - headers: { - 'cache-control': 'no-cache', - pragma: 'no-cache', - 'Access-Control-Allow-Origin': '*', - }, - proxy: { - '/ks-km/api/v3': { - changeOrigin: true, - target: 'http://localhost:8080/', - }, - '/logi-security/api/v1': { - changeOrigin: true, - target: 'http://localhost:8080/', - }, - }, - }, +const devMode = process.env.NODE_ENV === 'development'; +const commonConfig = require('./config/webpack.common'); +const devConfig = require('./config/webpack.dev'); +const prodConfig = require('./config/webpack.prod'); - resolve: { - alias: { - '@src': path.resolve(__dirname, 'src'), - }, - }, -}); +module.exports = merge(commonConfig, devMode ? 
devConfig : prodConfig); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/AbstractZKWatcher.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/AbstractZKWatcher.java index 261aff0a..e43f1b40 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/AbstractZKWatcher.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/AbstractZKWatcher.java @@ -5,7 +5,7 @@ import com.didiglobal.logi.log.LogFactory; import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; import com.xiaojukeji.know.streaming.km.common.exception.NotExistException; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.KafkaZkClient; import org.springframework.beans.factory.annotation.Autowired; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/AbstractZKHandler.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/AbstractZKHandler.java index e2ed09d1..04a14e87 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/AbstractZKHandler.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/AbstractZKHandler.java @@ -7,7 +7,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; import com.xiaojukeji.know.streaming.km.core.service.change.record.KafkaChangeRecordService; import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; public abstract class 
AbstractZKHandler { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/BrokersNodeChangeHandler.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/BrokersNodeChangeHandler.java index b7c93c2f..314195af 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/BrokersNodeChangeHandler.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/BrokersNodeChangeHandler.java @@ -9,7 +9,7 @@ import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil; import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService; import com.xiaojukeji.know.streaming.km.core.service.change.record.KafkaChangeRecordService; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.BrokerIdsZNode; import kafka.zookeeper.ZNodeChildChangeHandler; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ConfigNotificationNodeChangeHandler.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ConfigNotificationNodeChangeHandler.java index 91d91571..1e626632 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ConfigNotificationNodeChangeHandler.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ConfigNotificationNodeChangeHandler.java @@ -8,11 +8,11 @@ import com.xiaojukeji.know.streaming.km.common.enums.KafkaConfigTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum; import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil; import com.xiaojukeji.know.streaming.km.common.utils.Tuple; -import 
com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config.ConfigChangeNotificationBaseData; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config.ConfigChangeNotificationDataV1; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config.ConfigChangeNotificationDataV2; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config.ConfigChangeNotificationBaseData; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config.ConfigChangeNotificationDataV1; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config.ConfigChangeNotificationDataV2; import com.xiaojukeji.know.streaming.km.core.service.change.record.KafkaChangeRecordService; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.ConfigEntityChangeNotificationZNode; import kafka.zookeeper.ZNodeChildChangeHandler; import org.apache.zookeeper.data.Stat; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ControllerNodeChangeHandler.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ControllerNodeChangeHandler.java index 904b7d72..b671c4a3 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ControllerNodeChangeHandler.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/ControllerNodeChangeHandler.java @@ -11,7 +11,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil; import com.xiaojukeji.know.streaming.km.core.service.change.record.KafkaChangeRecordService; import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import 
com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.ControllerZNode; import kafka.zookeeper.ZNodeChangeHandler; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/TopicsNodeChangeHandler.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/TopicsNodeChangeHandler.java index 31602632..88c01281 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/TopicsNodeChangeHandler.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/flusher/zk/handler/TopicsNodeChangeHandler.java @@ -9,7 +9,7 @@ import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil; import com.xiaojukeji.know.streaming.km.core.service.change.record.KafkaChangeRecordService; import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.TopicsZNode; import kafka.zookeeper.ZNodeChildChangeHandler; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java index dc702388..fbede23c 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java @@ -24,7 +24,6 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; -import 
com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.BrokerMetadata; import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService; import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; @@ -32,8 +31,7 @@ import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient; import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; -import kafka.zk.BrokerIdZNode; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.BrokerIdsZNode; import org.apache.kafka.clients.admin.*; import org.apache.kafka.common.Node; @@ -310,9 +308,7 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok List brokerIdList = kafkaZKDAO.getChildren(clusterPhy.getId(), BrokerIdsZNode.path(), false); for (String brokerId: brokerIdList) { - BrokerMetadata metadata = kafkaZKDAO.getData(clusterPhy.getId(), BrokerIdZNode.path(Integer.valueOf(brokerId)), BrokerMetadata.class); - BrokerMetadata.parseAndUpdateBrokerMetadata(metadata); - brokerList.add(Broker.buildFrom(clusterPhy.getId(), Integer.valueOf(brokerId), metadata)); + brokerList.add(kafkaZKDAO.getBrokerMetadata(clusterPhy.getId(), Integer.valueOf(brokerId))); } return Result.buildSuc(brokerList); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java index 9fdd9ec0..3d004f78 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java +++ 
b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterMetricServiceImpl.java @@ -85,7 +85,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust public static final String CLUSTER_METHOD_GET_TOTAL_LOG_SIZE = "getTotalLogSize"; public static final String CLUSTER_METHOD_GET_PARTITION_SIZE = "getPartitionSize"; public static final String CLUSTER_METHOD_GET_PARTITION_NO_LEADER_SIZE = "getPartitionNoLeaderSize"; - public static final String CLUSTER_METHOD_GET_HEALTH_SCORE = "getTopicHealthScore"; + public static final String CLUSTER_METHOD_GET_HEALTH_SCORE = "getClusterHealthScore"; public static final String CLUSTER_METHOD_GET_METRIC_FROM_KAFKA_BY_TOTAL_BROKERS_JMX = "getMetricFromKafkaByTotalBrokersJMX"; public static final String CLUSTER_METHOD_GET_METRIC_FROM_KAFKA_BY_CONTROLLER_JMX = "getMetricFromKafkaByControllerJMX"; public static final String CLUSTER_METHOD_GET_ZK_COUNT = "getZKCount"; @@ -188,7 +188,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust registerVCHandler( CLUSTER_METHOD_GET_PARTITION_SIZE, this::getPartitionSize); registerVCHandler( CLUSTER_METHOD_GET_PARTITION_NO_LEADER_SIZE, this::getPartitionNoLeaderSize); - registerVCHandler( CLUSTER_METHOD_GET_HEALTH_SCORE, this::getTopicHealthScore); + registerVCHandler( CLUSTER_METHOD_GET_HEALTH_SCORE, this::getClusterHealthScore); registerVCHandler( CLUSTER_METHOD_GET_METRIC_FROM_KAFKA_BY_TOTAL_BROKERS_JMX, this::getMetricFromKafkaByTotalBrokersJMX); registerVCHandler( CLUSTER_METHOD_GET_METRIC_FROM_KAFKA_BY_CONTROLLER_JMX, this::getMetricFromKafkaByControllerJMX); @@ -364,7 +364,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust /** * 获取集群的健康分 */ - private Result getTopicHealthScore(VersionItemParam metricParam){ + private Result getClusterHealthScore(VersionItemParam metricParam){ ClusterMetricParam param = (ClusterMetricParam)metricParam; ClusterMetrics clusterMetrics 
= healthScoreService.calClusterHealthScore(param.getClusterId()); return Result.buildSuc(clusterMetrics); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterValidateServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterValidateServiceImpl.java index 6dcd858e..ba72d2fe 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterValidateServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterValidateServiceImpl.java @@ -13,8 +13,8 @@ import com.xiaojukeji.know.streaming.km.common.enums.valid.ValidateKafkaAddressE import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterValidateService; import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; -import com.xiaojukeji.know.streaming.km.persistence.zk.impl.KafkaZKDAOImpl; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl.KafkaZKDAOImpl; import kafka.server.KafkaConfig; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.*; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java index ce6d13a5..790a7c47 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java @@ -30,10 +30,14 @@ public interface GroupService { int replaceDBData(GroupMemberPO groupMemberPO); + void batchReplace(List newGroupMemberList); + GroupStateEnum getGroupStateFromDB(Long 
clusterPhyId, String groupName); List listGroupByTopic(Long clusterPhyId, String topicName); + List listGroup(Long clusterPhyId); + PaginationResult pagingGroupMembers(Long clusterPhyId, String topicName, String groupName, diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java index ea324888..427edc2c 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java @@ -90,23 +90,31 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe @Override public Result> collectGroupMetricsFromKafka(Long clusterId, String groupName, List metrics) { - List allGroupMetrics = new ArrayList<>(); - Map topicPartitionGroupMap = new HashMap<>(); + List allGroupMetrics = new ArrayList<>(); + Map topicPartitionGroupMap = new HashMap<>(); GroupMetrics groupMetrics = new GroupMetrics(clusterId, groupName, true); - for(String metric : metrics){ - if(null != groupMetrics.getMetrics().get(metric)){continue;} + Set existMetricSet = new HashSet<>(); + for (String metric : metrics) { + if (existMetricSet.contains(metric)) { + continue; + } Result> ret = collectGroupMetricsFromKafka(clusterId, groupName, metric); - if(null != ret && ret.successful()){ + if (null != ret && ret.successful()) { List groupMetricsList = ret.getData(); - for(GroupMetrics gm : groupMetricsList){ - if(gm.isBGroupMetric()){ + + for (GroupMetrics gm : groupMetricsList) { + + //记录已存在的指标 + existMetricSet.addAll(gm.getMetrics().keySet()); + + if (gm.isBGroupMetric()) { groupMetrics.getMetrics().putAll(gm.getMetrics()); - }else { + } else { GroupMetrics topicGroupMetric = topicPartitionGroupMap.getOrDefault( gm.getTopic() + gm.getPartitionId(), - new 
GroupMetrics(clusterId, groupName, false)); + new GroupMetrics(clusterId, gm.getPartitionId(), gm.getTopic(), groupName, false)); topicGroupMetric.getMetrics().putAll(gm.getMetrics()); topicPartitionGroupMap.put(gm.getTopic() + gm.getPartitionId(), topicGroupMetric); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java index b029330c..4cf29d2a 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java @@ -32,6 +32,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.*; +import java.util.function.Function; import java.util.stream.Collectors; import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_SEARCH_GROUP; @@ -120,6 +121,38 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group return groupMemberDAO.replace(groupMemberPO); } + @Override + public void batchReplace(List newGroupMemberList) { + if (newGroupMemberList == null || newGroupMemberList.isEmpty()) { + return; + } + + Long clusterPhyId = newGroupMemberList.get(0).getClusterPhyId(); + if (clusterPhyId == null) { + return; + } + + List dbGroupMemberList = listGroup(clusterPhyId); + + + Map dbGroupMemberMap = dbGroupMemberList.stream().collect(Collectors.toMap(elem -> elem.getGroupName() + elem.getTopicName(), Function.identity())); + for (GroupMemberPO groupMemberPO : newGroupMemberList) { + GroupMemberPO po = dbGroupMemberMap.remove(groupMemberPO.getGroupName() + groupMemberPO.getTopicName()); + try { + if (po != null) { + groupMemberPO.setId(po.getId()); + groupMemberDAO.updateById(groupMemberPO); + } else { + 
groupMemberDAO.insert(groupMemberPO); + } + } catch (Exception e) { + log.error("method=batchReplace||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhyId, groupMemberPO.getGroupName(), e); + } + + } + + } + @Override public GroupStateEnum getGroupStateFromDB(Long clusterPhyId, String groupName) { LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); @@ -143,6 +176,14 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group return groupMemberDAO.selectList(lambdaQueryWrapper); } + @Override + public List listGroup(Long clusterPhyId) { + LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); + lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId); + + return groupMemberDAO.selectList(lambdaQueryWrapper); + } + @Override public PaginationResult pagingGroupMembers(Long clusterPhyId, String topicName, diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/HealthScoreService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/HealthScoreService.java index 5997edec..48f8933b 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/HealthScoreService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/HealthScoreService.java @@ -15,35 +15,16 @@ public interface HealthScoreService { * @param clusterPhyId 集群ID * @return */ + @Deprecated ClusterMetrics calClusterHealthScore(Long clusterPhyId); - /** - * 获取集群Topics健康分指标 - * @param clusterPhyId 集群ID - * @return - */ - ClusterMetrics calClusterTopicsHealthScore(Long clusterPhyId); - - /** - * 获取集群Brokers健康分指标 - * @param clusterPhyId 集群ID - * @return - */ - ClusterMetrics calClusterBrokersHealthScore(Long clusterPhyId); - - /** - * 获取集群Groups健康分指标 - * @param clusterPhyId 集群ID - * @return - */ - ClusterMetrics calClusterGroupsHealthScore(Long clusterPhyId); - /** * 获取集群健康分指标 * @param clusterPhyId 集群ID * 
@param topicName Topic名称 * @return */ + @Deprecated TopicMetrics calTopicHealthScore(Long clusterPhyId, String topicName); /** @@ -52,6 +33,7 @@ public interface HealthScoreService { * @param brokerId brokerId * @return */ + @Deprecated BrokerMetrics calBrokerHealthScore(Long clusterPhyId, Integer brokerId); /** @@ -60,6 +42,7 @@ public interface HealthScoreService { * @param groupName group名称 * @return */ + @Deprecated GroupMetrics calGroupHealthScore(Long clusterPhyId, String groupName); /** diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/impl/HealthScoreServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/impl/HealthScoreServiceImpl.java index c443c3bb..6ba01bb9 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/impl/HealthScoreServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/score/impl/HealthScoreServiceImpl.java @@ -136,60 +136,6 @@ public class HealthScoreServiceImpl implements HealthScoreService { return metrics; } - @Override - public ClusterMetrics calClusterTopicsHealthScore(Long clusterPhyId) { - List healthScoreResultList = this.getDimensionHealthScoreResult(clusterPhyId, HealthCheckDimensionEnum.TOPIC); - - ClusterMetrics metrics = new ClusterMetrics(clusterPhyId); - if (ValidateUtils.isEmptyList(healthScoreResultList)) { - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_SCORE_TOPICS, Constant.MIN_HEALTH_SCORE); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_TOPICS, 0.0f); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_TOPICS, 0.0f); - } else { - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_SCORE_TOPICS, Math.max(this.getDimensionHealthScore(healthScoreResultList), Constant.MIN_HEALTH_SCORE)); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_TOPICS, getHealthCheckPassed(healthScoreResultList)); - 
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_TOPICS, Float.valueOf(healthScoreResultList.size())); - } - - return metrics; - } - - @Override - public ClusterMetrics calClusterBrokersHealthScore(Long clusterPhyId) { - List healthScoreResultList = this.getDimensionHealthScoreResult(clusterPhyId, HealthCheckDimensionEnum.BROKER); - - ClusterMetrics metrics = new ClusterMetrics(clusterPhyId); - if (ValidateUtils.isEmptyList(healthScoreResultList)) { - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_SCORE_BROKERS, Constant.MIN_HEALTH_SCORE); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_BROKERS, 0.0f); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_BROKERS, 0.0f); - } else { - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_SCORE_BROKERS, Math.max(this.getDimensionHealthScore(healthScoreResultList), Constant.MIN_HEALTH_SCORE)); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_BROKERS, getHealthCheckPassed(healthScoreResultList)); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_BROKERS, Float.valueOf(healthScoreResultList.size())); - } - - return metrics; - } - - @Override - public ClusterMetrics calClusterGroupsHealthScore(Long clusterPhyId) { - List healthScoreResultList = this.getDimensionHealthScoreResult(clusterPhyId, HealthCheckDimensionEnum.GROUP); - - ClusterMetrics metrics = new ClusterMetrics(clusterPhyId); - if (ValidateUtils.isEmptyList(healthScoreResultList)) { - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_SCORE_GROUPS, Constant.MIN_HEALTH_SCORE); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_GROUPS, 0.0f); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_GROUPS, 0.0f); - } else { - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_SCORE_GROUPS, Math.max(this.getDimensionHealthScore(healthScoreResultList), Constant.MIN_HEALTH_SCORE)); - metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_GROUPS, this.getHealthCheckPassed(healthScoreResultList)); - 
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_GROUPS, Float.valueOf(healthScoreResultList.size())); - } - - return metrics; - } - @Override public TopicMetrics calTopicHealthScore(Long clusterPhyId, String topicName) { List healthScoreResultList = this.getResHealthScoreResult(clusterPhyId, HealthCheckDimensionEnum.TOPIC.getDimension(), topicName); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java index 1fb3f488..8048eabe 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/kafkacontroller/impl/KafkaControllerServiceImpl.java @@ -19,7 +19,7 @@ import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService; import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.mysql.kafkacontroller.KafkaControllerDAO; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import org.apache.kafka.clients.admin.*; import org.apache.kafka.common.Node; import org.springframework.beans.factory.annotation.Autowired; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionServiceImpl.java index 13eedb41..1795e4d4 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionServiceImpl.java +++ 
b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/PartitionServiceImpl.java @@ -21,14 +21,14 @@ import com.xiaojukeji.know.streaming.km.common.exception.NotExistException; import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException; import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.PartitionMap; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.PartitionState; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionMap; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionState; import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaConsumerClient; import com.xiaojukeji.know.streaming.km.persistence.mysql.partition.PartitionDAO; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.TopicPartitionStateZNode; import kafka.zk.TopicPartitionsZNode; import kafka.zk.TopicZNode; @@ -202,10 +202,22 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P @Override public Result> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, OffsetSpec offsetSpec, Long timestamp) { Map topicPartitionOffsets = new HashMap<>(); - this.listPartitionByTopic(clusterPhyId, topicName) - .stream() + + List partitionList = this.listPartitionByTopic(clusterPhyId, topicName); + if (partitionList == null || partitionList.isEmpty()) { + // Topic不存在 + return 
Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(clusterPhyId, topicName)); + } + + partitionList.stream() + .filter(item -> !item.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER)) .forEach(elem -> topicPartitionOffsets.put(new TopicPartition(topicName, elem.getPartitionId()), offsetSpec)); + if (topicPartitionOffsets.isEmpty()) { + // 所有分区no-leader + return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FAILED, MsgConstant.getPartitionNoLeader(clusterPhyId, topicName)); + } + try { return (Result>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, topicName, topicPartitionOffsets, timestamp)); } catch (VCHandlerNotExistException e) { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java index 7f289c88..7cd017f4 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java @@ -23,7 +23,7 @@ import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.controller.ReplicaAssignment; import kafka.server.ConfigType; import kafka.zk.AdminZkClient; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicConfigServiceImpl.java 
b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicConfigServiceImpl.java index 9aaadee5..09be0d43 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicConfigServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicConfigServiceImpl.java @@ -30,7 +30,7 @@ import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.server.ConfigType; import kafka.zk.AdminZkClient; import kafka.zk.KafkaZkClient; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicServiceImpl.java index bffabec8..e2870d9d 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/TopicServiceImpl.java @@ -23,7 +23,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.mysql.topic.TopicDAO; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.zk.TopicsZNode; import org.apache.kafka.clients.admin.*; import 
org.apache.kafka.common.TopicPartitionInfo; diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java index 53b98479..00a5e0cd 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/ClusterMetricVersionItems.java @@ -20,22 +20,35 @@ import static com.xiaojukeji.know.streaming.km.core.service.cluster.impl.Cluster @Component public class ClusterMetricVersionItems extends BaseMetricVersionMetric { + /** + * 健康分 + */ public static final String CLUSTER_METRIC_HEALTH_SCORE = "HealthScore"; + public static final String CLUSTER_METRIC_HEALTH_SCORE_TOPICS = "HealthScore_Topics"; + public static final String CLUSTER_METRIC_HEALTH_SCORE_BROKERS = "HealthScore_Brokers"; + public static final String CLUSTER_METRIC_HEALTH_SCORE_GROUPS = "HealthScore_Groups"; + public static final String CLUSTER_METRIC_HEALTH_SCORE_CLUSTER = "HealthScore_Cluster"; + + /** + * 健康巡检 + */ public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED = "HealthCheckPassed"; public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL = "HealthCheckTotal"; - public static final String CLUSTER_METRIC_HEALTH_SCORE_TOPICS = "HealthScore_Topics"; + public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_TOPICS = "HealthCheckPassed_Topics"; public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_TOPICS = "HealthCheckTotal_Topics"; - public static final String CLUSTER_METRIC_HEALTH_SCORE_BROKERS = "HealthScore_Brokers"; + public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_BROKERS = "HealthCheckPassed_Brokers"; public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_BROKERS = "HealthCheckTotal_Brokers"; - public static final String 
CLUSTER_METRIC_HEALTH_SCORE_GROUPS = "HealthScore_Groups"; + public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_GROUPS = "HealthCheckPassed_Groups"; public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_GROUPS = "HealthCheckTotal_Groups"; - public static final String CLUSTER_METRIC_HEALTH_SCORE_CLUSTER = "HealthScore_Cluster"; + public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER = "HealthCheckPassed_Cluster"; public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER = "HealthCheckTotal_Cluster"; + + public static final String CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE = "TotalRequestQueueSize"; public static final String CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE = "TotalResponseQueueSize"; public static final String CLUSTER_METRIC_EVENT_QUEUE_SIZE = "EventQueueSize"; diff --git a/km-dist/docker/docker-compose.yml b/km-dist/docker/docker-compose.yml new file mode 100644 index 00000000..219ceb5c --- /dev/null +++ b/km-dist/docker/docker-compose.yml @@ -0,0 +1,102 @@ +version: "2" +services: + # *不要调整knowstreaming-manager服务名称,ui中会用到 + knowstreaming-manager: + image: knowstreaming/knowstreaming-manager:latest + container_name: knowstreaming-manager + privileged: true + restart: always + depends_on: + - elasticsearch-single + - knowstreaming-mysql + expose: + - 80 + command: + - /bin/sh + - /ks-start.sh + environment: + TZ: Asia/Shanghai + # mysql服务地址 + SERVER_MYSQL_ADDRESS: knowstreaming-mysql:3306 + # mysql数据库名 + SERVER_MYSQL_DB: know_streaming + # mysql用户名 + SERVER_MYSQL_USER: root + # mysql用户密码 + SERVER_MYSQL_PASSWORD: admin2022_ + # es服务地址 + SERVER_ES_ADDRESS: elasticsearch-single:9200 + # 服务JVM参数 + JAVA_OPTS: -Xmx1g -Xms1g + # 对于kafka中ADVERTISED_LISTENERS填写的hostname可以通过该方式完成 +# extra_hosts: +# - "hostname:x.x.x.x" + # 服务日志路径 +# volumes: +# - /ks/manage/log:/logs + knowstreaming-ui: + image: knowstreaming/knowstreaming-ui:latest + container_name: knowstreaming-ui + restart: always + ports: + - '80:80' + environment: + TZ: 
Asia/Shanghai + depends_on: + - knowstreaming-manager +# extra_hosts: +# - "hostname:x.x.x.x" + elasticsearch-single: + image: docker.io/library/elasticsearch:7.6.2 + container_name: elasticsearch-single + restart: always + expose: + - 9200 + - 9300 +# ports: +# - '9200:9200' +# - '9300:9300' + environment: + TZ: Asia/Shanghai + # es的JVM参数 + ES_JAVA_OPTS: -Xms512m -Xmx512m + # 单节点配置,多节点集群参考 https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file + discovery.type: single-node + # 数据持久化路径 +# volumes: +# - /ks/es/data:/usr/share/elasticsearch/data + + # es初始化服务,与manager使用同一镜像 + # 首次启动es需初始化模版和索引,后续会自动创建 + knowstreaming-init: + image: knowstreaming/knowstreaming-manager:latest + container_name: knowstreaming-init + depends_on: + - elasticsearch-single + command: + - /bin/bash + - /es_template_create.sh + environment: + TZ: Asia/Shanghai + # es服务地址 + SERVER_ES_ADDRESS: elasticsearch-single:9200 + + knowstreaming-mysql: + image: knowstreaming/knowstreaming-mysql:latest + container_name: knowstreaming-mysql + restart: always + environment: + TZ: Asia/Shanghai + # root 用户密码 + MYSQL_ROOT_PASSWORD: admin2022_ + # 初始化时创建的数据库名称 + MYSQL_DATABASE: know_streaming + # 通配所有host,可以访问远程 + MYSQL_ROOT_HOST: '%' + expose: + - 3306 +# ports: +# - '3306:3306' + # 数据持久化路径 +# volumes: +# - /ks/mysql/data:/data/mysql diff --git a/km-dist/docker/manager/dockerfile b/km-dist/docker/manager/dockerfile new file mode 100644 index 00000000..4d850f6e --- /dev/null +++ b/km-dist/docker/manager/dockerfile @@ -0,0 +1,17 @@ +FROM java:8 + +#维护者 +MAINTAINER wangdongfang + +# 编译好的knowstreaming包 +ADD km-rest.jar / +# docker启动脚本 helm安装不使用 +ADD ks-start.sh / +# es初始化脚本 helm安装不使用 +ADD es_template_create.sh / + +RUN mkdir /conf +# { 配置文件 helm安装不使用 +ADD application.yml /conf/ +ADD logback-spring.xml /conf/ +# } diff --git a/km-dist/docker/manager/es_template_create.sh b/km-dist/docker/manager/es_template_create.sh new file mode 100644 index 00000000..615bf54d --- /dev/null 
+++ b/km-dist/docker/manager/es_template_create.sh @@ -0,0 +1,661 @@ +echo "Wait ElasticSearch Start...${SERVER_ES_ADDRESS}" +while true +do + curl -s --connect-timeout 10 -o /dev/null http://${SERVER_ES_ADDRESS}/_cat/nodes > /dev/null 2>&1 + if [ "$?" != "0" ];then + sleep 1s + else + echo "ElasticSearch Start Initialize" + break + fi +done + +curl -s --connect-timeout 10 -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_broker_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_broker_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "metrics" : { + "properties" : { + "NetworkProcessorAvgIdle" : { + "type" : "float" + }, + "UnderReplicatedPartitions" : { + "type" : "float" + }, + "BytesIn_min_15" : { + "type" : "float" + }, + "HealthCheckTotal" : { + "type" : "float" + }, + "RequestHandlerAvgIdle" : { + "type" : "float" + }, + "connectionsCount" : { + "type" : "float" + }, + "BytesIn_min_5" : { + "type" : "float" + }, + "HealthScore" : { + "type" : "float" + }, + "BytesOut" : { + "type" : "float" + }, + "BytesOut_min_15" : { + "type" : "float" + }, + "BytesIn" : { + "type" : "float" + }, + "BytesOut_min_5" : { + "type" : "float" + }, + "TotalRequestQueueSize" : { + "type" : "float" + }, + "MessagesIn" : { + "type" : "float" + }, + "TotalProduceRequests" : { + "type" : "float" + }, + "HealthCheckPassed" : { + "type" : "float" + }, + "TotalResponseQueueSize" : { + "type" : "float" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd 
HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_cluster_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_cluster_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "metrics" : { + "properties" : { + "Connections" : { + "type" : "double" + }, + "BytesIn_min_15" : { + "type" : "double" + }, + "PartitionURP" : { + "type" : "double" + }, + "HealthScore_Topics" : { + "type" : "double" + }, + "EventQueueSize" : { + "type" : "double" + }, + "ActiveControllerCount" : { + "type" : "double" + }, + "GroupDeads" : { + "type" : "double" + }, + "BytesIn_min_5" : { + "type" : "double" + }, + "HealthCheckTotal_Topics" : { + "type" : "double" + }, + "Partitions" : { + "type" : "double" + }, + "BytesOut" : { + "type" : "double" + }, + "Groups" : { + "type" : "double" + }, + "BytesOut_min_15" : { + "type" : "double" + }, + "TotalRequestQueueSize" : { + "type" : "double" + }, + "HealthCheckPassed_Groups" : { + "type" : "double" + }, + "TotalProduceRequests" : { + "type" : "double" + }, + "HealthCheckPassed" : { + "type" : "double" + }, + "TotalLogSize" : { + "type" : "double" + }, + "GroupEmptys" : { + "type" : "double" + }, + "PartitionNoLeader" : { + "type" : "double" + }, + "HealthScore_Brokers" : { + "type" : "double" + }, + "Messages" : { + "type" : "double" + }, + "Topics" : { + "type" : "double" + }, + "PartitionMinISR_E" : { + "type" : "double" + }, + 
"HealthCheckTotal" : { + "type" : "double" + }, + "Brokers" : { + "type" : "double" + }, + "Replicas" : { + "type" : "double" + }, + "HealthCheckTotal_Groups" : { + "type" : "double" + }, + "GroupRebalances" : { + "type" : "double" + }, + "MessageIn" : { + "type" : "double" + }, + "HealthScore" : { + "type" : "double" + }, + "HealthCheckPassed_Topics" : { + "type" : "double" + }, + "HealthCheckTotal_Brokers" : { + "type" : "double" + }, + "PartitionMinISR_S" : { + "type" : "double" + }, + "BytesIn" : { + "type" : "double" + }, + "BytesOut_min_5" : { + "type" : "double" + }, + "GroupActives" : { + "type" : "double" + }, + "MessagesIn" : { + "type" : "double" + }, + "GroupReBalances" : { + "type" : "double" + }, + "HealthCheckPassed_Brokers" : { + "type" : "double" + }, + "HealthScore_Groups" : { + "type" : "double" + }, + "TotalResponseQueueSize" : { + "type" : "double" + }, + "Zookeepers" : { + "type" : "double" + }, + "LeaderMessages" : { + "type" : "double" + }, + "HealthScore_Cluster" : { + "type" : "double" + }, + "HealthCheckPassed_Cluster" : { + "type" : "double" + }, + "HealthCheckTotal_Cluster" : { + "type" : "double" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "type" : "date" + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_group_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_group_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "group" : { + "type" : "keyword" + }, + "partitionId" : { + "type" : "long" + }, + 
"routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "topic" : { + "type" : "keyword" + }, + "metrics" : { + "properties" : { + "HealthScore" : { + "type" : "float" + }, + "Lag" : { + "type" : "float" + }, + "OffsetConsumed" : { + "type" : "float" + }, + "HealthCheckTotal" : { + "type" : "float" + }, + "HealthCheckPassed" : { + "type" : "float" + } + } + }, + "groupMetric" : { + "type" : "keyword" + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_partition_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_partition_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "partitionId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "topic" : { + "type" : "keyword" + }, + "metrics" : { + "properties" : { + "LogStartOffset" : { + "type" : "float" + }, + "Messages" : { + "type" : "float" + }, + "LogEndOffset" : { + "type" : "float" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd 
HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_replication_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_replication_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "partitionId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "topic" : { + "type" : "keyword" + }, + "metrics" : { + "properties" : { + "LogStartOffset" : { + "type" : "float" + }, + "Messages" : { + "type" : "float" + }, + "LogEndOffset" : { + "type" : "float" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${SERVER_ES_ADDRESS}/_template/ks_kafka_topic_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_topic_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "topic" : { + "type" : "keyword" + }, + "clusterPhyId" : { + "type" : "long" + }, + "metrics" : { + "properties" : { + "BytesIn_min_15" : { + "type" : "float" + }, + "Messages" : { + "type" : "float" + }, + "BytesRejected" : { + "type" : "float" + }, + "PartitionURP" : { + "type" : "float" + }, + "HealthCheckTotal" : { + "type" : "float" + }, + "ReplicationCount" : { + "type" : "float" + }, + "ReplicationBytesOut" : { + "type" : "float" + }, + "ReplicationBytesIn" : { + "type" : "float" + }, + "FailedFetchRequests" : { + "type" : "float" + }, + "BytesIn_min_5" : { + "type" : "float" + }, + "HealthScore" : { + "type" : "float" + }, + "LogSize" : { + "type" : "float" + }, + "BytesOut" : { + "type" : "float" + }, + "BytesOut_min_15" : { + "type" : "float" + }, + "FailedProduceRequests" : { + "type" : "float" + }, + "BytesIn" : { + "type" : "float" + }, + "BytesOut_min_5" : { + "type" : "float" + }, + "MessagesIn" : { + "type" : "float" + }, + "TotalProduceRequests" : { + "type" : "float" + }, + "HealthCheckPassed" : { + "type" : "float" + } + } + }, + "brokerAgg" : { + "type" : "keyword" + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd 
HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' + +for i in {0..6}; +do + logdate=_$(date -d "${i} day ago" +%Y-%m-%d) + curl -s --connect-timeout 10 -o /dev/null -X PUT http://${SERVER_ES_ADDRESS}/ks_kafka_broker_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${SERVER_ES_ADDRESS}/ks_kafka_cluster_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${SERVER_ES_ADDRESS}/ks_kafka_group_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${SERVER_ES_ADDRESS}/ks_kafka_partition_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${SERVER_ES_ADDRESS}/ks_kafka_replication_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${SERVER_ES_ADDRESS}/ks_kafka_topic_metric${logdate} || \ + exit 2 +done + +echo "ElasticSearch Initialize Success" diff --git a/km-dist/docker/manager/ks-start.sh b/km-dist/docker/manager/ks-start.sh new file mode 100644 index 00000000..5e46fb7c --- /dev/null +++ b/km-dist/docker/manager/ks-start.sh @@ -0,0 +1,7 @@ +sed -i "s/SERVER_MYSQL_ADDRESS/${SERVER_MYSQL_ADDRESS}/g" /conf/application.yml +sed -i "s/SERVER_MYSQL_DB/${SERVER_MYSQL_DB}/g" /conf/application.yml +sed -i "s/SERVER_MYSQL_USER/${SERVER_MYSQL_USER}/g" /conf/application.yml +sed -i "s/SERVER_MYSQL_PASSWORD/${SERVER_MYSQL_PASSWORD}/g" /conf/application.yml +sed -i "s/SERVER_ES_ADDRESS/${SERVER_ES_ADDRESS}/g" /conf/application.yml + +java -server ${JAVA_OPTS} -XX:+UseStringDeduplication -Dfile.encoding=UTF-8 -Djava.security.egd=file:/dev/./urandom -Duser.timezone=GMT+08 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/logs -XX:ErrorFile=/logs/jvm/hs_err_pid%p.log -jar /km-rest.jar --spring.config.location=/conf/application.yml diff --git a/km-dist/docker/mysql/dockerfile b/km-dist/docker/mysql/dockerfile new file mode 100644 index 00000000..00ee9b9d 
--- /dev/null +++ b/km-dist/docker/mysql/dockerfile @@ -0,0 +1,4 @@ +FROM mysql/mysql-server:5.7.38 +COPY initsql /docker-entrypoint-initdb.d/ +COPY init.sh /docker-entrypoint-initdb.d/ +ADD my.cnf /etc/ diff --git a/km-dist/docker/mysql/init.sh b/km-dist/docker/mysql/init.sh new file mode 100644 index 00000000..99248d67 --- /dev/null +++ b/km-dist/docker/mysql/init.sh @@ -0,0 +1,6 @@ +if [ "$MYSQL_DATABASE" ]; then + "${mysql[@]}" < /docker-entrypoint-initdb.d/initsql +else + echo "CREATE DATABASE IF NOT EXISTS ks ;" | "${mysql[@]}" + "${mysql[@]}" ks < /docker-entrypoint-initdb.d/initsql +fi diff --git a/km-dist/docker/mysql/initsql b/km-dist/docker/mysql/initsql new file mode 100644 index 00000000..d5916260 --- /dev/null +++ b/km-dist/docker/mysql/initsql @@ -0,0 +1,784 @@ +DROP TABLE IF EXISTS `ks_km_broker`; +CREATE TABLE `ks_km_broker` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '物理集群ID', + `broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerId', + `host` varchar(128) NOT NULL DEFAULT '' COMMENT 'broker主机名', + `port` int(16) NOT NULL DEFAULT '-1' COMMENT 'broker端口', + `jmx_port` int(16) NOT NULL DEFAULT '-1' COMMENT 'Jmx端口', + `start_timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT '启动时间', + `rack` varchar(128) NOT NULL DEFAULT '' COMMENT 'Rack信息', + `status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1存活,0未存活', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + `endpoint_map` varchar(1024) NOT NULL DEFAULT '' COMMENT '监听信息', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_phy_id_broker_id` (`cluster_phy_id`,`broker_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Broker信息表'; + + + +DROP TABLE IF EXISTS `ks_km_broker_config`; +CREATE TABLE `ks_km_broker_config` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + 
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerId', + `config_name` varchar(192) NOT NULL DEFAULT '' COMMENT '配置名称', + `config_value` text COMMENT '配置值', + `diff_type` int(16) NOT NULL DEFAULT '-1' COMMENT '差异类型', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_broker_name` (`cluster_phy_id`,`broker_id`,`config_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Broker配置信息表'; + + + +DROP TABLE IF EXISTS `ks_km_cluster_balance_job`; +CREATE TABLE `ks_km_cluster_balance_job` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', + `brokers` varchar(1024) NOT NULL DEFAULT '' COMMENT '均衡节点', + `topic_black_list` varchar(4096) NOT NULL DEFAULT '' COMMENT 'topic黑名单', + `type` int(16) NOT NULL DEFAULT '0' COMMENT '1:周期均衡,2:立即均衡', + `balance_interval_json` text COMMENT '均衡区间详情', + `metric_calculation_period` int(16) NOT NULL DEFAULT '0' COMMENT '指标计算周期,单位分钟', + `reassignment_json` text COMMENT '迁移脚本', + `parallel_num` int(16) NOT NULL DEFAULT '0' COMMENT '任务并行数', + `execution_strategy` int(16) NOT NULL DEFAULT '0' COMMENT '执行策略, 1:优先最大副本,2:优先最小副本', + `throttle_unit_b` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值', + `total_reassign_size` double NOT NULL DEFAULT '0' COMMENT '总迁移大小', + `total_reassign_replica_num` int(16) NOT NULL DEFAULT '0' COMMENT '总迁移副本数', + `move_in_topic_list` varchar(4096) NOT NULL DEFAULT '' COMMENT '移入topic', + `broker_balance_detail` text COMMENT '节点均衡详情', + `status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态 1:进行中,2:准备,3,成功,4:失败,5:取消', + `creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', + `start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务开始时间', + `finished_time` timestamp NOT NULL DEFAULT 
CURRENT_TIMESTAMP COMMENT '任务完成时间', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间', + `description` text COMMENT '备注', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群均衡任务'; + + + +DROP TABLE IF EXISTS `ks_km_cluster_balance_job_config`; +CREATE TABLE `ks_km_cluster_balance_job_config` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', + `brokers` varchar(256) NOT NULL DEFAULT '' COMMENT '均衡节点', + `topic_black_list` varchar(4096) NOT NULL DEFAULT '' COMMENT 'topic黑名单', + `task_cron` varchar(64) NOT NULL DEFAULT '' COMMENT '任务周期', + `balance_interval_json` text COMMENT '均衡区间详情', + `metric_calculation_period` int(16) NOT NULL DEFAULT '0' COMMENT '指标计算周期,单位分钟', + `reassignment_json` text COMMENT '迁移脚本', + `parallel_num` int(16) NOT NULL DEFAULT '0' COMMENT '任务并行数', + `execution_strategy` int(16) NOT NULL DEFAULT '0' COMMENT '执行策略, 1:优先最大副本,2:优先最小副本', + `throttle_unit_b` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值', + `creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', + `status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态 0:未开启,1:开启', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群均衡任务'; + + + +DROP TABLE IF EXISTS `ks_km_cluster_balance_reassign`; +CREATE TABLE `ks_km_cluster_balance_reassign` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `job_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '父任务ID', + `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', + `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称', + `partition_id` int(11) NOT NULL 
DEFAULT '-1' COMMENT '分区ID', + `original_broker_ids` text COMMENT '源BrokerId列表', + `reassign_broker_ids` text COMMENT '目标BrokerId列表', + `start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务开始时间', + `finished_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务完成时间', + `extend_data` text COMMENT '扩展数据', + `status` int(16) NOT NULL DEFAULT '2' COMMENT '任务状态', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群平衡迁移详情'; + + + +DROP TABLE IF EXISTS `ks_km_group_member`; +CREATE TABLE `ks_km_group_member` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'Topic名称', + `group_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'Group名称', + `kafka_user` varchar(192) NOT NULL DEFAULT '' COMMENT 'Kafka用户', + `state` varchar(64) NOT NULL DEFAULT '' COMMENT '状态', + `member_count` int(11) NOT NULL DEFAULT '0' COMMENT '成员数', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_topic_group` (`cluster_phy_id`,`topic_name`,`group_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='GroupMember信息表'; + + + +DROP TABLE IF EXISTS `ks_km_health_check_result`; +CREATE TABLE `ks_km_health_check_result` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `dimension` int(11) NOT NULL DEFAULT '0' COMMENT '检查维度(0:未知,1:Cluster,2:Broker,3:Topic,4:Group)', + `config_name` varchar(192) NOT NULL DEFAULT '' COMMENT '配置名', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '物理集群ID', + `res_name` varchar(192) NOT NULL DEFAULT '' COMMENT 
'资源名称', + `passed` int(11) NOT NULL DEFAULT '0' COMMENT '检查通过(0:未通过,1:通过)', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_dimension_config_cluster_res` (`dimension`,`config_name`,`cluster_phy_id`,`res_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='健康检查结果'; + + + +DROP TABLE IF EXISTS `ks_km_job`; +CREATE TABLE `ks_km_job` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键自增', + `job_name` varchar(1024) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '任务名称', + `job_type` tinyint(10) NOT NULL COMMENT '任务类型', + `job_status` tinyint(10) NOT NULL COMMENT '任务状态', + `job_data` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '任务的详细信息', + `job_desc` varchar(1024) NOT NULL DEFAULT '' COMMENT '任务描述', + `cluster_id` int(11) NOT NULL COMMENT 'kafka集群id', + `target` varchar(8192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '任务执行对象', + `running_status` varchar(256) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '任务运行详细状态(json), Success:7 Fail:1 Doing:2', + `creator` varchar(45) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '创建者', + `plan_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '计划执行时间', + `start_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '实际执行时间', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + KEY `index_cluster_id` (`cluster_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Job信息'; + + + +DROP TABLE IF EXISTS `ks_km_kafka_acl`; +CREATE TABLE `ks_km_kafka_acl` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', + `principal` varchar(192) CHARACTER SET utf8 
COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Kafka用户', + `operation` int(11) NOT NULL DEFAULT '0' COMMENT '操作', + `permission_type` int(11) NOT NULL DEFAULT '0' COMMENT '权限类型(0:未知,1:任意,2:拒绝,3:允许)', + `host` varchar(192) NOT NULL DEFAULT '127.0.0.1' COMMENT '机器', + `resource_type` int(11) NOT NULL DEFAULT '0' COMMENT '资源类型(0:未知,1:任意,2:Topic,3:Group,4:Cluster,5:事务,6:Token)', + `resource_name` varchar(192) NOT NULL DEFAULT '' COMMENT '资源名称', + `pattern_type` int(11) NOT NULL COMMENT '匹配类型(0:未知,1:任意,2:Match,3:Literal,4:prefixed)', + `unique_field` varchar(1024) NOT NULL DEFAULT '' COMMENT '唯一字段,由cluster_phy_id等字段组成', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_uniq_field` (`unique_field`), + KEY `idx_cluster_phy_id_principal_res_name` (`cluster_phy_id`,`principal`,`resource_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='ACL信息'; + + + +DROP TABLE IF EXISTS `ks_km_kafka_change_record`; +CREATE TABLE `ks_km_kafka_change_record` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `res_type` int(11) NOT NULL DEFAULT '-1' COMMENT '资源类型', + `res_name` varchar(192) NOT NULL DEFAULT '' COMMENT '资源名称', + `operate_type` int(11) NOT NULL DEFAULT '-1' COMMENT '操作类型', + `operate_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '操作时间', + `unique_field` varchar(1024) NOT NULL DEFAULT '' COMMENT '唯一键字段', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `unique_field` (`unique_field`), + KEY `idx_cluster_phy_id` (`cluster_phy_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Kafka变更记录表'; + + + +DROP TABLE IF EXISTS `ks_km_kafka_controller`; 
+CREATE TABLE `ks_km_kafka_controller` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', + `broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerId', + `broker_host` varchar(256) NOT NULL DEFAULT '' COMMENT '主机名', + `broker_rack` varchar(256) NOT NULL DEFAULT '' COMMENT 'BrokerRack信息', + `timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'controller变更时间,-1表示未存活', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_broker_timestamp` (`cluster_phy_id`,`broker_id`,`timestamp`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='controller记录表'; + + + +DROP TABLE IF EXISTS `ks_km_kafka_user`; +CREATE TABLE `ks_km_kafka_user` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '名称', + `token` varchar(8192) NOT NULL DEFAULT '' COMMENT '密钥', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_phy_id_name` (`cluster_phy_id`,`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Kafka-User信息表'; + + + +DROP TABLE IF EXISTS `ks_km_partition`; +CREATE TABLE `ks_km_partition` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称', + `partition_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区ID', + `leader_broker_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区的LeaderBroker,-1表示无Leader', + 
`in_sync_replicas` varchar(512) NOT NULL DEFAULT '-1' COMMENT 'ISR', + `assign_replicas` varchar(512) NOT NULL DEFAULT '-1' COMMENT 'AR', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_topic_partition` (`cluster_phy_id`,`topic_name`,`partition_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Partition信息表'; + + + +DROP TABLE IF EXISTS `ks_km_physical_cluster`; +CREATE TABLE `ks_km_physical_cluster` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '集群id', + `name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群名称', + `zookeeper` varchar(2048) NOT NULL DEFAULT '' COMMENT 'zk地址', + `bootstrap_servers` varchar(2048) NOT NULL DEFAULT '' COMMENT 'server地址', + `kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本', + `client_properties` text COMMENT 'Kafka客户端配置', + `jmx_properties` text COMMENT 'JMX配置', + `description` text COMMENT '备注', + `auth_type` int(11) NOT NULL DEFAULT '0' COMMENT '认证类型,-1未知,0:无认证,', + `run_state` tinyint(4) NOT NULL DEFAULT '1' COMMENT '运行状态, 0表示未监控, 1监控中,有ZK,2:监控中,无ZK', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '接入时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_name` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='物理集群信息表'; + + + +DROP TABLE IF EXISTS `ks_km_platform_cluster_config`; +CREATE TABLE `ks_km_platform_cluster_config` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键自增', + `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `value_group` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项组', + `value_name` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项名字', + `value` text COMMENT '配置项的值', + `description` text COMMENT '备注', + `operator` varchar(16) CHARACTER SET utf8 COLLATE 
utf8_bin NOT NULL DEFAULT '' COMMENT '操作者', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_id_group_name` (`cluster_id`,`value_group`,`value_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='KS平台集群配置'; + + + +DROP TABLE IF EXISTS `ks_km_reassign_job`; +CREATE TABLE `ks_km_reassign_job` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', + `reassignment_json` text COMMENT '迁移计划', + `throttle_unit_byte` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值', + `start_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务开始时间', + `finished_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务完成时间', + `creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', + `description` text COMMENT '备注', + `status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='迁移Job信息'; + + + +DROP TABLE IF EXISTS `ks_km_reassign_sub_job`; +CREATE TABLE `ks_km_reassign_sub_job` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', + `job_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '父任务ID', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', + `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称', + `partition_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区ID', + `original_broker_ids` text COMMENT '源BrokerId列表', + `reassign_broker_ids` text COMMENT '目标BrokerId列表', + `start_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务开始时间', + `finished_time` timestamp NOT NULL DEFAULT '1971-1-1 
00:00:00' COMMENT '任务完成时间', + `extend_data` text COMMENT '扩展数据', + `status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='迁移SubJob信息'; + + + +DROP TABLE IF EXISTS `ks_km_topic`; +CREATE TABLE `ks_km_topic` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID', + `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称', + `replica_num` int(11) NOT NULL DEFAULT '-1' COMMENT '副本数', + `partition_num` int(11) NOT NULL DEFAULT '-1' COMMENT '分区数', + `broker_ids` varchar(2048) NOT NULL DEFAULT '' COMMENT 'BrokerId列表', + `partition_map` text COMMENT '分区分布信息', + `retention_ms` bigint(20) NOT NULL DEFAULT '-2' COMMENT '保存时间,-2:未知,-1:无限制,>=0对应时间,单位ms', + `type` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'Topic类型,默认0,0:普通,1:Kafka内部', + `description` text COMMENT '备注信息', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间(尽量与Topic实际创建时间一致)', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间(尽量与Topic实际创建时间一致)', + PRIMARY KEY (`id`), + UNIQUE KEY `uniq_cluster_phy_id_topic_name` (`cluster_phy_id`,`topic_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Topic信息表'; + + +DROP TABLE IF EXISTS `ks_km_app_node`; +CREATE TABLE `ks_km_app_node` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `host_name` varchar(256) NOT NULL DEFAULT '' COMMENT 'host', + `ip` varchar(256) NOT NULL DEFAULT '' COMMENT 'ip', + `beat_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'node 的心跳时间', + `app_name` varchar(128) NOT NULL DEFAULT '' COMMENT 'km 集群的应用名', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp 
NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + PRIMARY KEY (`id`), + KEY `idx_app_host` (`app_name`,`host_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='km集群部署的node信息'; + +-- Logi-Job模块的sql,安装KS-KM需要执行该sql + + +DROP TABLE IF EXISTS `logi_job`; +CREATE TABLE `logi_job` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `job_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'task taskCode', + `task_code` varchar(255) NOT NULL DEFAULT '' COMMENT '任务code', + `class_name` varchar(255) NOT NULL DEFAULT '' COMMENT '类的全限定名', + `try_times` int(10) NOT NULL DEFAULT '0' COMMENT '第几次重试', + `worker_code` varchar(200) NOT NULL DEFAULT '' COMMENT '执行机器', + `app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称', + `start_time` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '开始时间', + `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `job_code` (`job_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='正在执行的job信息'; + + + +DROP TABLE IF EXISTS `logi_job_log`; +CREATE TABLE `logi_job_log` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `job_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'job taskCode', + `task_code` varchar(255) NOT NULL DEFAULT '' COMMENT '任务code', + `task_name` varchar(255) NOT NULL DEFAULT '' COMMENT '任务名称', + `task_desc` varchar(255) NOT NULL DEFAULT '' COMMENT '任务描述', + `task_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '任务id', + `class_name` varchar(255) NOT NULL DEFAULT '' COMMENT '类的全限定名', + `try_times` int(10) NOT NULL DEFAULT '0' COMMENT '第几次重试', + `worker_code` varchar(200) NOT NULL DEFAULT '' COMMENT '执行机器', + `worker_ip` varchar(200) NOT NULL DEFAULT '' COMMENT '执行机器ip', + `start_time` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '开始时间', + `end_time` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '结束时间', + `status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '执行结果 1成功 2失败 
3取消', + `error` text NOT NULL COMMENT '错误信息', + `result` text NOT NULL COMMENT '执行结果', + `app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称', + `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + KEY `index_job_code` (`job_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='job执行历史日志'; + + + +DROP TABLE IF EXISTS `logi_task`; +CREATE TABLE `logi_task` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `task_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'task taskCode', + `task_name` varchar(255) NOT NULL DEFAULT '' COMMENT '名称', + `task_desc` varchar(1000) NOT NULL DEFAULT '' COMMENT '任务描述', + `cron` varchar(100) NOT NULL DEFAULT '' COMMENT 'cron 表达式', + `class_name` varchar(255) NOT NULL DEFAULT '' COMMENT '类的全限定名', + `params` varchar(1000) NOT NULL DEFAULT '' COMMENT '执行参数 map 形式{key1:value1,key2:value2}', + `retry_times` int(10) NOT NULL DEFAULT '0' COMMENT '允许重试次数', + `last_fire_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '上次执行时间', + `timeout` bigint(20) NOT NULL DEFAULT '0' COMMENT '超时 毫秒', + `status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '1等待 2运行中 3暂停', + `sub_task_codes` varchar(1000) NOT NULL DEFAULT '' COMMENT '子任务code列表,逗号分隔', + `consensual` varchar(200) NOT NULL DEFAULT '' COMMENT '执行策略', + `owner` varchar(200) NOT NULL DEFAULT '' COMMENT '责任人', + `task_worker_str` varchar(3000) NOT NULL DEFAULT '' COMMENT '机器执行信息', + `app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称', + `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `task_code` (`task_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='任务信息'; + + + +DROP TABLE IF EXISTS `logi_task_lock`; +CREATE TABLE `logi_task_lock` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `task_code` varchar(100) NOT NULL 
DEFAULT '' COMMENT 'task taskCode', + `worker_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker taskCode', + `app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称', + `expire_time` bigint(20) NOT NULL DEFAULT '0' COMMENT '过期时间', + `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='任务锁'; + + + +DROP TABLE IF EXISTS `logi_worker`; +CREATE TABLE `logi_worker` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `worker_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker taskCode', + `worker_name` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker名', + `ip` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker的ip', + `cpu` int(11) NOT NULL DEFAULT '0' COMMENT 'cpu数量', + `cpu_used` double NOT NULL DEFAULT '0' COMMENT 'cpu使用率', + `memory` double NOT NULL DEFAULT '0' COMMENT '内存,以M为单位', + `memory_used` double NOT NULL DEFAULT '0' COMMENT '内存使用率', + `jvm_memory` double NOT NULL DEFAULT '0' COMMENT 'jvm堆大小,以M为单位', + `jvm_memory_used` double NOT NULL DEFAULT '0' COMMENT 'jvm堆使用率', + `job_num` int(10) NOT NULL DEFAULT '0' COMMENT '正在执行job数', + `heartbeat` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '心跳时间', + `app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称', + `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `worker_code` (`worker_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='worker信息'; + + + +DROP TABLE IF EXISTS `logi_worker_blacklist`; +CREATE TABLE `logi_worker_blacklist` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `worker_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker taskCode', + `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE 
CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `worker_code` (`worker_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='worker黑名单列表'; + +DROP TABLE IF EXISTS `logi_security_dept`; +CREATE TABLE `logi_security_dept` +( + id int auto_increment primary key, + dept_name varchar(10) not null comment '部门名', + parent_id int not null comment '父部门id', + leaf tinyint(1) not null comment '是否叶子部门', + level tinyint not null comment 'parentId为0的层级为1', + description varchar(20) null comment '描述', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '部门信息表'; + +DROP TABLE IF EXISTS `logi_security_message`; +CREATE TABLE `logi_security_message` +( + id int auto_increment primary key, + title varchar(60) not null comment '标题', + content varchar(256) null comment '内容', + read_tag tinyint(1) default 0 null comment '是否已读', + oplog_id int null comment '操作日志id', + user_id int null comment '这条消息属于哪个用户的,用户id', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '消息中心'; + +DROP TABLE IF EXISTS `logi_security_oplog`; +CREATE TABLE `logi_security_oplog` +( + id int auto_increment primary key, + operator_ip varchar(64) not null comment '操作者ip', + operator varchar(64) null comment '操作者账号', + operate_page varchar(64) not null default '' comment '操作页面', + operate_type varchar(64) not null comment '操作类型', + target_type varchar(64) not null comment '对象分类', + target varchar(1024) not null comment '操作对象', + operation_methods 
varchar(64) not null default '' comment '操作方式', + detail text null comment '日志详情', + create_time timestamp default CURRENT_TIMESTAMP null, + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '操作日志'; + + +DROP TABLE IF EXISTS `logi_security_oplog_extra`; +CREATE TABLE `logi_security_oplog_extra` +( + id int auto_increment primary key, + info varchar(16) null comment '信息', + type tinyint not null comment '哪种信息:1:操作页面;2:操作类型;3:对象分类', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '操作日志信息(操作页面、操作类型、对象分类)'; + +DROP TABLE IF EXISTS `logi_security_permission`; +CREATE TABLE `logi_security_permission` +( + id int auto_increment primary key, + permission_name varchar(40) not null comment '权限名字', + parent_id int not null comment '父权限id', + leaf tinyint(1) not null comment '是否叶子权限点(具体的操作)', + level tinyint not null comment '权限点的层级(parentId为0的层级为1)', + description varchar(64) null comment '权限点描述', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '权限表'; + +DROP TABLE IF EXISTS `logi_security_project`; +CREATE TABLE `logi_security_project` +( + id int auto_increment comment '项目id' primary key, + project_code varchar(128) not null comment '项目编号', + project_name varchar(128) not null comment '项目名', + description 
varchar(512) default '' not null comment '项目描述', + dept_id int not null comment '部门id', + running tinyint(1) default 1 not null comment '启用 or 停用', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '项目表'; + +DROP TABLE IF EXISTS `logi_security_resource_type`; +CREATE TABLE `logi_security_resource_type` +( + id int auto_increment primary key, + type_name varchar(16) null comment '资源类型名', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '资源类型表'; + +DROP TABLE IF EXISTS `logi_security_role`; +CREATE TABLE `logi_security_role` +( + id int auto_increment primary key, + role_code varchar(128) not null comment '角色编号', + role_name varchar(128) not null comment '名称', + description varchar(128) null comment '角色描述', + last_reviser varchar(30) null comment '最后修改人', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '角色信息'; + +DROP TABLE IF EXISTS `logi_security_role_permission`; +CREATE TABLE `logi_security_role_permission` +( + id int auto_increment primary key, + role_id int not null comment '角色id', + permission_id int not null comment '权限id', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time 
timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '角色权限表(只保留叶子权限与角色关系)'; + +DROP TABLE IF EXISTS `logi_security_user`; +CREATE TABLE `logi_security_user` +( + id int auto_increment primary key, + user_name varchar(64) not null comment '用户账号', + pw varchar(2048) not null comment '用户密码', + salt char(5) default '' not null comment '密码盐', + real_name varchar(128) default '' not null comment '真实姓名', + phone char(11) default '' not null comment 'mobile', + email varchar(30) default '' not null comment 'email', + dept_id int null comment '所属部门id', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + create_time timestamp default CURRENT_TIMESTAMP null comment '注册时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户信息'; + +DROP TABLE IF EXISTS `logi_security_user_project`; +CREATE TABLE `logi_security_user_project` +( + id int auto_increment primary key, + user_id int not null comment '用户id', + project_id int not null comment '项目id', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户项目关系表(项目负责人)'; + +DROP TABLE IF EXISTS `logi_security_user_resource`; +CREATE TABLE `logi_security_user_resource` +( + id int auto_increment primary key, + user_id int not null comment '用户id', + project_id int not null comment '资源所属项目id', + resource_type_id int not null comment '资源类别id', + resource_id int not null comment '资源id', + control_level 
tinyint not null comment '管理级别:1(查看权限)2(管理权限)', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户和资源关系表'; + +DROP TABLE IF EXISTS `logi_security_user_role`; +CREATE TABLE `logi_security_user_role` +( + id int auto_increment primary key, + user_id int not null comment '用户id', + role_id int not null comment '角色id', + create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间', + update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间', + is_delete tinyint(1) default 0 not null comment '逻辑删除', + app_name varchar(16) null comment '应用名称' +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户角色表'; + + +DROP TABLE IF EXISTS `logi_security_config`; +CREATE TABLE `logi_security_config` +( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键自增', + `value_group` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项组', + `value_name` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项名字', + `value` text COMMENT '配置项的值', + `edit` int(4) NOT NULL DEFAULT '1' COMMENT '是否可以编辑 1 不可编辑(程序获取) 2 可编辑', + `status` int(4) NOT NULL DEFAULT '1' COMMENT '1 正常 2 禁用', + `memo` varchar(1000) NOT NULL DEFAULT '' COMMENT '备注', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', + `is_delete` tinyint(1) NOT NULL DEFAULT '0' COMMENT '逻辑删除', + `app_name` varchar(16) COLLATE utf8_bin DEFAULT NULL COMMENT '应用名称', + `operator` varchar(16) COLLATE utf8_bin DEFAULT NULL COMMENT '操作者', + PRIMARY KEY (`id`), + KEY `idx_group_name` (`value_group`,`value_name`) +) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 COMMENT='logi配置项'; + 
+ +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_CLUSTER_NO_CONTROLLER','{ \"value\": 1, \"weight\": 30 } ','集群Controller数正常','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_REQUEST_QUEUE_FULL','{ \"value\": 10, \"weight\": 20 } ','Broker-RequestQueueSize指标','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW','{ \"value\": 0.8, \"weight\": 20 } ','Broker-NetworkProcessorAvgIdlePercent指标','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_GROUP_RE_BALANCE_TOO_FREQUENTLY','{\n \"latestMinutes\": 10,\n \"detectedTimes\": 8,\n \"weight\": 10\n}\n','Group的re-balance频率','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_NO_LEADER','{ \"value\": 1, \"weight\": 10 } ','Topic 无Leader数','know-streaming'); +INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_UNDER_REPLICA_TOO_LONG','{ \"latestMinutes\": 10, \"detectedTimes\": 8, \"weight\": 10 } ','Topic 未同步持续时间','know-streaming'); +-- 初始化权限 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1593', '多集群管理', '0', '0', '1', '多集群管理', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1595', '系统管理', '0', '0', '1', '系统管理', '0', 
'know-streaming'); + +-- 多集群管理权限 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1597', '接入集群', '1593', '1', '2', '接入集群', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1599', '删除集群', '1593', '1', '2', '删除集群', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1601', 'Cluster-修改集群信息', '1593', '1', '2', 'Cluster-修改集群信息', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1603', 'Cluster-修改健康规则', '1593', '1', '2', 'Cluster-修改健康规则', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1605', 'Broker-修改Broker配置', '1593', '1', '2', 'Broker-修改Broker配置', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1607', 'Topic-新增Topic', '1593', '1', '2', 'Topic-新增Topic', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1609', 'Topic-扩分区', '1593', '1', '2', 'Topic-扩分区', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1611', 'Topic-删除Topic', '1593', '1', '2', 'Topic-删除Topic', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1613', 'Topic-重置Offset', '1593', 
'1', '2', 'Topic-重置Offset', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1615', 'Topic-修改Topic配置', '1593', '1', '2', 'Topic-修改Topic配置', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1617', 'Consumers-重置Offset', '1593', '1', '2', 'Consumers-重置Offset', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1619', 'Test-Producer', '1593', '1', '2', 'Test-Producer', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1621', 'Test-Consumer', '1593', '1', '2', 'Test-Consumer', '0', 'know-streaming'); + +-- 系统管理权限 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1623', '配置管理-新增配置', '1595', '1', '2', '配置管理-新增配置', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1625', '配置管理-编辑配置', '1595', '1', '2', '配置管理-编辑配置', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1627', '配置管理-删除配置', '1595', '1', '2', '配置管理-删除配置', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1629', '用户管理-新增人员', '1595', '1', '2', '用户管理-新增人员', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, 
`app_name`) VALUES ('1631', '用户管理-编辑人员', '1595', '1', '2', '用户管理-编辑人员', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1633', '用户管理-修改人员密码', '1595', '1', '2', '用户管理-修改人员密码', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1635', '用户管理-删除人员', '1595', '1', '2', '用户管理-删除人员', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1637', '用户管理-新增角色', '1595', '1', '2', '用户管理-新增角色', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1639', '用户管理-编辑角色', '1595', '1', '2', '用户管理-编辑角色', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1641', '用户管理-分配用户角色', '1595', '1', '2', '用户管理-分配用户角色', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1643', '用户管理-删除角色', '1595', '1', '2', '用户管理-删除角色', '0', 'know-streaming'); + +-- 多集群管理权限2022-09-06新增 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2000', '多集群管理查看', '1593', '1', '2', '多集群管理查看', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2002', 'Topic-迁移副本', '1593', '1', '2', 'Topic-迁移副本', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, 
`is_delete`, `app_name`) VALUES ('2004', 'Topic-扩缩副本', '1593', '1', '2', 'Topic-扩缩副本', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2006', 'Cluster-LoadReBalance-周期均衡', '1593', '1', '2', 'Cluster-LoadReBalance-周期均衡', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2008', 'Cluster-LoadReBalance-立即均衡', '1593', '1', '2', 'Cluster-LoadReBalance-立即均衡', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2010', 'Cluster-LoadReBalance-设置集群规格', '1593', '1', '2', 'Cluster-LoadReBalance-设置集群规格', '0', 'know-streaming'); + + +-- 系统管理权限2022-09-06新增 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('3000', '系统管理查看', '1595', '1', '2', '系统管理查看', '0', 'know-streaming'); + + + + + + +-- 初始化用户 +INSERT INTO `logi_security_user` (`id`, `user_name`, `pw`, `real_name`, `is_delete`, `app_name`) VALUES ('1', 'admin', 'V1ZkU2RHRlhOVGRSUmxweFUycFNhR0V6ZEdKSk1FRjRVVU5PWkdaVmJ6SlZiWGh6WVVWQ09YdEFWbXBLTkdGcmUxc2pRREpBSTExOVNqWlNiR3hvUUgwPXtAVmpKNGFre1sjQDNAI119SjZSbGxoQH0=Mv{#cdRgJ45Lqx}3IubEW87!==', '系统管理员', '0', 'know-streaming'); + +-- 初始化角色 +INSERT INTO `logi_security_role` (`id`, `role_code`, `role_name`, `description`, `last_reviser`, `is_delete`, `app_name`) VALUES ('1677', 'r15477137', '管理员角色', '包含系统所有权限', 'admin', '0', 'know-streaming'); + +-- 初始化角色权限关系 +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1597', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1599', '0', 
'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1601', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1603', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1605', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1607', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1609', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1611', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1613', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1615', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1617', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1619', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1621', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1593', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1623', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, 
`permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1625', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1627', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1629', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1631', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1633', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1635', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1637', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1639', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1641', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1643', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1595', '0', 'know-streaming'); + +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2000', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2002', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2004', '0', 
'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2006', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2008', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2010', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '3000', '0', 'know-streaming'); + +-- 初始化 用户角色关系 +INSERT INTO `logi_security_user_role` (`id`, `user_id`, `role_id`, `is_delete`, `app_name`) VALUES ('1', '1', '1677', '0', 'know-streaming'); + +INSERT INTO `logi_security_config` +(`value_group`,`value_name`,`value`,`edit`,`status`,`memo`,`is_delete`,`app_name`,`operator`) +VALUES +('SECURITY.LOGIN','SECURITY.TRICK_USERS','[\n \"admin\"\n]',1,1,'允许跳过登录的用户',0,'know-streaming','admin'); diff --git a/km-dist/docker/mysql/my.cnf b/km-dist/docker/mysql/my.cnf new file mode 100644 index 00000000..79f0db15 --- /dev/null +++ b/km-dist/docker/mysql/my.cnf @@ -0,0 +1,15 @@ +[client] +default-character-set=utf8 +[mysql] +default-character-set=utf8 +[mysqld] +skip-host-cache +skip-name-resolve +datadir=/data/mysql +socket=/var/lib/mysql/mysql.sock +secure-file-priv=/var/lib/mysql-files +character-set-server=utf8 +user=mysql +symbolic-links=0 +pid-file=/var/run/mysqld/mysqld.pid +sql_mode=ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION diff --git a/km-dist/docker/ui/dockerfile b/km-dist/docker/ui/dockerfile new file mode 100644 index 00000000..dfe605dc --- /dev/null +++ b/km-dist/docker/ui/dockerfile @@ -0,0 +1,5 @@ +FROM nginx:1.18 + +ADD pub.tar / +ADD knowstreaming.conf /etc/nginx/conf.d/ +RUN rm -rf /etc/nginx/conf.d/default.conf diff --git a/km-dist/docker/ui/knowstreaming.conf 
b/km-dist/docker/ui/knowstreaming.conf new file mode 100644 index 00000000..5b051595 --- /dev/null +++ b/km-dist/docker/ui/knowstreaming.conf @@ -0,0 +1,46 @@ +server { + listen 80; + server_name localhost; + gzip on; + gzip_buffers 16 8k; + gzip_comp_level 4; + gzip_http_version 1.0; + gzip_min_length 1280; + gzip_types text/plain text/css text/xml application/x-javascript application/xml application/xml+rss application/json application/javascript text/*; + gzip_vary on; + root /pub; + location / { + root /pub; + if ($request_filename ~* .*\.(?:htm|html|json)$) { + add_header Cache-Control "private, no-store, no-cache, must-revalidate, proxy-revalidate"; + } + try_files $uri /layout/index.html; + } + location ~* \.(json)$ { + add_header Cache-Control no-cache; + } + location @kmfallback { + } + #location ~ ^/(clusters|config|cluster|login) { + # rewrite ^.*$ /; + #} + location ~ ^/ks-km/api/v3 { + #rewrite ^/ks-km/api/v3/(.*)$ /ks-km/ks-km/api/v3/$1 break; + proxy_pass http://knowstreaming-manager; + #proxy_pass https://api-kylin-xg02.intra.xiaojukeji.com; + #proxy_cookie_path /ks-km/ /; + #proxy_set_header Host $host; + #proxy_set_header Referer $http_referer; + #proxy_set_header Cookie $http_cookie; + #proxy_set_header X-Real-Ip $remote_addr; + #proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location ~ ^/logi-security/api/v1 { + #rewrite ^/logi-security/api/v1/(.*)$ /ks-km/logi-security/api/v1/$1 break; + proxy_pass http://knowstreaming-manager; + #proxy_pass https://api-kylin-xg02.intra.xiaojukeji.com; + } + location ~ ^/(401|403|404|500){ + rewrite ^.*$ /; + } + } diff --git a/km-dist/helm/Chart.yaml b/km-dist/helm/Chart.yaml index 4b764af4..b5f5432c 100644 --- a/km-dist/helm/Chart.yaml +++ b/km-dist/helm/Chart.yaml @@ -4,13 +4,13 @@ description: knowstreaming-manager Helm chart type: application -version: 0.1.4 +version: 0.1.5 maintainers: - email: didicloud@didiglobal.com name: didicloud -appVersion: "3.0.0-beta.2" +appVersion: 
"3.0.0-beta.3" dependencies: - name: knowstreaming-web diff --git a/km-dist/helm/charts/ksmysql/templates/statefulset.yaml b/km-dist/helm/charts/ksmysql/templates/statefulset.yaml index a536f2f8..d61c506a 100644 --- a/km-dist/helm/charts/ksmysql/templates/statefulset.yaml +++ b/km-dist/helm/charts/ksmysql/templates/statefulset.yaml @@ -21,7 +21,7 @@ spec: {{- include "ksmysql.selectorLabels" . | nindent 8 }} spec: containers: - - image: knowstreaming/knowstreaming-mysql:0.2.0 + - image: knowstreaming/knowstreaming-mysql:latest name: {{ .Chart.Name }} env: - name: MYSQL_DATABASE diff --git a/km-dist/helm/values.yaml b/km-dist/helm/values.yaml index 2c422f61..293bd001 100644 --- a/km-dist/helm/values.yaml +++ b/km-dist/helm/values.yaml @@ -3,7 +3,7 @@ replicaCount: 2 image: repository: knowstreaming/knowstreaming-manager pullPolicy: IfNotPresent - tag: "0.2.0" + tag: "latest" imagePullSecrets: [] nameOverride: "" @@ -73,7 +73,7 @@ knowstreaming-web: image: repository: knowstreaming/knowstreaming-ui pullPolicy: IfNotPresent - tag: "0.2.0" + tag: "latest" service: type: NodePort diff --git a/km-dist/init/sql/ddl-ks-km.sql b/km-dist/init/sql/ddl-ks-km.sql index 50696917..d9e4e16c 100644 --- a/km-dist/init/sql/ddl-ks-km.sql +++ b/km-dist/init/sql/ddl-ks-km.sql @@ -257,6 +257,7 @@ CREATE TABLE `ks_km_physical_cluster` ( `kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本', `client_properties` text COMMENT 'Kafka客户端配置', `jmx_properties` text COMMENT 'JMX配置', + `zk_properties` text COMMENT 'ZK配置', `description` text COMMENT '备注', `auth_type` int(11) NOT NULL DEFAULT '0' COMMENT '认证类型,-1未知,0:无认证,', `run_state` tinyint(4) NOT NULL DEFAULT '1' COMMENT '运行状态, 0表示未监控, 1监控中,有ZK,2:监控中,无ZK', diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java index 1200699a..c70a4df6 100644 --- 
a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/ESOpClient.java @@ -392,10 +392,7 @@ public class ESOpClient { return false; } - /** - * 创建索引模板 - */ - public boolean createIndexTemplateIfNotExist(String indexTemplateName, String config) { + public boolean templateExist(String indexTemplateName){ ESClient esClient = null; try { @@ -410,6 +407,29 @@ public class ESOpClient { if (null != templateConfig) { return true; } + } catch (Exception e) { + LOGGER.warn( "method=templateExist||indexTemplateName={}||msg=exception!", + indexTemplateName, e); + } finally { + if (esClient != null) { + this.returnESClientToPool(esClient); + } + } + + return false; + } + + /** + * 创建索引模板 + */ + public boolean createIndexTemplateIfNotExist(String indexTemplateName, String config) { + ESClient esClient = null; + + try { + esClient = this.getESClientFromPool(); + + //存在模板就返回,不存在就创建 + if(templateExist(indexTemplateName)){return true;} // 创建新的模板 ESIndicesPutTemplateResponse response = esClient.admin().indices().preparePutTemplate( indexTemplateName ) @@ -417,8 +437,7 @@ public class ESOpClient { return response.getAcknowledged(); } catch (Exception e) { - LOGGER.warn( - "class=ESOpClient||method=createIndexTemplateIfNotExist||indexTemplateName={}||config={}||msg=exception!", + LOGGER.warn( "method=createIndexTemplateIfNotExist||indexTemplateName={}||config={}||msg=exception!", indexTemplateName, config, e ); } finally { diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/package-info.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/package-info.java new file mode 100644 index 00000000..88139db3 --- /dev/null +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/package-info.java @@ -0,0 +1,4 @@ +/** + * 读取Kafka在ZK中存储的数据的包 + 
*/ +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper; \ No newline at end of file diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/zk/KafkaZKDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/KafkaZKDAO.java similarity index 97% rename from km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/zk/KafkaZKDAO.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/KafkaZKDAO.java index 3e00e558..7a7d4b76 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/zk/KafkaZKDAO.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/KafkaZKDAO.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.persistence.zk; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service; import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker; import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController; diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/zk/impl/KafkaZKDAOImpl.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java similarity index 90% rename from km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/zk/impl/KafkaZKDAOImpl.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java index 61a7bad0..82cb8130 100644 --- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/zk/impl/KafkaZKDAOImpl.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.persistence.zk.impl; 
+package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl; import com.alibaba.fastjson.JSON; import com.didiglobal.logi.log.ILog; @@ -11,11 +11,11 @@ import com.xiaojukeji.know.streaming.km.common.enums.topic.TopicTypeEnum; import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException; import com.xiaojukeji.know.streaming.km.common.exception.NotExistException; import com.xiaojukeji.know.streaming.km.common.utils.Tuple; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.ControllerData; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.BrokerMetadata; -import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.PartitionMap; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.ControllerData; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.BrokerMetadata; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionMap; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; -import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO; +import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO; import kafka.utils.Json; import kafka.zk.*; import kafka.zookeeper.AsyncResponse; @@ -46,14 +46,14 @@ public class KafkaZKDAOImpl implements KafkaZKDAO { public Broker getBrokerMetadata(String zkAddress) throws KeeperException.NoNodeException, AdminOperateException { ZooKeeper zooKeeper = null; try { - zooKeeper = new ZooKeeper(zkAddress, 1000, watchedEvent -> logger.info(" receive event : " + watchedEvent.getType().name())); + zooKeeper = new ZooKeeper(zkAddress, 3000, watchedEvent -> logger.info(" receive event : " + watchedEvent.getType().name())); List brokerIdList = this.getChildren(zooKeeper, BrokerIdsZNode.path()); if (brokerIdList == null || brokerIdList.isEmpty()) { return null; } BrokerMetadata brokerMetadata = this.getData(zooKeeper, 
BrokerIdZNode.path(Integer.parseInt(brokerIdList.get(0))), false, BrokerMetadata.class); - return Broker.buildFrom(null, Integer.valueOf(brokerIdList.get(0)), brokerMetadata); + return this.convert2Broker(null, Integer.valueOf(brokerIdList.get(0)), brokerMetadata); } catch (KeeperException.NoNodeException nne) { logger.warn("method=getBrokerMetadata||zkAddress={}||errMsg=exception", zkAddress, nne); throw nne; @@ -79,7 +79,7 @@ public class KafkaZKDAOImpl implements KafkaZKDAO { try { BrokerMetadata metadata = this.getData(kafkaZkClient.currentZooKeeper(), BrokerIdZNode.path(brokerId), false, BrokerMetadata.class); BrokerMetadata.parseAndUpdateBrokerMetadata(metadata); - return Broker.buildFrom(clusterPhyId, brokerId, metadata); + return this.convert2Broker(clusterPhyId, brokerId, metadata); } catch (KeeperException ke) { logger.error("method=getBrokerMetadata||clusterPhyId={}||brokerId={}||errMsg=exception", clusterPhyId, brokerId, ke); throw ke; @@ -269,4 +269,18 @@ public class KafkaZKDAOImpl implements KafkaZKDAO { byte[] bytes = zooKeeper.getData(path, addWatch, null); return JSON.parseObject(bytes, clazz); } + + private Broker convert2Broker(Long clusterPhyId, Integer brokerId, BrokerMetadata brokerMetadata) { + Broker metadata = new Broker(); + metadata.setClusterPhyId(clusterPhyId); + metadata.setBrokerId(brokerId); + metadata.setHost(brokerMetadata.getHost()); + metadata.setPort(brokerMetadata.getPort()); + metadata.setJmxPort(brokerMetadata.getJmxPort()); + metadata.setStartTimestamp(brokerMetadata.getTimestamp()); + metadata.setRack(brokerMetadata.getRack()); + metadata.setStatus(1); + metadata.setEndpointMap(brokerMetadata.getEndpointMap()); + return metadata; + } } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/ControllerData.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/ControllerData.java similarity index 81% rename from 
km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/ControllerData.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/ControllerData.java index f69c6862..afc7f55b 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/ControllerData.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/ControllerData.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java similarity index 97% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java index 480867af..3b252c5f 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/BrokerMetadata.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/PartitionMap.java 
b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/PartitionMap.java similarity index 91% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/PartitionMap.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/PartitionMap.java index bf1fbd1a..4bc36cac 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/PartitionMap.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/PartitionMap.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/PartitionState.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/PartitionState.java similarity index 93% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/PartitionState.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/PartitionState.java index 60ae4307..47be5cb9 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/PartitionState.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/PartitionState.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers; import com.fasterxml.jackson.annotation.JsonProperty; import lombok.AllArgsConstructor; diff --git 
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/TopicMetadata.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/TopicMetadata.java similarity index 91% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/TopicMetadata.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/TopicMetadata.java index 803a5e29..f84c8fcf 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/brokers/TopicMetadata.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/TopicMetadata.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationBaseData.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationBaseData.java similarity index 77% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationBaseData.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationBaseData.java index 86a3abe9..09ffee10 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationBaseData.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationBaseData.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config; 
+package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationDataV1.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationDataV1.java similarity index 86% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationDataV1.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationDataV1.java index 75598e65..1853b940 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationDataV1.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationDataV1.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config; import com.fasterxml.jackson.annotation.JsonProperty; import lombok.AllArgsConstructor; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationDataV2.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationDataV2.java similarity index 90% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationDataV2.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationDataV2.java index 6b0d8806..5e6024fa 100644 --- 
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigChangeNotificationDataV2.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigChangeNotificationDataV2.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config; import com.fasterxml.jackson.annotation.JsonProperty; import lombok.AllArgsConstructor; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigNodeData.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigNodeData.java similarity index 81% rename from km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigNodeData.java rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigNodeData.java index 13132b4f..287912dc 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/zookeeper/znode/config/ConfigNodeData.java +++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/config/ConfigNodeData.java @@ -1,4 +1,4 @@ -package com.xiaojukeji.know.streaming.km.common.zookeeper.znode.config; +package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.config; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/km-persistence/src/main/resources/mybatis/ClusterPhyMapper.xml b/km-persistence/src/main/resources/mybatis/ClusterPhyMapper.xml index 25e16526..ca99b102 100644 --- a/km-persistence/src/main/resources/mybatis/ClusterPhyMapper.xml +++ b/km-persistence/src/main/resources/mybatis/ClusterPhyMapper.xml @@ -13,6 +13,7 @@ + @@ -23,8 +24,8 @@ useGeneratedKeys="true" keyProperty="id"> INSERT INTO ks_km_physical_cluster - (name, zookeeper, 
bootstrap_servers, kafka_version, client_properties, jmx_properties, description, auth_type, run_state) + (name, zookeeper, bootstrap_servers, kafka_version, client_properties, jmx_properties, zk_properties, description, auth_type, run_state) VALUES - (#{name}, #{zookeeper}, #{bootstrapServers}, #{kafkaVersion}, #{clientProperties}, #{jmxProperties}, #{description}, #{authType}, #{runState}) + (#{name}, #{zookeeper}, #{bootstrapServers}, #{kafkaVersion}, #{clientProperties}, #{jmxProperties}, #{zkProperties}, #{description}, #{authType}, #{runState}) \ No newline at end of file diff --git a/km-persistence/src/main/resources/mybatis/ReassignJob.xml b/km-persistence/src/main/resources/mybatis/ReassignJob.xml index 5966b418..d1dcd80d 100644 --- a/km-persistence/src/main/resources/mybatis/ReassignJob.xml +++ b/km-persistence/src/main/resources/mybatis/ReassignJob.xml @@ -19,8 +19,8 @@ INSERT INTO ks_km_reassign_job - (id, cluster_phy_id, reassignment_json, description, throttle_unit_byte, start_time, finished_time, creator, status) + (id, cluster_phy_id, reassignment_json, description, throttle_unit_byte, creator, status) VALUES - (#{id}, #{clusterPhyId}, #{reassignmentJson}, #{description}, #{throttleUnitByte}, #{startTime}, #{finishedTime}, #{creator}, #{status}) + (#{id}, #{clusterPhyId}, #{reassignmentJson}, #{description}, #{throttleUnitByte}, #{creator}, #{status}) \ No newline at end of file diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/version/VersionController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/version/VersionController.java index 093d597a..52cc4807 100644 --- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/version/VersionController.java +++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/version/VersionController.java @@ -11,9 +11,11 @@ import com.xiaojukeji.know.streaming.km.common.constant.Constant; import io.swagger.annotations.Api; import 
io.swagger.annotations.ApiOperation; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.validation.annotation.Validated; import org.springframework.web.bind.annotation.*; import javax.servlet.http.HttpServletRequest; +import javax.validation.Valid; import java.util.List; import java.util.Map; import java.util.SortedMap; @@ -67,7 +69,7 @@ public class VersionController { @PostMapping(value = "clusters/{clusterId}/types/{type}/user-metric-config") @ResponseBody public Result updateUserMetricItem(@PathVariable Long clusterId, @PathVariable Integer type, - @RequestBody UserMetricConfigDTO userMetricConfigDTO, HttpServletRequest request){ + @Validated @RequestBody UserMetricConfigDTO userMetricConfigDTO, HttpServletRequest request) { return versionControlManager.updateUserMetricItem(clusterId, type, userMetricConfigDTO, HttpRequestUtil.getOperator(request)); } } diff --git a/km-task/pom.xml b/km-task/pom.xml index 502d806d..d07b37b3 100644 --- a/km-task/pom.xml +++ b/km-task/pom.xml @@ -43,7 +43,21 @@ io.github.zqrferrari logi-job-spring-boot-starter + + + oshi-core + com.github.oshi + + + + + + com.github.oshi + oshi-core + 5.6.1 + + io.github.zqrferrari logi-security-spring-boot-starter diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/HealthCheckTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/BrokerHealthCheckTask.java similarity index 76% rename from km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/HealthCheckTask.java rename to km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/BrokerHealthCheckTask.java index 6af11e96..7b611823 100644 --- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/HealthCheckTask.java +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/BrokerHealthCheckTask.java @@ -9,36 +9,38 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; import 
com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig; import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; -import com.xiaojukeji.know.streaming.km.common.component.SpringTool; import com.xiaojukeji.know.streaming.km.common.constant.Constant; import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; -import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService; import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.broker.HealthCheckBrokerService; +import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService; import com.xiaojukeji.know.streaming.km.task.metrics.AbstractAsyncMetricsDispatchTask; import lombok.AllArgsConstructor; import lombok.NoArgsConstructor; import org.springframework.beans.factory.annotation.Autowired; -import java.util.*; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; @NoArgsConstructor @AllArgsConstructor -@Task(name = "HealthCheckTask", - description = "健康检查", +@Task(name = "BrokerHealthCheckTask", + description = "Broker健康检查", cron = "0 0/1 * * * ? 
*", autoRegister = true, consensual = ConsensualEnum.BROADCAST, timeout = 2 * 60) -public class HealthCheckTask extends AbstractAsyncMetricsDispatchTask { - private static final ILog log = LogFactory.getLog(HealthCheckTask.class); +public class BrokerHealthCheckTask extends AbstractAsyncMetricsDispatchTask { + private static final ILog log = LogFactory.getLog(BrokerHealthCheckTask.class); @Autowired private HealthCheckResultService healthCheckResultService; - private final List healthCheckServiceList = new ArrayList<>( - SpringTool.getBeansOfType(AbstractHealthCheckService.class).values() - ); + @Autowired + private HealthCheckBrokerService healthCheckBrokerService; @Override public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) { @@ -53,25 +55,22 @@ public class HealthCheckTask extends AbstractAsyncMetricsDispatchTask { List resultList = new ArrayList<>(); // 遍历Check-Service - for (AbstractHealthCheckService healthCheckService: healthCheckServiceList) { - List paramList = healthCheckService.getResList(clusterPhy.getId()); - if (ValidateUtils.isEmptyList(paramList)) { - // 当前无该维度的资源,则直接设置为 - resultList.addAll(this.getNoResResult(clusterPhy.getId(), healthCheckService, healthConfigMap)); - continue; - } + List paramList = healthCheckBrokerService.getResList(clusterPhy.getId()); + if (ValidateUtils.isEmptyList(paramList)) { + // 当前无该维度的资源,则直接设置为 + resultList.addAll(this.getNoResResult(clusterPhy.getId(), healthCheckBrokerService, healthConfigMap)); + } - // 遍历资源 - for (ClusterPhyParam clusterPhyParam: paramList) { - resultList.addAll(this.checkAndGetResult(healthCheckService, clusterPhyParam, healthConfigMap)); - } + // 遍历资源 + for (ClusterPhyParam clusterPhyParam: paramList) { + resultList.addAll(this.checkAndGetResult(healthCheckBrokerService, clusterPhyParam, healthConfigMap)); } for (HealthCheckResult checkResult: resultList) { try { healthCheckResultService.replace(checkResult); } catch (Exception e) { - 
log.error("method=processSubTask||clusterPhyId={}||checkResult={}||errMsg=exception!", clusterPhy.getId(), checkResult, e); + log.error("class=BrokerHealthCheckTask||method=processSubTask||clusterPhyId={}||checkResult={}||errMsg=exception!", clusterPhy.getId(), checkResult, e); } } @@ -79,7 +78,7 @@ public class HealthCheckTask extends AbstractAsyncMetricsDispatchTask { try { healthCheckResultService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 10 * 60 * 1000)); } catch (Exception e) { - log.error("method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); + log.error("class=BrokerHealthCheckTask||method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); } return TaskResult.SUCCESS; diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/ClusterHealthCheckTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/ClusterHealthCheckTask.java new file mode 100644 index 00000000..cb7f78b2 --- /dev/null +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/ClusterHealthCheckTask.java @@ -0,0 +1,130 @@ +package com.xiaojukeji.know.streaming.km.task.health; + +import com.didiglobal.logi.job.annotation.Task; +import com.didiglobal.logi.job.common.TaskResult; +import com.didiglobal.logi.job.core.consensual.ConsensualEnum; +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; +import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig; +import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; +import com.xiaojukeji.know.streaming.km.common.constant.Constant; +import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum; +import 
com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.cluster.HealthCheckClusterService; +import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService; +import com.xiaojukeji.know.streaming.km.task.metrics.AbstractAsyncMetricsDispatchTask; +import lombok.AllArgsConstructor; +import lombok.NoArgsConstructor; +import org.springframework.beans.factory.annotation.Autowired; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; + +@NoArgsConstructor +@AllArgsConstructor +@Task(name = "ClusterHealthCheckTask", + description = "Cluster健康检查", + cron = "0 0/1 * * * ? *", + autoRegister = true, + consensual = ConsensualEnum.BROADCAST, + timeout = 2 * 60) +public class ClusterHealthCheckTask extends AbstractAsyncMetricsDispatchTask { + private static final ILog log = LogFactory.getLog(ClusterHealthCheckTask.class); + + @Autowired + private HealthCheckResultService healthCheckResultService; + + @Autowired + private HealthCheckClusterService healthCheckClusterService; + + @Override + public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + return this.calAndUpdateHealthCheckResult(clusterPhy, triggerTimeUnitMs); + } + + private TaskResult calAndUpdateHealthCheckResult(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + // 获取配置,<配置名,配置信息> + Map healthConfigMap = healthCheckResultService.getClusterHealthConfig(clusterPhy.getId()); + + // 检查结果 + List resultList = new ArrayList<>(); + + // 遍历Check-Service + List paramList = healthCheckClusterService.getResList(clusterPhy.getId()); + if (ValidateUtils.isEmptyList(paramList)) { + // 当前无该维度的资源,则直接设置为 + resultList.addAll(this.getNoResResult(clusterPhy.getId(), healthCheckClusterService, healthConfigMap)); + } + + // 遍历资源 + for (ClusterPhyParam 
clusterPhyParam: paramList) { + resultList.addAll(this.checkAndGetResult(healthCheckClusterService, clusterPhyParam, healthConfigMap)); + } + + for (HealthCheckResult checkResult: resultList) { + try { + healthCheckResultService.replace(checkResult); + } catch (Exception e) { + log.error("class=ClusterHealthCheckTask||method=processSubTask||clusterPhyId={}||checkResult={}||errMsg=exception!", clusterPhy.getId(), checkResult, e); + } + } + + // 删除10分钟之前的检查结果 + try { + healthCheckResultService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 10 * 60 * 1000)); + } catch (Exception e) { + log.error("class=ClusterHealthCheckTask||method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); + } + + return TaskResult.SUCCESS; + } + + private List getNoResResult(Long clusterPhyId, AbstractHealthCheckService healthCheckService, Map healthConfigMap) { + List resultList = new ArrayList<>(); + + // 进行检查 + for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) { + HealthCheckDimensionEnum dimensionEnum = healthCheckService.getHealthCheckDimensionEnum(); + if (!clusterHealthConfig.getCheckNameEnum().getDimensionEnum().equals(dimensionEnum)) { + // 类型不匹配 + continue; + } + + // 记录 + HealthCheckResult checkResult = new HealthCheckResult( + dimensionEnum.getDimension(), + clusterHealthConfig.getCheckNameEnum().getConfigName(), + clusterPhyId, + "-1" + ); + checkResult.setPassed(Constant.YES); + resultList.add(checkResult); + } + + return resultList; + } + + private List checkAndGetResult(AbstractHealthCheckService healthCheckService, + ClusterPhyParam clusterPhyParam, + Map healthConfigMap) { + List resultList = new ArrayList<>(); + + // 进行检查 + for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) { + HealthCheckResult healthCheckResult = healthCheckService.checkAndGetResult(clusterPhyParam, clusterHealthConfig); + if (healthCheckResult == null) { + continue; + } + + // 记录 + 
resultList.add(healthCheckResult); + } + + return resultList; + } +} diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/GroupHealthCheckTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/GroupHealthCheckTask.java new file mode 100644 index 00000000..581a679a --- /dev/null +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/GroupHealthCheckTask.java @@ -0,0 +1,130 @@ +package com.xiaojukeji.know.streaming.km.task.health; + +import com.didiglobal.logi.job.annotation.Task; +import com.didiglobal.logi.job.common.TaskResult; +import com.didiglobal.logi.job.core.consensual.ConsensualEnum; +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; +import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig; +import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; +import com.xiaojukeji.know.streaming.km.common.constant.Constant; +import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum; +import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.group.HealthCheckGroupService; +import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService; +import com.xiaojukeji.know.streaming.km.task.metrics.AbstractAsyncMetricsDispatchTask; +import lombok.AllArgsConstructor; +import lombok.NoArgsConstructor; +import org.springframework.beans.factory.annotation.Autowired; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; + +@NoArgsConstructor +@AllArgsConstructor 
+@Task(name = "GroupHealthCheckTask", + description = "Group健康检查", + cron = "0 0/1 * * * ? *", + autoRegister = true, + consensual = ConsensualEnum.BROADCAST, + timeout = 2 * 60) +public class GroupHealthCheckTask extends AbstractAsyncMetricsDispatchTask { + private static final ILog log = LogFactory.getLog(GroupHealthCheckTask.class); + + @Autowired + private HealthCheckResultService healthCheckResultService; + + @Autowired + private HealthCheckGroupService healthCheckGroupService; + + @Override + public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + return this.calAndUpdateHealthCheckResult(clusterPhy, triggerTimeUnitMs); + } + + private TaskResult calAndUpdateHealthCheckResult(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + // 获取配置,<配置名,配置信息> + Map healthConfigMap = healthCheckResultService.getClusterHealthConfig(clusterPhy.getId()); + + // 检查结果 + List resultList = new ArrayList<>(); + + // 遍历Check-Service + List paramList = healthCheckGroupService.getResList(clusterPhy.getId()); + if (ValidateUtils.isEmptyList(paramList)) { + // 当前无该维度的资源,则直接设置为 + resultList.addAll(this.getNoResResult(clusterPhy.getId(), healthCheckGroupService, healthConfigMap)); + } + + // 遍历资源 + for (ClusterPhyParam clusterPhyParam: paramList) { + resultList.addAll(this.checkAndGetResult(healthCheckGroupService, clusterPhyParam, healthConfigMap)); + } + + for (HealthCheckResult checkResult: resultList) { + try { + healthCheckResultService.replace(checkResult); + } catch (Exception e) { + log.error("class=GroupHealthCheckTask||method=processSubTask||clusterPhyId={}||checkResult={}||errMsg=exception!", clusterPhy.getId(), checkResult, e); + } + } + + // 删除10分钟之前的检查结果 + try { + healthCheckResultService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 10 * 60 * 1000)); + } catch (Exception e) { + log.error("class=GroupHealthCheckTask||method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); + } + + 
return TaskResult.SUCCESS; + } + + private List getNoResResult(Long clusterPhyId, AbstractHealthCheckService healthCheckService, Map healthConfigMap) { + List resultList = new ArrayList<>(); + + // 进行检查 + for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) { + HealthCheckDimensionEnum dimensionEnum = healthCheckService.getHealthCheckDimensionEnum(); + if (!clusterHealthConfig.getCheckNameEnum().getDimensionEnum().equals(dimensionEnum)) { + // 类型不匹配 + continue; + } + + // 记录 + HealthCheckResult checkResult = new HealthCheckResult( + dimensionEnum.getDimension(), + clusterHealthConfig.getCheckNameEnum().getConfigName(), + clusterPhyId, + "-1" + ); + checkResult.setPassed(Constant.YES); + resultList.add(checkResult); + } + + return resultList; + } + + private List checkAndGetResult(AbstractHealthCheckService healthCheckService, + ClusterPhyParam clusterPhyParam, + Map healthConfigMap) { + List resultList = new ArrayList<>(); + + // 进行检查 + for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) { + HealthCheckResult healthCheckResult = healthCheckService.checkAndGetResult(clusterPhyParam, clusterHealthConfig); + if (healthCheckResult == null) { + continue; + } + + // 记录 + resultList.add(healthCheckResult); + } + + return resultList; + } +} diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/TopicHealthCheckTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/TopicHealthCheckTask.java new file mode 100644 index 00000000..8badae99 --- /dev/null +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/TopicHealthCheckTask.java @@ -0,0 +1,130 @@ +package com.xiaojukeji.know.streaming.km.task.health; + +import com.didiglobal.logi.job.annotation.Task; +import com.didiglobal.logi.job.common.TaskResult; +import com.didiglobal.logi.job.core.consensual.ConsensualEnum; +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import 
com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; +import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig; +import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; +import com.xiaojukeji.know.streaming.km.common.constant.Constant; +import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum; +import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService; +import com.xiaojukeji.know.streaming.km.core.service.health.checker.topic.HealthCheckTopicService; +import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService; +import com.xiaojukeji.know.streaming.km.task.metrics.AbstractAsyncMetricsDispatchTask; +import lombok.AllArgsConstructor; +import lombok.NoArgsConstructor; +import org.springframework.beans.factory.annotation.Autowired; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; + +@NoArgsConstructor +@AllArgsConstructor +@Task(name = "TopicHealthCheckTask", + description = "Topic健康检查", + cron = "0 0/1 * * * ? 
*", + autoRegister = true, + consensual = ConsensualEnum.BROADCAST, + timeout = 2 * 60) +public class TopicHealthCheckTask extends AbstractAsyncMetricsDispatchTask { + private static final ILog log = LogFactory.getLog(TopicHealthCheckTask.class); + + @Autowired + private HealthCheckResultService healthCheckResultService; + + @Autowired + private HealthCheckTopicService healthCheckTopicService; + + @Override + public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + return this.calAndUpdateHealthCheckResult(clusterPhy, triggerTimeUnitMs); + } + + private TaskResult calAndUpdateHealthCheckResult(ClusterPhy clusterPhy, long triggerTimeUnitMs) { + // 获取配置,<配置名,配置信息> + Map healthConfigMap = healthCheckResultService.getClusterHealthConfig(clusterPhy.getId()); + + // 检查结果 + List resultList = new ArrayList<>(); + + // 遍历Check-Service + List paramList = healthCheckTopicService.getResList(clusterPhy.getId()); + if (ValidateUtils.isEmptyList(paramList)) { + // 当前无该维度的资源,则直接设置为 + resultList.addAll(this.getNoResResult(clusterPhy.getId(), healthCheckTopicService, healthConfigMap)); + } + + // 遍历资源 + for (ClusterPhyParam clusterPhyParam: paramList) { + resultList.addAll(this.checkAndGetResult(healthCheckTopicService, clusterPhyParam, healthConfigMap)); + } + + for (HealthCheckResult checkResult: resultList) { + try { + healthCheckResultService.replace(checkResult); + } catch (Exception e) { + log.error("class=TopicHealthCheckTask||method=processSubTask||clusterPhyId={}||checkResult={}||errMsg=exception!", clusterPhy.getId(), checkResult, e); + } + } + + // 删除10分钟之前的检查结果 + try { + healthCheckResultService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 10 * 60 * 1000)); + } catch (Exception e) { + log.error("class=TopicHealthCheckTask||method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); + } + + return TaskResult.SUCCESS; + } + + private List getNoResResult(Long clusterPhyId, 
AbstractHealthCheckService healthCheckService, Map healthConfigMap) { + List resultList = new ArrayList<>(); + + // 进行检查 + for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) { + HealthCheckDimensionEnum dimensionEnum = healthCheckService.getHealthCheckDimensionEnum(); + if (!clusterHealthConfig.getCheckNameEnum().getDimensionEnum().equals(dimensionEnum)) { + // 类型不匹配 + continue; + } + + // 记录 + HealthCheckResult checkResult = new HealthCheckResult( + dimensionEnum.getDimension(), + clusterHealthConfig.getCheckNameEnum().getConfigName(), + clusterPhyId, + "-1" + ); + checkResult.setPassed(Constant.YES); + resultList.add(checkResult); + } + + return resultList; + } + + private List checkAndGetResult(AbstractHealthCheckService healthCheckService, + ClusterPhyParam clusterPhyParam, + Map healthConfigMap) { + List resultList = new ArrayList<>(); + + // 进行检查 + for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) { + HealthCheckResult healthCheckResult = healthCheckService.checkAndGetResult(clusterPhyParam, clusterHealthConfig); + if (healthCheckResult == null) { + continue; + } + + // 记录 + resultList.add(healthCheckResult); + } + + return resultList; + } +} diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaGroupTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaGroupTask.java index 65b64b96..e2f749fe 100644 --- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaGroupTask.java +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaGroupTask.java @@ -12,6 +12,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException; import com.xiaojukeji.know.streaming.km.common.exception.NotExistException; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.group.GroupService; +import 
com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import org.apache.kafka.clients.admin.*; import org.apache.kafka.common.TopicPartition; import org.springframework.beans.factory.annotation.Autowired; @@ -32,16 +33,14 @@ public class SyncKafkaGroupTask extends AbstractAsyncMetadataDispatchTask { @Autowired private GroupService groupService; + @Autowired + private TopicService topicService; + @Override public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception { - TaskResult tr = TaskResult.SUCCESS; List groupNameList = groupService.listGroupsFromKafka(clusterPhy.getId()); - for (String groupName: groupNameList) { - if (!TaskResult.SUCCESS.equals(this.updateGroupMembersTask(clusterPhy, groupName, triggerTimeUnitMs))) { - tr = TaskResult.FAIL; - } - } + TaskResult tr = updateGroupMembersTask(clusterPhy, groupNameList, triggerTimeUnitMs); if (!TaskResult.SUCCESS.equals(tr)) { return tr; @@ -53,19 +52,25 @@ public class SyncKafkaGroupTask extends AbstractAsyncMetadataDispatchTask { return tr; } - private TaskResult updateGroupMembersTask(ClusterPhy clusterPhy, String groupName, long triggerTimeUnitMs) { - try { - List poList = this.getGroupMembers(clusterPhy.getId(), groupName, new Date(triggerTimeUnitMs)); - for (GroupMemberPO po: poList) { - groupService.replaceDBData(po); + + private TaskResult updateGroupMembersTask(ClusterPhy clusterPhy, List groupNameList, long triggerTimeUnitMs) { + List groupMemberPOList = new ArrayList<>(); + TaskResult tr = TaskResult.SUCCESS; + + for (String groupName : groupNameList) { + try { + List poList = this.getGroupMembers(clusterPhy.getId(), groupName, new Date(triggerTimeUnitMs)); + groupMemberPOList.addAll(poList); + } catch (Exception e) { + log.error("method=updateGroupMembersTask||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhy.getId(), groupName, e); + tr = TaskResult.FAIL; } - } catch (Exception e) { - 
log.error("method=updateGroupMembersTask||clusterPhyId={}||groupName={}||errMsg={}", clusterPhy.getId(), groupName, e.getMessage()); - - return TaskResult.FAIL; } - - return TaskResult.SUCCESS; + + groupMemberPOList = this.filterGroupIfTopicNotExist(clusterPhy.getId(), groupMemberPOList); + groupService.batchReplace(groupMemberPOList); + + return tr; } private List getGroupMembers(Long clusterPhyId, String groupName, Date updateTime) throws NotExistException, AdminOperateException { @@ -73,7 +78,7 @@ public class SyncKafkaGroupTask extends AbstractAsyncMetadataDispatchTask { // 获取消费组消费过哪些Topic Map offsetMap = groupService.getGroupOffset(clusterPhyId, groupName); - for (TopicPartition topicPartition: offsetMap.keySet()) { + for (TopicPartition topicPartition : offsetMap.keySet()) { GroupMemberPO po = groupMap.get(topicPartition.topic()); if (po == null) { po = new GroupMemberPO(clusterPhyId, topicPartition.topic(), groupName, updateTime); @@ -96,7 +101,7 @@ public class SyncKafkaGroupTask extends AbstractAsyncMetadataDispatchTask { } Set topicNameSet = partitionList.stream().map(elem -> elem.topic()).collect(Collectors.toSet()); - for (String topicName: topicNameSet) { + for (String topicName : topicNameSet) { groupMap.putIfAbsent(topicName, new GroupMemberPO(clusterPhyId, topicName, groupName, updateTime)); GroupMemberPO po = groupMap.get(topicName); @@ -114,4 +119,17 @@ public class SyncKafkaGroupTask extends AbstractAsyncMetadataDispatchTask { return new ArrayList<>(groupMap.values()); } + + private List filterGroupIfTopicNotExist(Long clusterPhyId, List poList) { + if (poList.isEmpty()) { + return poList; + } + + // 集群Topic集合 + Set dbTopicSet = topicService.listTopicsFromDB(clusterPhyId).stream().map(elem -> elem.getTopicName()).collect(Collectors.toSet()); + dbTopicSet.add(""); //兼容没有消费Topic的group + + // 过滤Topic不存在的消费组 + return poList.stream().filter(elem -> dbTopicSet.contains(elem.getTopicName())).collect(Collectors.toList()); + } } diff --git a/pom.xml 
b/pom.xml index e30add73..a168b74c 100644 --- a/pom.xml +++ b/pom.xml @@ -15,7 +15,7 @@ - 3.0.0-beta.3 + 3.1.0 8 8 @@ -230,6 +230,19 @@ io.github.zqrferrari logi-job-spring-boot-starter 1.0.23 + + + oshi-core + com.github.oshi + + + + + + + com.github.oshi + oshi-core + 5.6.1