Compare commits
23 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c56d8cfb0f | ||
|
|
c27786a257 | ||
|
|
81910d1958 | ||
|
|
55d5fc4bde | ||
|
|
f30586b150 | ||
|
|
37037c19f0 | ||
|
|
1a5e2c7309 | ||
|
|
941dd4fd65 | ||
|
|
5f6df3681c | ||
|
|
7d045dbf05 | ||
|
|
4ff4accdc3 | ||
|
|
bbe967c4a8 | ||
|
|
b101cec6fa | ||
|
|
e98ec562a2 | ||
|
|
0e71ecc587 | ||
|
|
0f11a65df8 | ||
|
|
da00c8c877 | ||
|
|
8b177877bb | ||
|
|
ea199dca8d | ||
|
|
88b5833f77 | ||
|
|
127b5be651 | ||
|
|
80f001cdd5 | ||
|
|
30d297cae1 |
@@ -1,4 +1,39 @@
|
||||
|
||||
## v3.1.0
|
||||
|
||||
**Bug修复**
|
||||
- 修复重置 Group Offset 的提示信息中,缺少Dead状态也可进行重置的描述;
|
||||
- 修复新建 Topic 后,立即查看 Topic Messages 信息时,会提示 Topic 不存在的问题;
|
||||
- 修复副本变更时,优先副本选举未被正常处罚执行的问题;
|
||||
- 修复 git 目录不存在时,打包不能正常进行的问题;
|
||||
- 修复 KRaft 模式的 Kafka 集群,JMX PORT 显示 -1 的问题;
|
||||
|
||||
|
||||
**体验优化**
|
||||
- 优化Cluster、Broker、Topic、Group的健康分为健康状态;
|
||||
- 去除健康巡检配置中的权重信息;
|
||||
- 错误提示页面展示优化;
|
||||
- 前端打包编译依赖默认使用 taobao 镜像;
|
||||
- 重新设计优化导航栏的 icon ;
|
||||
|
||||
|
||||
**新增**
|
||||
- 个人头像下拉信息中,新增产品版本信息;
|
||||
- 多集群列表页面,新增集群健康状态分布信息;
|
||||
|
||||
|
||||
**Kafka ZK 部分 (v3.1.0版本正式发布)**
|
||||
- 新增 ZK 集群的指标大盘信息;
|
||||
- 新增 ZK 集群的服务状态概览信息;
|
||||
- 新增 ZK 集群的服务节点列表信息;
|
||||
- 新增 Kafka 在 ZK 的存储数据查看功能;
|
||||
- 新增 ZK 的健康巡检及健康状态计算;
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v3.0.1
|
||||
|
||||
**Bug修复**
|
||||
|
||||
@@ -8,7 +8,19 @@
|
||||
|
||||
暂无
|
||||
|
||||
### 6.2.1、升级至 `v3.0.1` 版本
|
||||
### 6.2.1、升级至 `v3.1.0` 版本
|
||||
|
||||
```sql
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_BRAIN_SPLIT', '{ \"value\": 1} ', 'ZK 脑裂', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_OUTSTANDING_REQUESTS', '{ \"amount\": 100, \"ratio\":0.8} ', 'ZK Outstanding 请求堆积数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_WATCH_COUNT', '{ \"amount\": 100000, \"ratio\": 0.8 } ', 'ZK WatchCount 数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_ALIVE_CONNECTIONS', '{ \"amount\": 10000, \"ratio\": 0.8 } ', 'ZK 连接数', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_APPROXIMATE_DATA_SIZE', '{ \"amount\": 524288000, \"ratio\": 0.8 } ', 'ZK 数据大小(Byte)', 'admin');
|
||||
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_SENT_RATE', '{ \"amount\": 500000, \"ratio\": 0.8 } ', 'ZK 发包数', 'admin');
|
||||
|
||||
```
|
||||
|
||||
### 6.2.2、升级至 `v3.0.1` 版本
|
||||
|
||||
**ES 索引模版**
|
||||
```bash
|
||||
@@ -142,10 +154,8 @@ CREATE TABLE `ks_km_group` (
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
|
||||
### 6.2.2、升级至 `v3.0.0` 版本
|
||||
### 6.2.3、升级至 `v3.0.0` 版本
|
||||
|
||||
**SQL 变更**
|
||||
|
||||
@@ -157,7 +167,7 @@ ADD COLUMN `zk_properties` TEXT NULL COMMENT 'ZK配置' AFTER `jmx_properties`;
|
||||
---
|
||||
|
||||
|
||||
### 6.2.3、升级至 `v3.0.0-beta.2`版本
|
||||
### 6.2.4、升级至 `v3.0.0-beta.2`版本
|
||||
|
||||
**配置变更**
|
||||
|
||||
@@ -228,7 +238,7 @@ ALTER TABLE `logi_security_oplog`
|
||||
|
||||
---
|
||||
|
||||
### 6.2.4、升级至 `v3.0.0-beta.1`版本
|
||||
### 6.2.5、升级至 `v3.0.0-beta.1`版本
|
||||
|
||||
**SQL 变更**
|
||||
|
||||
@@ -247,7 +257,7 @@ ALTER COLUMN `operation_methods` set default '';
|
||||
|
||||
---
|
||||
|
||||
### 6.2.5、`2.x`版本 升级至 `v3.0.0-beta.0`版本
|
||||
### 6.2.6、`2.x`版本 升级至 `v3.0.0-beta.0`版本
|
||||
|
||||
**升级步骤:**
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
package com.xiaojukeji.know.streaming.km.biz.cluster;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysHealthState;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.MultiClusterDashboardDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||
@@ -15,6 +16,8 @@ public interface MultiClusterPhyManager {
|
||||
*/
|
||||
ClusterPhysState getClusterPhysState();
|
||||
|
||||
ClusterPhysHealthState getClusterPhysHealthState();
|
||||
|
||||
/**
|
||||
* 查询多集群大盘
|
||||
* @param dto 分页信息
|
||||
|
||||
@@ -5,9 +5,7 @@ import com.didiglobal.logi.log.LogFactory;
|
||||
import com.xiaojukeji.know.streaming.km.biz.cluster.ClusterZookeepersManager;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterZookeepersOverviewDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ZookeeperMetrics;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.ZookeeperMetricParam;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
||||
@@ -20,7 +18,6 @@ import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.zookeeper.ZKRoleEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ZookeeperMetricVersionItems;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZnodeService;
|
||||
@@ -30,7 +27,6 @@ import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
||||
@Service
|
||||
@@ -56,11 +52,6 @@ public class ClusterZookeepersManagerImpl implements ClusterZookeepersManager {
|
||||
return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
|
||||
}
|
||||
|
||||
// // TODO
|
||||
// private Integer healthState;
|
||||
// private Integer healthCheckPassed;
|
||||
// private Integer healthCheckTotal;
|
||||
|
||||
List<ZookeeperInfo> infoList = zookeeperService.listFromDBByCluster(clusterPhyId);
|
||||
|
||||
ClusterZookeepersStateVO vo = new ClusterZookeepersStateVO();
|
||||
@@ -90,12 +81,17 @@ public class ClusterZookeepersManagerImpl implements ClusterZookeepersManager {
|
||||
}
|
||||
}
|
||||
|
||||
Result<ZookeeperMetrics> metricsResult = zookeeperMetricService.collectMetricsFromZookeeper(new ZookeeperMetricParam(
|
||||
// 指标获取
|
||||
Result<ZookeeperMetrics> metricsResult = zookeeperMetricService.batchCollectMetricsFromZookeeper(
|
||||
clusterPhyId,
|
||||
infoList.stream().filter(elem -> elem.alive()).map(item -> new Tuple<String, Integer>(item.getHost(), item.getPort())).collect(Collectors.toList()),
|
||||
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class),
|
||||
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_WATCH_COUNT
|
||||
));
|
||||
Arrays.asList(
|
||||
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_WATCH_COUNT,
|
||||
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_HEALTH_STATE,
|
||||
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_HEALTH_CHECK_PASSED,
|
||||
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL
|
||||
)
|
||||
|
||||
);
|
||||
if (metricsResult.failed()) {
|
||||
LOGGER.error(
|
||||
"class=ClusterZookeepersManagerImpl||method=getClusterPhyZookeepersState||clusterPhyId={}||errMsg={}",
|
||||
@@ -103,8 +99,12 @@ public class ClusterZookeepersManagerImpl implements ClusterZookeepersManager {
|
||||
);
|
||||
return Result.buildSuc(vo);
|
||||
}
|
||||
Float watchCount = metricsResult.getData().getMetric(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_WATCH_COUNT);
|
||||
vo.setWatchCount(watchCount != null? watchCount.intValue(): null);
|
||||
|
||||
ZookeeperMetrics metrics = metricsResult.getData();
|
||||
vo.setWatchCount(ConvertUtil.float2Integer(metrics.getMetrics().get(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_WATCH_COUNT)));
|
||||
vo.setHealthState(ConvertUtil.float2Integer(metrics.getMetrics().get(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_HEALTH_STATE)));
|
||||
vo.setHealthCheckPassed(ConvertUtil.float2Integer(metrics.getMetrics().get(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_HEALTH_CHECK_PASSED)));
|
||||
vo.setHealthCheckTotal(ConvertUtil.float2Integer(metrics.getMetrics().get(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL)));
|
||||
|
||||
return Result.buildSuc(vo);
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import com.didiglobal.logi.log.LogFactory;
|
||||
import com.xiaojukeji.know.streaming.km.biz.cluster.MultiClusterPhyManager;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricsClusterPhyDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysHealthState;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.MultiClusterDashboardDTO;
|
||||
@@ -16,6 +17,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyDashboa
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import com.xiaojukeji.know.streaming.km.common.converter.ClusterVOConverter;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthStateEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
|
||||
@@ -75,6 +77,32 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
|
||||
return physState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterPhysHealthState getClusterPhysHealthState() {
|
||||
List<ClusterPhy> clusterPhyList = clusterPhyService.listAllClusters();
|
||||
|
||||
ClusterPhysHealthState physState = new ClusterPhysHealthState(clusterPhyList.size());
|
||||
for (ClusterPhy clusterPhy: clusterPhyList) {
|
||||
ClusterMetrics metrics = clusterMetricService.getLatestMetricsFromCache(clusterPhy.getId());
|
||||
Float state = metrics.getMetric(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_STATE);
|
||||
if (state == null) {
|
||||
physState.setUnknownCount(physState.getUnknownCount() + 1);
|
||||
} else if (state.intValue() == HealthStateEnum.GOOD.getDimension()) {
|
||||
physState.setGoodCount(physState.getGoodCount() + 1);
|
||||
} else if (state.intValue() == HealthStateEnum.MEDIUM.getDimension()) {
|
||||
physState.setMediumCount(physState.getMediumCount() + 1);
|
||||
} else if (state.intValue() == HealthStateEnum.POOR.getDimension()) {
|
||||
physState.setPoorCount(physState.getPoorCount() + 1);
|
||||
} else if (state.intValue() == HealthStateEnum.DEAD.getDimension()) {
|
||||
physState.setDeadCount(physState.getDeadCount() + 1);
|
||||
} else {
|
||||
physState.setUnknownCount(physState.getUnknownCount() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
return physState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PaginationResult<ClusterPhyDashboardVO> getClusterPhysDashboard(MultiClusterDashboardDTO dto) {
|
||||
// 获取集群
|
||||
@@ -149,13 +177,7 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
|
||||
List<ClusterMetrics> metricsList = new ArrayList<>();
|
||||
for (ClusterPhyDashboardVO vo: voList) {
|
||||
ClusterMetrics clusterMetrics = clusterMetricService.getLatestMetricsFromCache(vo.getId());
|
||||
if (!clusterMetrics.getMetrics().containsKey(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_SCORE)) {
|
||||
Float alive = clusterMetrics.getMetrics().get(ClusterMetricVersionItems.CLUSTER_METRIC_ALIVE);
|
||||
// 如果集群没有健康分,则设置一个默认的健康分数值
|
||||
clusterMetrics.putMetric(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_SCORE,
|
||||
(alive != null && alive <= 0)? 0.0f: Constant.DEFAULT_CLUSTER_HEALTH_SCORE.floatValue()
|
||||
);
|
||||
}
|
||||
clusterMetrics.getMetrics().putIfAbsent(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_STATE, (float) HealthStateEnum.UNKNOWN.getDimension());
|
||||
|
||||
metricsList.add(clusterMetrics);
|
||||
}
|
||||
|
||||
@@ -209,7 +209,7 @@ public class GroupManagerImpl implements GroupManager {
|
||||
}
|
||||
|
||||
if (!ConsumerGroupState.EMPTY.equals(description.state()) && !ConsumerGroupState.DEAD.equals(description.state())) {
|
||||
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, String.format("group处于%s, 重置失败(仅Empty情况可重置)", GroupStateEnum.getByRawState(description.state()).getState()));
|
||||
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, String.format("group处于%s, 重置失败(仅Empty | Dead 情况可重置)", GroupStateEnum.getByRawState(description.state()).getState()));
|
||||
}
|
||||
|
||||
// 获取offset
|
||||
|
||||
@@ -10,14 +10,18 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.kafka.KafkaReplicaAssignUtil;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.topic.OpTopicService;
|
||||
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
|
||||
import kafka.admin.AdminUtils;
|
||||
@@ -52,6 +56,9 @@ public class OpTopicManagerImpl implements OpTopicManager {
|
||||
@Autowired
|
||||
private ClusterPhyService clusterPhyService;
|
||||
|
||||
@Autowired
|
||||
private PartitionService partitionService;
|
||||
|
||||
@Override
|
||||
public Result<Void> createTopic(TopicCreateDTO dto, String operator) {
|
||||
log.info("method=createTopic||param={}||operator={}.", dto, operator);
|
||||
@@ -80,7 +87,7 @@ public class OpTopicManagerImpl implements OpTopicManager {
|
||||
);
|
||||
|
||||
// 创建Topic
|
||||
return opTopicService.createTopic(
|
||||
Result<Void> createTopicRes = opTopicService.createTopic(
|
||||
new TopicCreateParam(
|
||||
dto.getClusterId(),
|
||||
dto.getTopicName(),
|
||||
@@ -90,6 +97,21 @@ public class OpTopicManagerImpl implements OpTopicManager {
|
||||
),
|
||||
operator
|
||||
);
|
||||
if (createTopicRes.successful()){
|
||||
try{
|
||||
FutureUtil.quickStartupFutureUtil.submitTask(() -> {
|
||||
BackoffUtils.backoff(3000);
|
||||
Result<List<Partition>> partitionsResult = partitionService.listPartitionsFromKafka(clusterPhy, dto.getTopicName());
|
||||
if (partitionsResult.successful()){
|
||||
partitionService.updatePartitions(clusterPhy.getId(), dto.getTopicName(), partitionsResult.getData(), new ArrayList<>());
|
||||
}
|
||||
});
|
||||
}catch (Exception e) {
|
||||
log.error("method=createTopic||param={}||operator={}||msg=add partition to db failed||errMsg=exception", dto, operator, e);
|
||||
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, "Topic创建成功,但记录Partition到DB中失败,等待定时任务同步partition信息");
|
||||
}
|
||||
}
|
||||
return createTopicRes;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@@ -47,7 +47,7 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
||||
|
||||
@PostConstruct
|
||||
public void init(){
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_SCORE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_STATE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_FETCH_REQ, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_PRODUCE_REQ, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_UNDER_REPLICA_PARTITIONS, true));
|
||||
@@ -57,7 +57,7 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_REJECTED, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true));
|
||||
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_SCORE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_STATE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_IN, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_OUT, true));
|
||||
@@ -75,9 +75,9 @@ public class VersionControlManagerImpl implements VersionControlManager {
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_OFFSET_CONSUMED, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_LAG, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_STATE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_SCORE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_STATE, true));
|
||||
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_SCORE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_STATE, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_MESSAGE_IN, true));
|
||||
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_NETWORK_RPO_AVG_IDLE, true));
|
||||
|
||||
@@ -22,6 +22,12 @@
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.xiaojukeji.kafka</groupId>
|
||||
<artifactId>km-rebalance</artifactId>
|
||||
<version>${project.parent.version}</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-web</artifactId>
|
||||
|
||||
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.broker;
|
||||
|
||||
import com.alibaba.fastjson.TypeReference;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
import lombok.AllArgsConstructor;
|
||||
@@ -65,13 +66,13 @@ public class Broker implements Serializable {
|
||||
*/
|
||||
private Map<String, IpPortData> endpointMap;
|
||||
|
||||
public static Broker buildFrom(Long clusterPhyId, Node node, Long startTimestamp) {
|
||||
public static Broker buildFrom(Long clusterPhyId, Node node, Long startTimestamp, JmxConfig jmxConfig) {
|
||||
Broker metadata = new Broker();
|
||||
metadata.setClusterPhyId(clusterPhyId);
|
||||
metadata.setBrokerId(node.id());
|
||||
metadata.setHost(node.host());
|
||||
metadata.setPort(node.port());
|
||||
metadata.setJmxPort(-1);
|
||||
metadata.setJmxPort(jmxConfig != null ? jmxConfig.getJmxPort() : -1);
|
||||
metadata.setStartTimestamp(startTimestamp);
|
||||
metadata.setRack(node.rack());
|
||||
metadata.setStatus(1);
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.cluster;
|
||||
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
|
||||
/**
|
||||
* 集群状态信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/24
|
||||
*/
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
@AllArgsConstructor
|
||||
public class ClusterPhysHealthState {
|
||||
private Integer unknownCount;
|
||||
|
||||
private Integer goodCount;
|
||||
|
||||
private Integer mediumCount;
|
||||
|
||||
private Integer poorCount;
|
||||
|
||||
private Integer deadCount;
|
||||
|
||||
private Integer total;
|
||||
|
||||
public ClusterPhysHealthState(Integer total) {
|
||||
this.unknownCount = 0;
|
||||
this.goodCount = 0;
|
||||
this.mediumCount = 0;
|
||||
this.poorCount = 0;
|
||||
this.deadCount = 0;
|
||||
this.total = total;
|
||||
}
|
||||
}
|
||||
@@ -13,9 +13,4 @@ public class BaseClusterHealthConfig extends BaseClusterConfigValue {
|
||||
* 健康检查名称
|
||||
*/
|
||||
protected HealthCheckNameEnum checkNameEnum;
|
||||
|
||||
/**
|
||||
* 权重
|
||||
*/
|
||||
protected Float weight;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck;
|
||||
|
||||
import lombok.Data;
|
||||
|
||||
/**
|
||||
* @author wyb
|
||||
* @date 2022/10/26
|
||||
*/
|
||||
@Data
|
||||
public class HealthAmountRatioConfig extends BaseClusterHealthConfig {
|
||||
/**
|
||||
* 总数
|
||||
*/
|
||||
private Integer amount;
|
||||
/**
|
||||
* 比例
|
||||
*/
|
||||
private Double ratio;
|
||||
}
|
||||
@@ -0,0 +1,83 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.health;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
public class HealthCheckAggResult {
|
||||
private HealthCheckNameEnum checkNameEnum;
|
||||
|
||||
private List<HealthCheckResultPO> poList;
|
||||
|
||||
private Boolean passed;
|
||||
|
||||
public HealthCheckAggResult(HealthCheckNameEnum checkNameEnum, List<HealthCheckResultPO> poList) {
|
||||
this.checkNameEnum = checkNameEnum;
|
||||
this.poList = poList;
|
||||
if (!ValidateUtils.isEmptyList(poList) && poList.stream().filter(elem -> elem.getPassed() <= 0).count() <= 0) {
|
||||
passed = true;
|
||||
} else {
|
||||
passed = false;
|
||||
}
|
||||
}
|
||||
|
||||
public Integer getTotalCount() {
|
||||
if (poList == null) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return poList.size();
|
||||
}
|
||||
|
||||
public Integer getPassedCount() {
|
||||
if (poList == null) {
|
||||
return 0;
|
||||
}
|
||||
return (int) (poList.stream().filter(elem -> elem.getPassed() > 0).count());
|
||||
}
|
||||
|
||||
/**
|
||||
* 计算当前检查的健康分
|
||||
* 比如:计算集群Broker健康检查中的某一项的健康分
|
||||
*/
|
||||
public Integer calRawHealthScore() {
|
||||
if (poList == null || poList.isEmpty()) {
|
||||
return 100;
|
||||
}
|
||||
|
||||
return 100 * this.getPassedCount() / this.getTotalCount();
|
||||
}
|
||||
|
||||
public List<String> getNotPassedResNameList() {
|
||||
if (poList == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
return poList.stream().filter(elem -> elem.getPassed() <= 0).map(elem -> elem.getResName()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public Date getCreateTime() {
|
||||
if (ValidateUtils.isEmptyList(poList)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return poList.get(0).getCreateTime();
|
||||
}
|
||||
|
||||
public Date getUpdateTime() {
|
||||
if (ValidateUtils.isEmptyList(poList)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return poList.get(0).getUpdateTime();
|
||||
}
|
||||
}
|
||||
@@ -17,10 +17,6 @@ import java.util.stream.Collectors;
|
||||
public class HealthScoreResult {
|
||||
private HealthCheckNameEnum checkNameEnum;
|
||||
|
||||
private Float presentDimensionTotalWeight;
|
||||
|
||||
private Float allDimensionTotalWeight;
|
||||
|
||||
private BaseClusterHealthConfig baseConfig;
|
||||
|
||||
private List<HealthCheckResultPO> poList;
|
||||
@@ -28,15 +24,11 @@ public class HealthScoreResult {
|
||||
private Boolean passed;
|
||||
|
||||
public HealthScoreResult(HealthCheckNameEnum checkNameEnum,
|
||||
Float presentDimensionTotalWeight,
|
||||
Float allDimensionTotalWeight,
|
||||
BaseClusterHealthConfig baseConfig,
|
||||
List<HealthCheckResultPO> poList) {
|
||||
this.checkNameEnum = checkNameEnum;
|
||||
this.baseConfig = baseConfig;
|
||||
this.poList = poList;
|
||||
this.presentDimensionTotalWeight = presentDimensionTotalWeight;
|
||||
this.allDimensionTotalWeight = allDimensionTotalWeight;
|
||||
if (!ValidateUtils.isEmptyList(poList) && poList.stream().filter(elem -> elem.getPassed() <= 0).count() <= 0) {
|
||||
passed = true;
|
||||
} else {
|
||||
@@ -59,32 +51,6 @@ public class HealthScoreResult {
|
||||
return (int) (poList.stream().filter(elem -> elem.getPassed() > 0).count());
|
||||
}
|
||||
|
||||
/**
|
||||
* 计算所有检查结果的健康分
|
||||
* 比如:计算集群健康分
|
||||
*/
|
||||
public Float calAllWeightHealthScore() {
|
||||
Float healthScore = 100 * baseConfig.getWeight() / allDimensionTotalWeight;
|
||||
if (poList == null || poList.isEmpty()) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
return healthScore * this.getPassedCount() / this.getTotalCount();
|
||||
}
|
||||
|
||||
/**
|
||||
* 计算当前维度的健康分
|
||||
* 比如:计算集群Broker健康分
|
||||
*/
|
||||
public Float calDimensionWeightHealthScore() {
|
||||
Float healthScore = 100 * baseConfig.getWeight() / presentDimensionTotalWeight;
|
||||
if (poList == null || poList.isEmpty()) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
return healthScore * this.getPassedCount() / this.getTotalCount();
|
||||
}
|
||||
|
||||
/**
|
||||
* 计算当前检查的健康分
|
||||
* 比如:计算集群Broker健康检查中的某一项的健康分
|
||||
@@ -102,7 +68,7 @@ public class HealthScoreResult {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
return poList.stream().filter(elem -> elem.getPassed() <= 0).map(elem -> elem.getResName()).collect(Collectors.toList());
|
||||
return poList.stream().filter(elem -> elem.getPassed() <= 0 && !ValidateUtils.isBlank(elem.getResName())).map(elem -> elem.getResName()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public Date getCreateTime() {
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.param.zookeeper;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author didi
|
||||
*/
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
public class ZookeeperParam extends ClusterPhyParam {
|
||||
private List<Tuple<String, Integer>> zkAddressList;
|
||||
|
||||
private ZKConfig zkConfig;
|
||||
|
||||
public ZookeeperParam(Long clusterPhyId, List<Tuple<String, Integer>> zkAddressList, ZKConfig zkConfig) {
|
||||
super(clusterPhyId);
|
||||
this.zkAddressList = zkAddressList;
|
||||
this.zkConfig = zkConfig;
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.bean.entity.reassign;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
|
||||
import lombok.Data;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
@@ -19,4 +20,10 @@ public class ReassignResult {
|
||||
|
||||
return state.isDone();
|
||||
}
|
||||
|
||||
public boolean checkPreferredReplicaElectionUnNeed(String reassignBrokerIds, String originalBrokerIds) {
|
||||
Integer targetLeader = CommonUtils.string2IntList(reassignBrokerIds).get(0);
|
||||
Integer originalLeader = CommonUtils.string2IntList(originalBrokerIds).get(0);
|
||||
return originalLeader.equals(targetLeader);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,4 +16,7 @@ public class Znode {
|
||||
|
||||
@ApiModelProperty(value = "节点属性", example = "")
|
||||
private Stat stat;
|
||||
|
||||
@ApiModelProperty(value = "节点路径", example = "")
|
||||
private String namespace;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.bean.vo.cluster;
|
||||
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/02/24
|
||||
*/
|
||||
@Data
|
||||
@ApiModel(description = "集群健康状态信息")
|
||||
public class ClusterPhysHealthStateVO {
|
||||
@ApiModelProperty(value = "未知", example = "30")
|
||||
private Integer unknownCount;
|
||||
|
||||
@ApiModelProperty(value = "好", example = "30")
|
||||
private Integer goodCount;
|
||||
|
||||
@ApiModelProperty(value = "中", example = "30")
|
||||
private Integer mediumCount;
|
||||
|
||||
@ApiModelProperty(value = "差", example = "30")
|
||||
private Integer poorCount;
|
||||
|
||||
@ApiModelProperty(value = "down", example = "30")
|
||||
private Integer deadCount;
|
||||
|
||||
@ApiModelProperty(value = "总数", example = "150")
|
||||
private Integer total;
|
||||
}
|
||||
@@ -32,9 +32,6 @@ public class HealthCheckConfigVO {
|
||||
@ApiModelProperty(value="检查说明", example = "Group延迟")
|
||||
private String configDesc;
|
||||
|
||||
@ApiModelProperty(value="权重", example = "10")
|
||||
private Float weight;
|
||||
|
||||
@ApiModelProperty(value="检查配置", example = "100")
|
||||
private String value;
|
||||
}
|
||||
|
||||
@@ -18,6 +18,9 @@ public class HealthScoreBaseResultVO extends BaseTimeVO {
|
||||
@ApiModelProperty(value="检查维度", example = "1")
|
||||
private Integer dimension;
|
||||
|
||||
@ApiModelProperty(value="检查维度名称", example = "cluster")
|
||||
private String dimensionName;
|
||||
|
||||
@ApiModelProperty(value="检查名称", example = "Group延迟")
|
||||
private String configName;
|
||||
|
||||
@@ -27,9 +30,6 @@ public class HealthScoreBaseResultVO extends BaseTimeVO {
|
||||
@ApiModelProperty(value="检查说明", example = "Group延迟")
|
||||
private String configDesc;
|
||||
|
||||
@ApiModelProperty(value="权重百分比[0-100]", example = "10")
|
||||
private Integer weightPercent;
|
||||
|
||||
@ApiModelProperty(value="得分", example = "100")
|
||||
private Integer score;
|
||||
|
||||
|
||||
@@ -19,4 +19,7 @@ public class ZnodeVO {
|
||||
@ApiModelProperty(value = "节点属性", example = "")
|
||||
private ZnodeStatVO stat;
|
||||
|
||||
@ApiModelProperty(value = "节点路径", example = "/cluster")
|
||||
private String namespace;
|
||||
|
||||
}
|
||||
|
||||
@@ -35,14 +35,9 @@ public class Constant {
|
||||
public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 15000;
|
||||
public static final Integer DEFAULT_REQUEST_TIMEOUT_UNIT_MS = 5000;
|
||||
|
||||
public static final Float MIN_HEALTH_SCORE = 10f;
|
||||
|
||||
|
||||
/**
|
||||
* 指标相关
|
||||
*/
|
||||
public static final Integer DEFAULT_CLUSTER_HEALTH_SCORE = 90;
|
||||
|
||||
public static final Integer PER_BATCH_MAX_VALUE = 100;
|
||||
|
||||
public static final String DEFAULT_USER_NAME = "know-streaming-app";
|
||||
|
||||
@@ -15,24 +15,15 @@ public class HealthScoreVOConverter {
|
||||
private HealthScoreVOConverter() {
|
||||
}
|
||||
|
||||
public static List<HealthScoreResultDetailVO> convert2HealthScoreResultDetailVOList(List<HealthScoreResult> healthScoreResultList, boolean useGlobalWeight) {
|
||||
Float globalWeightSum = 1f;
|
||||
if (!healthScoreResultList.isEmpty()) {
|
||||
globalWeightSum = healthScoreResultList.get(0).getAllDimensionTotalWeight();
|
||||
}
|
||||
|
||||
public static List<HealthScoreResultDetailVO> convert2HealthScoreResultDetailVOList(List<HealthScoreResult> healthScoreResultList) {
|
||||
List<HealthScoreResultDetailVO> voList = new ArrayList<>();
|
||||
for (HealthScoreResult healthScoreResult: healthScoreResultList) {
|
||||
HealthScoreResultDetailVO vo = new HealthScoreResultDetailVO();
|
||||
vo.setDimension(healthScoreResult.getCheckNameEnum().getDimensionEnum().getDimension());
|
||||
vo.setDimensionName(healthScoreResult.getCheckNameEnum().getDimensionEnum().getMessage());
|
||||
vo.setConfigName(healthScoreResult.getCheckNameEnum().getConfigName());
|
||||
vo.setConfigItem(healthScoreResult.getCheckNameEnum().getConfigItem());
|
||||
vo.setConfigDesc(healthScoreResult.getCheckNameEnum().getConfigDesc());
|
||||
if (useGlobalWeight) {
|
||||
vo.setWeightPercent(healthScoreResult.getBaseConfig().getWeight().intValue() * 100 / globalWeightSum.intValue());
|
||||
} else {
|
||||
vo.setWeightPercent(healthScoreResult.getBaseConfig().getWeight().intValue() * 100 / healthScoreResult.getPresentDimensionTotalWeight().intValue());
|
||||
}
|
||||
|
||||
vo.setScore(healthScoreResult.calRawHealthScore());
|
||||
if (healthScoreResult.getTotalCount() <= 0) {
|
||||
@@ -57,9 +48,9 @@ public class HealthScoreVOConverter {
|
||||
for (HealthScoreResult healthScoreResult: healthScoreResultList) {
|
||||
HealthScoreBaseResultVO vo = new HealthScoreBaseResultVO();
|
||||
vo.setDimension(healthScoreResult.getCheckNameEnum().getDimensionEnum().getDimension());
|
||||
vo.setDimensionName(healthScoreResult.getCheckNameEnum().getDimensionEnum().getMessage());
|
||||
vo.setConfigName(healthScoreResult.getCheckNameEnum().getConfigName());
|
||||
vo.setConfigDesc(healthScoreResult.getCheckNameEnum().getConfigDesc());
|
||||
vo.setWeightPercent(healthScoreResult.getBaseConfig().getWeight().intValue() * 100 / healthScoreResult.getPresentDimensionTotalWeight().intValue());
|
||||
vo.setScore(healthScoreResult.calRawHealthScore());
|
||||
vo.setPassed(healthScoreResult.getPassedCount().equals(healthScoreResult.getTotalCount()));
|
||||
vo.setCheckConfig(convert2HealthCheckConfigVO(ConfigGroupEnum.HEALTH.name(), healthScoreResult.getBaseConfig()));
|
||||
@@ -86,7 +77,6 @@ public class HealthScoreVOConverter {
|
||||
vo.setConfigName(config.getCheckNameEnum().getConfigName());
|
||||
vo.setConfigItem(config.getCheckNameEnum().getConfigItem());
|
||||
vo.setConfigDesc(config.getCheckNameEnum().getConfigDesc());
|
||||
vo.setWeight(config.getWeight());
|
||||
vo.setValue(ConvertUtil.obj2Json(config));
|
||||
return vo;
|
||||
}
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.converter;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.Znode;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.ZookeeperUtils;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
|
||||
public class ZnodeConverter {
|
||||
@@ -9,11 +11,13 @@ public class ZnodeConverter {
|
||||
|
||||
}
|
||||
|
||||
public static Znode convert2Znode(Tuple<byte[], Stat> dataAndStat, String path) {
|
||||
public static Znode convert2Znode(ClusterPhy clusterPhy, Tuple<byte[], Stat> dataAndStat, String path) {
|
||||
Znode znode = new Znode();
|
||||
znode.setStat(dataAndStat.getV2());
|
||||
znode.setData(dataAndStat.getV1() == null ? null : new String(dataAndStat.getV1()));
|
||||
znode.setName(path.substring(path.lastIndexOf('/') + 1));
|
||||
znode.setNamespace(ZookeeperUtils.getNamespace(clusterPhy.getZookeeper()));
|
||||
|
||||
return znode;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import javax.validation.constraints.NotBlank;
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceIntervalDTO {
|
||||
@NotBlank(message = "clusterBalanceIntervalDTO.type不允许为空")
|
||||
@ApiModelProperty("均衡维度:cpu,disk,bytesIn,bytesOut")
|
||||
private String type;
|
||||
|
||||
@NotNull(message = "clusterBalanceIntervalDTO.intervalPercent不允许为空")
|
||||
@ApiModelProperty("平衡区间百分比")
|
||||
private Double intervalPercent;
|
||||
|
||||
@NotNull(message = "clusterBalanceIntervalDTO.priority不允许为空")
|
||||
@ApiModelProperty("优先级")
|
||||
private Integer priority;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceOverviewDTO extends PaginationBaseDTO {
|
||||
|
||||
@ApiModelProperty("host")
|
||||
private String host;
|
||||
|
||||
@ApiModelProperty("key:disk,bytesOut,bytesIn value:均衡状态 0:已均衡;2:未均衡")
|
||||
private Map<String, Integer> stateParam;
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/02/24
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalancePreviewDTO extends BaseDTO {
|
||||
|
||||
@ApiModelProperty("集群id")
|
||||
private Long clusterId;
|
||||
|
||||
@ApiModelProperty("均衡节点")
|
||||
private List<Integer> brokers;
|
||||
|
||||
@ApiModelProperty("topic黑名单")
|
||||
private List<String> topicBlackList;
|
||||
|
||||
@ApiModelProperty("均衡区间详情")
|
||||
private List<ClusterBalanceIntervalDTO> clusterBalanceIntervalList;
|
||||
|
||||
@ApiModelProperty("指标计算周期,单位分钟")
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
@ApiModelProperty("任务并行数")
|
||||
private Integer parallelNum;
|
||||
|
||||
@ApiModelProperty("执行策略, 1:优先最大副本,2:优先最小副本")
|
||||
private Integer executionStrategy;
|
||||
|
||||
@ApiModelProperty("限流值")
|
||||
private Long throttleUnitB;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import javax.validation.constraints.Min;
|
||||
import javax.validation.constraints.NotBlank;
|
||||
import javax.validation.constraints.NotNull;
|
||||
import java.util.List;
|
||||
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/02/24
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceStrategyDTO extends BaseDTO {
|
||||
|
||||
@ApiModelProperty("是否是周期性任务")
|
||||
private boolean scheduleJob;
|
||||
|
||||
@NotBlank(message = "scheduleCron不允许为空")
|
||||
@ApiModelProperty("如果是周期任务,那么任务的周期cron表达式")
|
||||
private String scheduleCron;
|
||||
|
||||
@NotNull(message = "status不允许为空")
|
||||
@ApiModelProperty("周期任务状态:0:不开启,1:开启")
|
||||
private Integer status;
|
||||
|
||||
@NotNull(message = "clusterId不允许为空")
|
||||
@ApiModelProperty("集群id")
|
||||
private Long clusterId;
|
||||
|
||||
@ApiModelProperty("均衡节点")
|
||||
private List<Integer> brokers;
|
||||
|
||||
@ApiModelProperty("topic黑名单")
|
||||
private List<String> topicBlackList;
|
||||
|
||||
@NotNull(message = "clusterBalanceIntervalDTO不允许为空")
|
||||
@ApiModelProperty("均衡区间详情")
|
||||
private List<ClusterBalanceIntervalDTO> clusterBalanceIntervalList;
|
||||
|
||||
@NotNull(message = "metricCalculationPeriod不允许为空")
|
||||
@ApiModelProperty("指标计算周期,单位秒")
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
@NotNull(message = "parallelNum不允许为空")
|
||||
@ApiModelProperty("任务并行数(0代表不限)")
|
||||
private Integer parallelNum;
|
||||
|
||||
@NotNull(message = "executionStrategy不允许为空")
|
||||
@ApiModelProperty("执行策略, 1:优先最大副本,2:优先最小副本")
|
||||
private Integer executionStrategy;
|
||||
|
||||
@Min(value = 1, message = "throttleUnitB不允许小于1")
|
||||
@ApiModelProperty("限流值")
|
||||
private Long throttleUnitB;
|
||||
|
||||
@ApiModelProperty("备注说明")
|
||||
private String description;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
@AllArgsConstructor
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceInterval {
|
||||
/**
|
||||
* 均衡维度:cpu,disk,bytesIn,bytesOut
|
||||
*/
|
||||
private String type;
|
||||
|
||||
/**
|
||||
* 平衡区间百分比
|
||||
*/
|
||||
private Double intervalPercent;
|
||||
|
||||
/**
|
||||
* 优先级
|
||||
*/
|
||||
private Integer priority;
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
@AllArgsConstructor
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceItemState {
|
||||
|
||||
/**
|
||||
* 是否配置集群平衡:true:已配置,false:未配置
|
||||
*/
|
||||
private Boolean configureBalance;
|
||||
|
||||
/**
|
||||
* 是否开启均衡:true:开启,false: 未开启
|
||||
*/
|
||||
private Boolean enable;
|
||||
|
||||
/**
|
||||
* 子项是否均衡:key: disk,bytesIn,bytesOut,cpu ; value:true:已均衡,false:未均衡
|
||||
* @see com.xiaojukeji.know.streaming.km.rebalance.model.Resource
|
||||
*/
|
||||
private Map<String, Boolean> itemState;
|
||||
|
||||
public Integer getResItemState(Resource res) {
|
||||
if (itemState == null || !itemState.containsKey(res.resource())) {
|
||||
return Constant.INVALID_CODE;
|
||||
}
|
||||
|
||||
return itemState.get(res.resource()) ? Constant.YES: Constant.NO;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
|
||||
*/
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.BaseEntity;
|
||||
import lombok.Data;
|
||||
|
||||
/**
|
||||
* 集群均衡任务 实体类
|
||||
*
|
||||
* @author fengqiongfeng
|
||||
* @date 2022-05-23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceJobConfig extends BaseEntity {
|
||||
|
||||
/**
|
||||
* 序列化版本号
|
||||
*/
|
||||
private static final long serialVersionUID=1L;
|
||||
|
||||
/**
|
||||
* 集群id
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* 均衡节点
|
||||
*/
|
||||
private String brokers;
|
||||
|
||||
/**
|
||||
* topic黑名单
|
||||
*/
|
||||
private String topicBlackList;
|
||||
|
||||
/**
|
||||
* 1:立即均衡,2:周期均衡
|
||||
*/
|
||||
private Integer type;
|
||||
|
||||
/**
|
||||
* 任务周期
|
||||
*/
|
||||
private String taskCron;
|
||||
|
||||
/**
|
||||
* 均衡区间详情
|
||||
*/
|
||||
private String balanceIntervalJson;
|
||||
|
||||
/**
|
||||
* 指标计算周期,单位分钟
|
||||
*/
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
/**
|
||||
* 迁移脚本
|
||||
*/
|
||||
private String reassignmentJson;
|
||||
|
||||
/**
|
||||
* 任务并行数
|
||||
*/
|
||||
private Integer parallelNum;
|
||||
|
||||
/**
|
||||
* 执行策略, 1:优先最大副本,2:优先最小副本
|
||||
*/
|
||||
private Integer executionStrategy;
|
||||
|
||||
/**
|
||||
* 限流值
|
||||
*/
|
||||
private Long throttleUnitByte;
|
||||
|
||||
/**
|
||||
* 操作人
|
||||
*/
|
||||
private String creator;
|
||||
|
||||
/**
|
||||
* 任务状态 0:未开启,1:开启
|
||||
*/
|
||||
private Integer status;
|
||||
|
||||
}
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/05/06
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceReassign {
|
||||
/**
|
||||
* jobID
|
||||
*/
|
||||
private Long jobId;
|
||||
|
||||
/**
|
||||
* 集群id
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* Topic名称
|
||||
*/
|
||||
private String topicName;
|
||||
|
||||
/**
|
||||
* 分区ID
|
||||
*/
|
||||
private Integer partitionId;
|
||||
|
||||
/**
|
||||
* 源BrokerId列表
|
||||
*/
|
||||
private String originalBrokerIds;
|
||||
|
||||
/**
|
||||
* 目标BrokerId列表
|
||||
*/
|
||||
private String reassignBrokerIds;
|
||||
|
||||
/**
|
||||
* 任务开始时间
|
||||
*/
|
||||
private Date startTime;
|
||||
|
||||
/**
|
||||
* 任务完成时间
|
||||
*/
|
||||
private Date finishedTime;
|
||||
|
||||
/**
|
||||
* 扩展数据
|
||||
*/
|
||||
private String extendData;
|
||||
|
||||
/**
|
||||
* 任务状态
|
||||
*/
|
||||
private Integer status;
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByTopic;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/05/06
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceReassignDetail {
|
||||
/**
|
||||
* 限流值
|
||||
*/
|
||||
private Long throttleUnitB;
|
||||
|
||||
/**
|
||||
* 开始时间
|
||||
*/
|
||||
private Date startTime;
|
||||
|
||||
/**
|
||||
* 完成时间
|
||||
*/
|
||||
private Date finishedTime;
|
||||
|
||||
/**
|
||||
* 详细信息
|
||||
*/
|
||||
private List<ClusterBalanceDetailDataGroupByTopic> reassignTopicDetailsList;
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Data;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/05/06
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceReassignExtendData {
|
||||
/**
|
||||
* 原本保存时间
|
||||
*/
|
||||
private Long originalRetentionTimeUnitMs;
|
||||
|
||||
/**
|
||||
* 迁移时保存时间
|
||||
*/
|
||||
private Long reassignRetentionTimeUnitMs;
|
||||
|
||||
/**
|
||||
* 需迁移LogSize
|
||||
*/
|
||||
private Long needReassignLogSizeUnitB;
|
||||
|
||||
/**
|
||||
* 已完成迁移LogSize
|
||||
*/
|
||||
private Long finishedReassignLogSizeUnitB;
|
||||
|
||||
/**
|
||||
* 预计剩余时长
|
||||
*/
|
||||
private Long remainTimeUnitMs;
|
||||
|
||||
/**
|
||||
* 当前副本数
|
||||
*/
|
||||
private Integer originReplicaNum;
|
||||
|
||||
/**
|
||||
* 新的副本数
|
||||
*/
|
||||
private Integer reassignReplicaNum;
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.content;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.content.BaseJobCreateContent;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceIntervalDTO;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import javax.validation.constraints.Min;
|
||||
import java.util.List;
|
||||
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class JobClusterBalanceContent extends BaseJobCreateContent {
|
||||
@Min(value = 1, message = "clusterId不允许为null或者小于0")
|
||||
@ApiModelProperty(value = "集群ID, 默认为逻辑集群ID", example = "6")
|
||||
private Long clusterId;
|
||||
|
||||
@Min(value = 1, message = "throttle不允许为null或者小于0")
|
||||
@ApiModelProperty(value = "限流值", example = "102400000")
|
||||
private Long throttleUnitB;
|
||||
|
||||
@ApiModelProperty("topic黑名单")
|
||||
private List<String> topicBlackList;
|
||||
|
||||
@ApiModelProperty("均衡区间详情")
|
||||
private List<ClusterBalanceIntervalDTO> clusterBalanceIntervalList;
|
||||
|
||||
@ApiModelProperty("指标计算周期,单位分钟")
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
@ApiModelProperty("任务并行数")
|
||||
private Integer parallelNum;
|
||||
|
||||
@ApiModelProperty("执行策略, 1:优先最大副本,2:优先最小副本")
|
||||
private Integer executionStrategy;
|
||||
|
||||
@ApiModelProperty("备注说明")
|
||||
private String description;
|
||||
|
||||
@ApiModelProperty("是否是周期性任务")
|
||||
private boolean scheduleJob;
|
||||
}
|
||||
@@ -0,0 +1,79 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/05/06
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public abstract class AbstractClusterBalanceDetailData {
|
||||
/**
|
||||
* 物流集群ID
|
||||
*/
|
||||
private Long clusterPhyId;
|
||||
|
||||
/**
|
||||
* Topic名称
|
||||
*/
|
||||
private String topicName;
|
||||
|
||||
/**
|
||||
* 源Broker列表
|
||||
*/
|
||||
private List<Integer> originalBrokerIdList;
|
||||
|
||||
/**
|
||||
* 目标Broker列表
|
||||
*/
|
||||
private List<Integer> reassignBrokerIdList;
|
||||
|
||||
/**
|
||||
* 需迁移LogSize
|
||||
*/
|
||||
private Long needReassignLogSizeUnitB;
|
||||
|
||||
/**
|
||||
* 已完成迁移LogSize
|
||||
*/
|
||||
private Long finishedReassignLogSizeUnitB;
|
||||
|
||||
/**
|
||||
* 预计剩余时长
|
||||
*/
|
||||
private Long remainTimeUnitMs;
|
||||
|
||||
/**
|
||||
* 当前副本数
|
||||
*/
|
||||
private Integer presentReplicaNum;
|
||||
|
||||
/**
|
||||
* 新的副本数
|
||||
*/
|
||||
private Integer oldReplicaNum;
|
||||
|
||||
/**
|
||||
* 新的副本数
|
||||
*/
|
||||
private Integer newReplicaNum;
|
||||
|
||||
/**
|
||||
* 原本保存时间
|
||||
*/
|
||||
private Long originalRetentionTimeUnitMs;
|
||||
|
||||
/**
|
||||
* 迁移时保存时间
|
||||
*/
|
||||
private Long reassignRetentionTimeUnitMs;
|
||||
|
||||
/**
|
||||
* 状态
|
||||
*/
|
||||
private Integer status;
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Data;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/05/06
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceDetailDataGroupByPartition extends AbstractClusterBalanceDetailData {
|
||||
/**
|
||||
* 分区ID
|
||||
*/
|
||||
private Integer partitionId;
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 22/05/06
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceDetailDataGroupByTopic extends AbstractClusterBalanceDetailData {
|
||||
/**
|
||||
* 分区ID列表
|
||||
*/
|
||||
private List<Integer> partitionIdList;
|
||||
|
||||
private List<ClusterBalanceDetailDataGroupByPartition> reassignPartitionDetailsList;
|
||||
}
|
||||
@@ -0,0 +1,76 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡详细信息")
|
||||
public class ClusterBalancePlanDetail implements Serializable {
|
||||
@ApiModelProperty(value = "是否均衡,1:已均衡;2:未均衡")
|
||||
private Integer status;
|
||||
|
||||
@ApiModelProperty(value = "brokerId")
|
||||
private Integer brokerId;
|
||||
|
||||
@ApiModelProperty(value = "broker host")
|
||||
private String host;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 cpu")
|
||||
private Double cpuBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 disk")
|
||||
private Double diskBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 byteIn")
|
||||
private Double byteInBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 byteOut")
|
||||
private Double byteOutBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 cpu")
|
||||
private Double cpuAfter;
|
||||
|
||||
@ApiModelProperty(value = "是否均衡,1:已均衡;2:未均衡")
|
||||
private Integer cpuStatus;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 disk")
|
||||
private Double diskAfter;
|
||||
|
||||
@ApiModelProperty(value = "是否均衡,1:已均衡;2:未均衡")
|
||||
private Integer diskStatus;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 byteIn")
|
||||
private Double byteInAfter;
|
||||
|
||||
@ApiModelProperty(value = "是否均衡,1:已均衡;2:未均衡")
|
||||
private Integer byteInStatus;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 byteOut")
|
||||
private Double byteOutAfter;
|
||||
|
||||
@ApiModelProperty(value = "是否均衡,1:已均衡;2:未均衡")
|
||||
private Integer byteOutStatus;
|
||||
|
||||
@ApiModelProperty(value = "均衡流入大小")
|
||||
private Double inSize;
|
||||
|
||||
@ApiModelProperty(value = "均衡流入副本个数")
|
||||
private Double inReplica;
|
||||
|
||||
@ApiModelProperty(value = "均衡流出大小")
|
||||
private Double outSize;
|
||||
|
||||
@ApiModelProperty(value = "均衡流出副本个数")
|
||||
private Double outReplica;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
|
||||
*/
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po;
|
||||
|
||||
import com.baomidou.mybatisplus.annotation.TableName;
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
/**
|
||||
* 集群均衡任务 实体类
|
||||
*
|
||||
* @author fengqiongfeng
|
||||
* @date 2022-05-23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@NoArgsConstructor
|
||||
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "cluster_balance_job_config")
|
||||
public class ClusterBalanceJobConfigPO extends BasePO {
|
||||
|
||||
/**
|
||||
* 序列化版本号
|
||||
*/
|
||||
private static final long serialVersionUID=1L;
|
||||
|
||||
/**
|
||||
* 集群id
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* topic黑名单
|
||||
*/
|
||||
private String topicBlackList;
|
||||
|
||||
/**
|
||||
* 任务周期
|
||||
*/
|
||||
private String taskCron;
|
||||
|
||||
/**
|
||||
* 均衡区间详情
|
||||
*/
|
||||
private String balanceIntervalJson;
|
||||
|
||||
/**
|
||||
* 指标计算周期,单位分钟
|
||||
*/
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
/**
|
||||
* 迁移脚本
|
||||
*/
|
||||
private String reassignmentJson;
|
||||
|
||||
/**
|
||||
* 任务并行数
|
||||
*/
|
||||
private Integer parallelNum;
|
||||
|
||||
/**
|
||||
* 执行策略, 1:优先最大副本,2:优先最小副本
|
||||
*/
|
||||
private Integer executionStrategy;
|
||||
|
||||
/**
|
||||
* 限流值
|
||||
*/
|
||||
private Long throttleUnitB;
|
||||
|
||||
/**
|
||||
* 操作人
|
||||
*/
|
||||
private String creator;
|
||||
|
||||
/**
|
||||
* 任务状态 0:未开启,1:开启
|
||||
*/
|
||||
private Integer status;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
|
||||
*/
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po;
|
||||
|
||||
import com.baomidou.mybatisplus.annotation.TableName;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* 集群均衡任务 实体类
|
||||
*
|
||||
* @author fengqiongfeng
|
||||
* @date 2022-05-23
|
||||
*/
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "cluster_balance_job")
|
||||
public class ClusterBalanceJobPO extends BasePO {
|
||||
|
||||
/**
|
||||
* 序列化版本号
|
||||
*/
|
||||
private static final long serialVersionUID=1L;
|
||||
|
||||
/**
|
||||
* 集群id
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* 均衡节点
|
||||
*/
|
||||
private String brokers;
|
||||
|
||||
/**
|
||||
* topic黑名单
|
||||
*/
|
||||
private String topicBlackList;
|
||||
|
||||
/**
|
||||
* 1:立即均衡,2:周期均衡
|
||||
*/
|
||||
private Integer type;
|
||||
|
||||
/**
|
||||
* 均衡区间详情
|
||||
*/
|
||||
private String balanceIntervalJson;
|
||||
|
||||
/**
|
||||
* 指标计算周期,单位分钟
|
||||
*/
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
/**
|
||||
* 迁移脚本
|
||||
*/
|
||||
private String reassignmentJson;
|
||||
|
||||
/**
|
||||
* 任务并行数
|
||||
*/
|
||||
private Integer parallelNum;
|
||||
|
||||
/**
|
||||
* 执行策略, 1:优先最大副本,2:优先最小副本
|
||||
*/
|
||||
private Integer executionStrategy;
|
||||
|
||||
/**
|
||||
* 限流值
|
||||
*/
|
||||
private Long throttleUnitB;
|
||||
|
||||
/**
|
||||
* 总迁移大小
|
||||
*/
|
||||
private Double totalReassignSize;
|
||||
|
||||
/**
|
||||
* 总迁移副本数
|
||||
*/
|
||||
private Integer totalReassignReplicaNum;
|
||||
|
||||
/**
|
||||
* 移入topic
|
||||
*/
|
||||
private String moveInTopicList;
|
||||
|
||||
/**
|
||||
* 节点均衡详情
|
||||
*/
|
||||
private String brokerBalanceDetail;
|
||||
|
||||
/**
|
||||
* 任务状态 1:进行中,2:准备,3,成功,4:失败,5:取消
|
||||
*/
|
||||
private Integer status;
|
||||
|
||||
/**
|
||||
* 操作人
|
||||
*/
|
||||
private String creator;
|
||||
|
||||
/**
|
||||
* 任务开始时间
|
||||
*/
|
||||
private Date startTime;
|
||||
|
||||
/**
|
||||
* 任务完成时间
|
||||
*/
|
||||
private Date finishedTime;
|
||||
|
||||
/**
|
||||
* 备注说明
|
||||
*/
|
||||
private String description;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
|
||||
*/
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po;
|
||||
|
||||
import com.baomidou.mybatisplus.annotation.TableName;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* 集群平衡迁移详情 实体类
|
||||
*
|
||||
* @author fengqiongfeng
|
||||
* @date 2022-05-23
|
||||
*/
|
||||
@Data
|
||||
@NoArgsConstructor
|
||||
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "cluster_balance_reassign")
|
||||
public class ClusterBalanceReassignPO extends BasePO {
|
||||
|
||||
/**
|
||||
* 序列化版本号
|
||||
*/
|
||||
private static final long serialVersionUID=1L;
|
||||
/**
|
||||
* jobID
|
||||
*/
|
||||
private Long jobId;
|
||||
|
||||
/**
|
||||
* 集群id
|
||||
*/
|
||||
private Long clusterId;
|
||||
|
||||
/**
|
||||
* Topic名称
|
||||
*/
|
||||
private String topicName;
|
||||
|
||||
/**
|
||||
* 分区ID
|
||||
*/
|
||||
private Integer partitionId;
|
||||
|
||||
/**
|
||||
* 源BrokerId列表
|
||||
*/
|
||||
private String originalBrokerIds;
|
||||
|
||||
/**
|
||||
* 目标BrokerId列表
|
||||
*/
|
||||
private String reassignBrokerIds;
|
||||
|
||||
/**
|
||||
* 任务开始时间
|
||||
*/
|
||||
private Date startTime;
|
||||
|
||||
/**
|
||||
* 任务完成时间
|
||||
*/
|
||||
private Date finishedTime;
|
||||
|
||||
/**
|
||||
* 扩展数据
|
||||
*/
|
||||
private String extendData;
|
||||
|
||||
/**
|
||||
* 任务状态
|
||||
*/
|
||||
private Integer status;
|
||||
|
||||
}
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡历史信息")
|
||||
public class ClusterBalanceHistorySubVO implements Serializable {
|
||||
@ApiModelProperty(value = "均衡成功节点数")
|
||||
private Long successNu;
|
||||
|
||||
@ApiModelProperty(value = "未均衡成功节点数")
|
||||
private Long failedNu;
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡历史信息")
|
||||
public class ClusterBalanceHistoryVO implements Serializable {
|
||||
@ApiModelProperty(value = "均衡开始执行时间")
|
||||
private Date begin;
|
||||
|
||||
@ApiModelProperty(value = "均衡执行结束时间")
|
||||
private Date end;
|
||||
|
||||
@ApiModelProperty(value = "均衡任务id")
|
||||
private Long jobId;
|
||||
|
||||
@ApiModelProperty(value = "子项均衡历史信息", example = "cpu、disk")
|
||||
private Map<String, ClusterBalanceHistorySubVO> sub;
|
||||
|
||||
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceIntervalVO {
|
||||
@ApiModelProperty("均衡维度:cpu,disk,bytesIn,bytesOut")
|
||||
private String type;
|
||||
|
||||
@ApiModelProperty("平衡区间百分比")
|
||||
private Double intervalPercent;
|
||||
|
||||
@ApiModelProperty("优先级")
|
||||
private Integer priority;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
|
||||
*/
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceInterval;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 集群均衡任务 实体类
|
||||
*
|
||||
* @author fengqiongfeng
|
||||
* @date 2022-05-23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceJobConfigVO {
|
||||
/**
|
||||
* 序列化版本号
|
||||
*/
|
||||
private static final long serialVersionUID=1L;
|
||||
|
||||
@ApiModelProperty("集群id")
|
||||
private Long clusterId;
|
||||
|
||||
@ApiModelProperty("topic黑名单")
|
||||
private List<String> topicBlackList;
|
||||
|
||||
@ApiModelProperty("任务周期")
|
||||
private String scheduleCron;
|
||||
|
||||
@ApiModelProperty("均衡区间详情")
|
||||
private List<ClusterBalanceInterval> clusterBalanceIntervalList;
|
||||
|
||||
@ApiModelProperty("指标计算周期,单位分钟")
|
||||
private Integer metricCalculationPeriod;
|
||||
|
||||
@ApiModelProperty("任务并行数")
|
||||
private Integer parallelNum;
|
||||
|
||||
@ApiModelProperty("执行策略, 1:优先最大副本,2:优先最小副本")
|
||||
private Integer executionStrategy;
|
||||
|
||||
@ApiModelProperty("限流值")
|
||||
private Long throttleUnitB;
|
||||
|
||||
@ApiModelProperty("任务状态 0:未开启,1:开启")
|
||||
private Integer status;
|
||||
|
||||
}
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@AllArgsConstructor
|
||||
@NoArgsConstructor
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡列表信息")
|
||||
public class ClusterBalanceOverviewSubVO implements Serializable {
|
||||
@ApiModelProperty(value = "平均值", example = "cpu的平均值,43.4")
|
||||
private Double avg;
|
||||
|
||||
@ApiModelProperty(value = "规格", example = "1000")
|
||||
private Double spec;
|
||||
|
||||
@ApiModelProperty(value = "均衡状态", example = "0:已均衡,-1:低于均衡值,1高于均衡值")
|
||||
private Integer status ;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡列表信息")
|
||||
public class ClusterBalanceOverviewVO implements Serializable {
|
||||
@ApiModelProperty(value = "brokerId", example = "123")
|
||||
private Integer brokerId;
|
||||
|
||||
@ApiModelProperty(value = "broker host")
|
||||
private String host;
|
||||
|
||||
@ApiModelProperty(value = "broker 对应的 rack")
|
||||
private String rack;
|
||||
|
||||
@ApiModelProperty(value = "leader")
|
||||
private Integer leader;
|
||||
|
||||
@ApiModelProperty(value = "replicas")
|
||||
private Integer replicas;
|
||||
|
||||
@ApiModelProperty(value = "子项统计详细信息", example = "cpu、disk")
|
||||
private Map<String, ClusterBalanceOverviewSubVO> sub;
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡历史信息")
|
||||
public class ClusterBalancePlanDetailVO implements Serializable {
|
||||
@ApiModelProperty(value = "是否均衡,0:已均衡;2:未均衡")
|
||||
private Integer status;
|
||||
|
||||
@ApiModelProperty(value = "brokerId")
|
||||
private Integer brokerId;
|
||||
|
||||
@ApiModelProperty(value = "broker host")
|
||||
private String host;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 cpu")
|
||||
private Double cpuBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 disk")
|
||||
private Double diskBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 byteIn")
|
||||
private Double byteInBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡前 byteOut")
|
||||
private Double byteOutBefore;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 cpu")
|
||||
private Double cpuAfter;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 disk")
|
||||
private Double diskAfter;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 byteIn")
|
||||
private Double byteInAfter;
|
||||
|
||||
@ApiModelProperty(value = "均衡后 byteOut")
|
||||
private Double byteOutAfter;
|
||||
|
||||
@ApiModelProperty(value = "均衡流入大小")
|
||||
private Double inSize;
|
||||
|
||||
@ApiModelProperty(value = "均衡流入副本个数")
|
||||
private Double inReplica;
|
||||
|
||||
@ApiModelProperty(value = "均衡流出大小")
|
||||
private Double outSize;
|
||||
|
||||
@ApiModelProperty(value = "均衡流出副本个数")
|
||||
private Double outReplica;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡信息")
|
||||
public class ClusterBalancePlanVO implements Serializable {
|
||||
@ApiModelProperty(value = "均衡计划类型,1:立即均衡;2:周期均衡")
|
||||
private Integer type;
|
||||
|
||||
@ApiModelProperty(value = "均衡执行的节点范围")
|
||||
private List<String> brokers;
|
||||
|
||||
@ApiModelProperty(value = "均衡执行的Topic黑名单")
|
||||
private List<String> blackTopics;
|
||||
|
||||
@ApiModelProperty(value = "均衡执行移入的Topic名单")
|
||||
private List<String> topics;
|
||||
|
||||
@ApiModelProperty(value = "均衡总迁移的磁盘大小,单位byte")
|
||||
private Double moveSize;
|
||||
|
||||
@ApiModelProperty(value = "均衡总迁移的副本个数")
|
||||
private Integer replicas;
|
||||
|
||||
@ApiModelProperty(value = "均衡阈值")
|
||||
private String threshold;
|
||||
|
||||
@ApiModelProperty(value = "reassignment json")
|
||||
private String reassignmentJson;
|
||||
|
||||
@ApiModelProperty(value = "均衡区间信息")
|
||||
private List<ClusterBalanceIntervalVO> clusterBalanceIntervalList;
|
||||
|
||||
@ApiModelProperty(value = "均衡计划明细")
|
||||
private List<ClusterBalancePlanDetailVO> detail;
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@AllArgsConstructor
|
||||
@NoArgsConstructor
|
||||
@ApiModel(description = "集群均衡状态子项的详细统计信息")
|
||||
public class ClusterBalanceStateSubVO {
|
||||
|
||||
@ApiModelProperty(value = "平均值", example = "cpu的平均值,43.4")
|
||||
private Double avg;
|
||||
|
||||
@ApiModelProperty(value = "周期均衡时的均衡区间", example = "cpu的均衡值")
|
||||
private Double interval;
|
||||
|
||||
@ApiModelProperty(value = "处于周期均衡时的均衡区间的最小值以下的broker个数", example = "4")
|
||||
private Long smallNu;
|
||||
|
||||
@ApiModelProperty(value = "处于周期均衡时的均衡区间的broker个数", example = "4")
|
||||
private Long betweenNu;
|
||||
|
||||
@ApiModelProperty(value = "处于周期均衡时的均衡区间的最大值以上的broker个数", example = "4")
|
||||
private Long bigNu;
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import io.swagger.annotations.ApiModel;
|
||||
import io.swagger.annotations.ApiModelProperty;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* 集群Topic信息
|
||||
* @author zengqiao
|
||||
* @date 22/02/23
|
||||
*/
|
||||
@Data
|
||||
@EnterpriseLoadReBalance
|
||||
@ApiModel(description = "集群均衡状态信息")
|
||||
public class ClusterBalanceStateVO implements Serializable {
|
||||
@ApiModelProperty(value = "均衡状态", example = "0:已均衡,2:未均衡")
|
||||
private Integer status;
|
||||
|
||||
@ApiModelProperty(value = "是否开启均衡", example = "true:开启,false:未开启")
|
||||
private Boolean enable;
|
||||
|
||||
@ApiModelProperty(value = "下次均衡开始时间")
|
||||
private Date next;
|
||||
|
||||
@ApiModelProperty(value = "子项统计详细信息", example = "cpu、disk")
|
||||
private Map<String, ClusterBalanceStateSubVO> sub;
|
||||
}
|
||||
@@ -0,0 +1,476 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceIntervalDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalancePreviewDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceStrategyDTO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceInterval;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignExtendData;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalancePlanDetail;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.content.JobClusterBalanceContent;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobConfigPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.*;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums.ClusterBalanceStateEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums.ClusterBalanceTypeEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.job.JobTypeEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
||||
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.*;
|
||||
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.PARTITION_INDEX;
|
||||
|
||||
@EnterpriseLoadReBalance
|
||||
public class ClusterBalanceConverter {
|
||||
|
||||
private ClusterBalanceConverter() {
|
||||
}
|
||||
|
||||
/**
 * Builds the balance-engine {@link BalanceParameter} from a persisted periodic-balance
 * config row, the live broker topology, and the cluster's connection info.
 *
 * @param configPO      stored balance strategy (intervals JSON, topic blacklist, metric period)
 * @param brokerMap     brokers in scope for balancing, keyed by broker ID
 * @param brokerSpecMap hardware specs per broker ID
 * @param clusterPhy    physical cluster (ID, bootstrap servers, client properties)
 * @param esUrl         Elasticsearch REST URL the engine reads metrics from
 * @param topicNames    topics the engine should ignore
 * @return fully-populated engine parameter object
 */
public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobConfigPO configPO, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
    BalanceParameter balanceParameter = new BalanceParameter();
    // Balance intervals are stored as a JSON array on the config row.
    // NOTE(review): assumes str2ObjArrayByJson never returns null — confirm; otherwise the loop below NPEs.
    List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = ConvertUtil.str2ObjArrayByJson(configPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);

    // Map each configured dimension onto an engine goal plus a fractional
    // threshold (intervalPercent is a percentage, hence /100).
    List<String> goals = new ArrayList<>();
    for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
        if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.DISK.goal());
        }else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            // TODO: CPU is not yet implemented in the underlying balancer, so no CPU goal is added yet
        }else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.NW_IN.goal());
        }else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.NW_OUT.goal());
        }
    }
    balanceParameter.setGoals(goals);
    balanceParameter.setCluster(clusterPhy.getId().toString());
    balanceParameter.setExcludedTopics(configPO.getTopicBlackList());
    balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
    balanceParameter.setEsRestURL(esUrl);
    balanceParameter.setBalanceBrokers(CommonUtils.intSet2String(brokerMap.keySet()));
    balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
    // NOTE(review): the config field is documented elsewhere as a period in minutes,
    // but the target setter is named setBeforeSeconds — confirm the expected unit.
    balanceParameter.setBeforeSeconds(configPO.getMetricCalculationPeriod());
    balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));

    // Kafka client config: bootstrap servers plus any extra client properties stored as JSON.
    // NOTE(review): putAll NPEs if clientProperties is null/unparseable — confirm ConvertUtil's contract.
    Properties kafkaConfig = new Properties();
    kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
    kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
    balanceParameter.setKafkaConfig(kafkaConfig);
    return balanceParameter;

}
|
||||
|
||||
/**
 * Builds the balance-engine {@link BalanceParameter} from an existing balance job
 * row (used when re-running / continuing a stored job). Mirrors the config-PO
 * overload, but takes the interval JSON, blacklist, broker scope and metric
 * period from the job itself.
 */
public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobPO clusterBalanceJobPO, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
    BalanceParameter balanceParameter = new BalanceParameter();
    // Intervals were persisted on the job as a JSON array.
    // NOTE(review): assumes str2ObjArrayByJson never returns null — confirm.
    List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);

    // Dimension -> engine goal + fractional threshold (percent / 100).
    List<String> goals = new ArrayList<>();
    for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
        if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.DISK.goal());
        }else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            // TODO: CPU is not yet implemented in the underlying balancer, so no CPU goal is added yet
        }else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.NW_IN.goal());
        }else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.NW_OUT.goal());
        }
    }
    balanceParameter.setGoals(goals);
    balanceParameter.setCluster(clusterPhy.getId().toString());
    balanceParameter.setExcludedTopics(clusterBalanceJobPO.getTopicBlackList());
    balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
    balanceParameter.setEsRestURL(esUrl);
    // Unlike the config-PO overload, the broker scope comes from the job row, not brokerMap.
    balanceParameter.setBalanceBrokers(clusterBalanceJobPO.getBrokers());
    balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
    // NOTE(review): minutes-vs-seconds naming mismatch (setBeforeSeconds) — confirm expected unit.
    balanceParameter.setBeforeSeconds(clusterBalanceJobPO.getMetricCalculationPeriod());
    balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));

    // Kafka client config: bootstrap servers plus extra client properties from JSON.
    Properties kafkaConfig = new Properties();
    kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
    kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
    balanceParameter.setKafkaConfig(kafkaConfig);
    return balanceParameter;

}
|
||||
|
||||
/**
 * Builds the balance-engine {@link BalanceParameter} from a job-content DTO
 * (the in-memory description of a balance job). Intervals are processed in
 * ascending priority order; the broker scope is the supplied broker list.
 */
public static BalanceParameter convert2BalanceParameter(JobClusterBalanceContent dto, List<Broker> brokers, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
    BalanceParameter balanceParameter = new BalanceParameter();
    // Sort dimensions by priority so higher-priority goals are registered first.
    List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = dto.getClusterBalanceIntervalList().stream()
            .sorted(Comparator.comparing(ClusterBalanceIntervalDTO::getPriority)).collect(Collectors.toList());
    // Dimension -> engine goal + fractional threshold (percent / 100).
    List<String> goals = new ArrayList<>();
    for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
        if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.DISK.goal());
        }else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            // TODO: CPU is not yet implemented in the underlying balancer, so no CPU goal is added yet
        }else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.NW_IN.goal());
        }else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
            balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
            goals.add(BalanceGoal.NW_OUT.goal());
        }
    }

    // Index brokers by ID for the broker-scope string and the hardware-env conversion.
    Map<Integer, Broker> brokerMap = brokers.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
    balanceParameter.setGoals(goals);
    balanceParameter.setCluster(clusterPhy.getId().toString());
    balanceParameter.setExcludedTopics(CommonUtils.strList2String(dto.getTopicBlackList()));
    balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
    balanceParameter.setEsRestURL(esUrl);
    balanceParameter.setBalanceBrokers(CommonUtils.intSet2String(brokerMap.keySet()));
    balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
    // NOTE(review): minutes-vs-seconds naming mismatch (setBeforeSeconds) — confirm expected unit.
    balanceParameter.setBeforeSeconds(dto.getMetricCalculationPeriod());
    balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));

    // Kafka client config: bootstrap servers plus extra client properties from JSON.
    Properties kafkaConfig = new Properties();
    kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
    kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
    balanceParameter.setKafkaConfig(kafkaConfig);
    return balanceParameter;

}
|
||||
|
||||
public static BalanceParameter convert2BalanceParameter(ClusterBalancePreviewDTO dto, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
|
||||
BalanceParameter balanceParameter = new BalanceParameter();
|
||||
List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = dto.getClusterBalanceIntervalList().stream()
|
||||
.sorted(Comparator.comparing(ClusterBalanceIntervalDTO::getPriority)).collect(Collectors.toList());
|
||||
List<String> goals = new ArrayList<>();
|
||||
for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
|
||||
if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
|
||||
balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
|
||||
goals.add(BalanceGoal.DISK.goal());
|
||||
}else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
|
||||
balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
|
||||
// todo cpu底层暂未实现,先不加goal
|
||||
}else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
|
||||
balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
|
||||
goals.add(BalanceGoal.NW_IN.goal());
|
||||
}else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
|
||||
balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
|
||||
goals.add(BalanceGoal.NW_OUT.goal());
|
||||
}
|
||||
}
|
||||
balanceParameter.setGoals(goals);
|
||||
balanceParameter.setCluster(clusterPhy.getId().toString());
|
||||
balanceParameter.setExcludedTopics(CommonUtils.strList2String(dto.getTopicBlackList()));
|
||||
balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
|
||||
balanceParameter.setEsRestURL(esUrl);
|
||||
balanceParameter.setBalanceBrokers(CommonUtils.intList2String(dto.getBrokers()));
|
||||
balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
|
||||
balanceParameter.setBeforeSeconds(dto.getMetricCalculationPeriod());
|
||||
balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));
|
||||
|
||||
Properties kafkaConfig = new Properties();
|
||||
kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
|
||||
kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
|
||||
balanceParameter.setKafkaConfig(kafkaConfig);
|
||||
return balanceParameter;
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
 * Converts an optimizer result plus the originating job content into a
 * persistable {@link ClusterBalanceJobPO}. Returns null when any required
 * optimizer result view is missing.
 */
public static ClusterBalanceJobPO convert2ClusterBalanceJobPO(Long jobId, JobClusterBalanceContent jobDTO, OptimizerResult optimizerResult, List<Broker> brokers, String operator, String json) {
    // Refuse to build the PO unless the optimizer produced every result view we persist.
    if (ValidateUtils.anyNull(jobDTO, optimizerResult, optimizerResult.resultJsonOverview(),
            optimizerResult.resultJsonDetailed(), optimizerResult.resultDetailed(), optimizerResult.resultJsonTask())){
        return null;
    }

    ClusterBalanceJobPO clusterBalanceJobPO = new ClusterBalanceJobPO();
    clusterBalanceJobPO.setId(jobId);
    // Schedule-triggered jobs are "cycle" balances; user-triggered ones run immediately.
    clusterBalanceJobPO.setType(jobDTO.isScheduleJob()?
            ClusterBalanceTypeEnum.CYCLE.getType():ClusterBalanceTypeEnum.IMMEDIATELY.getType());
    clusterBalanceJobPO.setStatus(JobStatusEnum.WAITING.getStatus());
    clusterBalanceJobPO.setCreator(operator);
    clusterBalanceJobPO.setParallelNum(jobDTO.getParallelNum());
    clusterBalanceJobPO.setThrottleUnitB(jobDTO.getThrottleUnitB());
    clusterBalanceJobPO.setDescription(jobDTO.getDescription());
    clusterBalanceJobPO.setBrokers(CommonUtils.intList2String(brokers.stream().map(Broker::getBrokerId).collect(Collectors.toList())));
    clusterBalanceJobPO.setClusterId(jobDTO.getClusterId());
    clusterBalanceJobPO.setTopicBlackList(CommonUtils.strList2String(jobDTO.getTopicBlackList()));
    clusterBalanceJobPO.setMoveInTopicList(optimizerResult.resultOverview().getMoveTopics());
    clusterBalanceJobPO.setExecutionStrategy(jobDTO.getExecutionStrategy());
    clusterBalanceJobPO.setBalanceIntervalJson(ConvertUtil.obj2Json(jobDTO.getClusterBalanceIntervalList()));
    // Per-broker before/after plan detail, serialized to JSON for storage.
    clusterBalanceJobPO.setBrokerBalanceDetail(ConvertUtil.obj2Json(convert2ClusterBalancePlanDetail(optimizerResult.resultDetailed())));
    clusterBalanceJobPO.setMetricCalculationPeriod(jobDTO.getMetricCalculationPeriod());
    clusterBalanceJobPO.setReassignmentJson(json);
    clusterBalanceJobPO.setTotalReassignSize(optimizerResult.resultOverview().getTotalMoveSize());
    clusterBalanceJobPO.setTotalReassignReplicaNum(optimizerResult.resultOverview().getMoveReplicas());
    // NOTE(review): this overwrites the description set from jobDTO.getDescription() above.
    // One of the two setDescription calls looks wrong — confirm whether the user-supplied
    // description or the balance-action-history JSON is meant to be persisted here.
    clusterBalanceJobPO.setDescription(optimizerResult.resultJsonBalanceActionHistory());
    return clusterBalanceJobPO;
}
|
||||
|
||||
public static ClusterBalanceReassignPO convert2ClusterBalanceReassignPO(BalanceTask balanceTask, Topic topic, Long jobId, Long clusterId) {
|
||||
ClusterBalanceReassignPO reassignPO = new ClusterBalanceReassignPO();
|
||||
reassignPO.setClusterId(clusterId);
|
||||
reassignPO.setJobId(jobId);
|
||||
reassignPO.setPartitionId(balanceTask.getPartition());
|
||||
reassignPO.setOriginalBrokerIds(CommonUtils.intList2String(topic.getPartitionMap().get(balanceTask.getPartition())));
|
||||
reassignPO.setReassignBrokerIds(CommonUtils.intList2String(balanceTask.getReplicas()));
|
||||
reassignPO.setTopicName(balanceTask.getTopic());
|
||||
ClusterBalanceReassignExtendData extendData = new ClusterBalanceReassignExtendData();
|
||||
extendData.setOriginalRetentionTimeUnitMs(topic.getRetentionMs());
|
||||
extendData.setReassignRetentionTimeUnitMs(topic.getRetentionMs());
|
||||
extendData.setOriginReplicaNum(topic.getReplicaNum());
|
||||
extendData.setReassignReplicaNum(balanceTask.getReplicas().size());
|
||||
reassignPO.setExtendData(ConvertUtil.obj2Json(extendData));
|
||||
reassignPO.setStatus(JobStatusEnum.WAITING.getStatus());
|
||||
return reassignPO;
|
||||
}
|
||||
|
||||
public static List<ClusterBalanceReassignPO> convert2ListClusterBalanceReassignPO(List<BalanceTask> balanceTasks, Map<String, Topic> topicMap, Long jobId, Long clusterId) {
|
||||
List<ClusterBalanceReassignPO> reassignPOs = new ArrayList<>();
|
||||
//生成迁移详情
|
||||
Map<String, List<BalanceTask>> balanceTaskMap = balanceTasks.stream().collect(Collectors.groupingBy(BalanceTask::getTopic));
|
||||
for (Map.Entry<String, List<BalanceTask>> entry : balanceTaskMap.entrySet()){
|
||||
Topic topic = topicMap.get(entry.getKey());
|
||||
if (topic == null || topic.getPartitionMap() == null){
|
||||
continue;
|
||||
}
|
||||
for (BalanceTask balanceTask : entry.getValue()){
|
||||
reassignPOs.add(ClusterBalanceConverter.convert2ClusterBalanceReassignPO(balanceTask, topic, jobId, clusterId));
|
||||
}
|
||||
}
|
||||
return reassignPOs;
|
||||
}
|
||||
|
||||
public static ClusterBalanceJobConfigPO convert2ClusterBalanceJobConfigPO(ClusterBalanceStrategyDTO dto, String operator) {
|
||||
ClusterBalanceJobConfigPO jobConfigPO = new ClusterBalanceJobConfigPO();
|
||||
jobConfigPO.setCreator(operator);
|
||||
jobConfigPO.setParallelNum(dto.getParallelNum());
|
||||
jobConfigPO.setThrottleUnitB(dto.getThrottleUnitB());
|
||||
jobConfigPO.setClusterId(dto.getClusterId());
|
||||
jobConfigPO.setExecutionStrategy(dto.getExecutionStrategy());
|
||||
jobConfigPO.setBalanceIntervalJson(ConvertUtil.obj2Json(dto.getClusterBalanceIntervalList()));
|
||||
jobConfigPO.setTaskCron(dto.getScheduleCron());
|
||||
jobConfigPO.setMetricCalculationPeriod(dto.getMetricCalculationPeriod());
|
||||
jobConfigPO.setStatus(dto.getStatus());
|
||||
return jobConfigPO;
|
||||
}
|
||||
|
||||
public static JobClusterBalanceContent convert2JobClusterBalanceContent(ClusterBalanceJobConfigPO configPO) {
|
||||
JobClusterBalanceContent content = new JobClusterBalanceContent();
|
||||
content.setType(JobTypeEnum.CLUSTER_BALANCE.getType());
|
||||
content.setParallelNum(configPO.getParallelNum());
|
||||
content.setThrottleUnitB(configPO.getThrottleUnitB());
|
||||
content.setClusterId(configPO.getClusterId());
|
||||
content.setExecutionStrategy(configPO.getExecutionStrategy());
|
||||
content.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(configPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class));
|
||||
content.setMetricCalculationPeriod(configPO.getMetricCalculationPeriod());
|
||||
content.setTopicBlackList(CommonUtils.string2StrList(configPO.getTopicBlackList()));
|
||||
content.setScheduleJob(Boolean.TRUE);
|
||||
return content;
|
||||
}
|
||||
|
||||
public static List<ClusterBalancePlanDetail> convert2ClusterBalancePlanDetail(Map<Integer, BalanceDetailed> detailedMap) {
|
||||
List<ClusterBalancePlanDetail> details = new ArrayList<>();
|
||||
for(Map.Entry<Integer, BalanceDetailed> entry : detailedMap.entrySet()){
|
||||
BalanceDetailed balanceDetailed = entry.getValue();
|
||||
if (balanceDetailed == null){
|
||||
continue ;
|
||||
}
|
||||
ClusterBalancePlanDetail planDetail = new ClusterBalancePlanDetail();
|
||||
planDetail.setStatus(balanceDetailed.getBalanceState()==ClusterBalanceStateEnum.BALANCE.getState()?ClusterBalanceStateEnum.BALANCE.getState():ClusterBalanceStateEnum.UNBALANCED.getState());
|
||||
planDetail.setHost(balanceDetailed.getHost());
|
||||
planDetail.setBrokerId(entry.getKey());
|
||||
planDetail.setCpuBefore(balanceDetailed.getCurrentCPUUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setCpuAfter(balanceDetailed.getLastCPUUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setDiskBefore(balanceDetailed.getCurrentDiskUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setDiskAfter(balanceDetailed.getLastDiskUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setByteInBefore(balanceDetailed.getCurrentNetworkInUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setByteInAfter(balanceDetailed.getLastNetworkInUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setByteOutBefore(balanceDetailed.getCurrentNetworkOutUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setByteOutAfter(balanceDetailed.getLastNetworkOutUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetail.setInReplica(balanceDetailed.getMoveInReplicas());
|
||||
planDetail.setOutReplica(balanceDetailed.getMoveOutReplicas());
|
||||
planDetail.setInSize(balanceDetailed.getMoveInDiskSize());
|
||||
planDetail.setOutSize(balanceDetailed.getMoveOutDiskSize());
|
||||
details.add(planDetail);
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
//更新平衡任务完成后的集群均衡状态
|
||||
public static List<ClusterBalancePlanDetail> convert2ClusterBalancePlanDetail(List<ClusterBalancePlanDetail> details, Map<Integer, BrokerBalanceState> stateMap) {
|
||||
details.forEach(planDetail ->{
|
||||
BrokerBalanceState state = stateMap.get(planDetail.getBrokerId());
|
||||
if (state == null){
|
||||
return;
|
||||
}
|
||||
planDetail.setCpuStatus(state.getCpuBalanceState());
|
||||
planDetail.setDiskStatus(state.getDiskBalanceState());
|
||||
planDetail.setByteInStatus(state.getBytesInBalanceState());
|
||||
planDetail.setByteOutStatus(state.getBytesOutBalanceState());
|
||||
if ((state.getCpuBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getCpuBalanceState()))
|
||||
&& (state.getDiskBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getDiskBalanceState()))
|
||||
&& (state.getBytesInBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getBytesInBalanceState()))
|
||||
&& (state.getBytesOutBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getBytesOutBalanceState()))) {
|
||||
planDetail.setStatus(ClusterBalanceStateEnum.BALANCE.getState());
|
||||
}else {
|
||||
planDetail.setStatus(ClusterBalanceStateEnum.UNBALANCED.getState());
|
||||
}
|
||||
});
|
||||
return details;
|
||||
}
|
||||
|
||||
public static List<ClusterBalancePlanDetailVO> convert2ClusterBalancePlanDetailVO(List<Integer> balanceBrokerIds, Map<Integer, BalanceDetailed> detailedMap) {
|
||||
List<ClusterBalancePlanDetailVO> detailVOS = new ArrayList<>();
|
||||
for(Map.Entry<Integer, BalanceDetailed> entry : detailedMap.entrySet()){
|
||||
BalanceDetailed value = entry.getValue();
|
||||
if (value == null || !balanceBrokerIds.contains(entry.getKey())){
|
||||
continue ;
|
||||
}
|
||||
ClusterBalancePlanDetailVO planDetailVO = new ClusterBalancePlanDetailVO();
|
||||
planDetailVO.setStatus(value.getBalanceState()==ClusterBalanceStateEnum.BALANCE.getState()?ClusterBalanceStateEnum.BALANCE.getState():ClusterBalanceStateEnum.UNBALANCED.getState());
|
||||
planDetailVO.setHost(value.getHost());
|
||||
planDetailVO.setBrokerId(entry.getKey());
|
||||
planDetailVO.setCpuBefore(value.getCurrentCPUUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setCpuAfter(value.getLastCPUUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setDiskBefore(value.getCurrentDiskUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setDiskAfter(value.getLastDiskUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setByteInBefore(value.getCurrentNetworkInUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setByteInAfter(value.getLastNetworkInUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setByteOutBefore(value.getCurrentNetworkOutUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setByteOutAfter(value.getLastNetworkOutUtilization()*Constant.ONE_HUNDRED);
|
||||
planDetailVO.setInReplica(value.getMoveInReplicas());
|
||||
planDetailVO.setOutReplica(value.getMoveOutReplicas());
|
||||
planDetailVO.setInSize(value.getMoveInDiskSize());
|
||||
planDetailVO.setOutSize(value.getMoveOutDiskSize());
|
||||
detailVOS.add(planDetailVO);
|
||||
}
|
||||
return detailVOS;
|
||||
}
|
||||
|
||||
public static ClusterBalancePlanVO convert2ClusterBalancePlanVO(ClusterBalancePreviewDTO jobDTO, OptimizerResult optimizerResult, List<Broker> allBrokers) {
|
||||
if (ValidateUtils.anyNull(jobDTO, optimizerResult, optimizerResult.resultJsonOverview(),
|
||||
optimizerResult.resultJsonDetailed(), optimizerResult.resultDetailed(), optimizerResult.resultJsonTask())){
|
||||
return null;
|
||||
}
|
||||
ClusterBalancePlanVO planVO = new ClusterBalancePlanVO();
|
||||
planVO.setTopics(CommonUtils.string2StrList(optimizerResult.resultOverview().getMoveTopics()));
|
||||
planVO.setType(ClusterBalanceTypeEnum.IMMEDIATELY.getType());
|
||||
planVO.setReplicas(optimizerResult.resultOverview().getMoveReplicas());
|
||||
planVO.setBlackTopics(jobDTO.getTopicBlackList());
|
||||
planVO.setMoveSize(optimizerResult.resultOverview().getTotalMoveSize());
|
||||
planVO.setThreshold(ConvertUtil.obj2Json(jobDTO.getClusterBalanceIntervalList()));
|
||||
planVO.setBrokers(convert2HostList(allBrokers, optimizerResult.resultOverview().getNodeRange()));
|
||||
planVO.setDetail(convert2ClusterBalancePlanDetailVO(jobDTO.getBrokers(), optimizerResult.resultDetailed()));
|
||||
planVO.setClusterBalanceIntervalList(ConvertUtil.list2List(jobDTO.getClusterBalanceIntervalList(), ClusterBalanceIntervalVO.class));
|
||||
planVO.setReassignmentJson(optimizerResult.resultJsonTask());
|
||||
return planVO;
|
||||
}
|
||||
|
||||
public static ClusterBalancePreviewDTO convert2ClusterBalancePreviewDTO(ClusterBalanceJobPO clusterBalanceJobPO) {
|
||||
|
||||
ClusterBalancePreviewDTO planVO = new ClusterBalancePreviewDTO();
|
||||
planVO.setBrokers(CommonUtils.string2IntList(clusterBalanceJobPO.getBrokers()));
|
||||
planVO.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class));
|
||||
planVO.setClusterId(clusterBalanceJobPO.getClusterId());
|
||||
planVO.setExecutionStrategy(clusterBalanceJobPO.getExecutionStrategy());
|
||||
planVO.setParallelNum(clusterBalanceJobPO.getParallelNum());
|
||||
planVO.setThrottleUnitB(clusterBalanceJobPO.getThrottleUnitB());
|
||||
planVO.setMetricCalculationPeriod(clusterBalanceJobPO.getMetricCalculationPeriod());
|
||||
planVO.setTopicBlackList(CommonUtils.string2StrList(clusterBalanceJobPO.getTopicBlackList()));
|
||||
return planVO;
|
||||
}
|
||||
|
||||
public static Map<String, ClusterBalanceOverviewSubVO> convert2MapClusterBalanceOverviewSubVO(BrokerSpec brokerSpec, BrokerBalanceState state) {
|
||||
Map<String, ClusterBalanceOverviewSubVO> subVOMap = new HashMap<>();
|
||||
if (brokerSpec == null){
|
||||
brokerSpec = new BrokerSpec();
|
||||
}
|
||||
if (state == null){
|
||||
state = new BrokerBalanceState();
|
||||
}
|
||||
Double cpuSpec = brokerSpec.getCpu()!=null?brokerSpec.getCpu()*Constant.ONE_HUNDRED:null;//转成基础单位
|
||||
subVOMap.put(Resource.DISK.resource(),
|
||||
new ClusterBalanceOverviewSubVO(
|
||||
state.getDiskAvgResource(), brokerSpec.getDisk(),
|
||||
state.getDiskBalanceState() == null || state.getDiskBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getDiskBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
|
||||
subVOMap.put(Resource.CPU.resource(),
|
||||
new ClusterBalanceOverviewSubVO(state.getCpuAvgResource(), cpuSpec,
|
||||
state.getCpuBalanceState() == null || state.getCpuBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getCpuBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
|
||||
subVOMap.put(Resource.NW_IN.resource(),
|
||||
new ClusterBalanceOverviewSubVO(
|
||||
state.getBytesInAvgResource(), brokerSpec.getFlow(),
|
||||
state.getBytesInBalanceState() == null || state.getBytesInBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getBytesInBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
|
||||
subVOMap.put(Resource.NW_OUT.resource(),
|
||||
new ClusterBalanceOverviewSubVO(
|
||||
state.getBytesOutAvgResource(), brokerSpec.getFlow(),
|
||||
state.getBytesOutBalanceState() == null || state.getBytesOutBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getBytesOutBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
|
||||
return subVOMap;
|
||||
}
|
||||
|
||||
|
||||
|
||||
public static ClusterBalanceJobConfigVO convert2ClusterBalanceJobConfigVO(ClusterBalanceJobConfigPO clusterBalanceJobConfigPO){
|
||||
ClusterBalanceJobConfigVO configVO = new ClusterBalanceJobConfigVO();
|
||||
configVO.setScheduleCron(clusterBalanceJobConfigPO.getTaskCron());
|
||||
configVO.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(clusterBalanceJobConfigPO.getBalanceIntervalJson(), ClusterBalanceInterval.class));
|
||||
configVO.setClusterId(clusterBalanceJobConfigPO.getClusterId());
|
||||
configVO.setExecutionStrategy(clusterBalanceJobConfigPO.getExecutionStrategy());
|
||||
configVO.setParallelNum(clusterBalanceJobConfigPO.getParallelNum());
|
||||
configVO.setMetricCalculationPeriod(clusterBalanceJobConfigPO.getMetricCalculationPeriod());
|
||||
configVO.setThrottleUnitB(clusterBalanceJobConfigPO.getThrottleUnitB());
|
||||
configVO.setTopicBlackList(CommonUtils.string2StrList(clusterBalanceJobConfigPO.getTopicBlackList()));
|
||||
configVO.setStatus(clusterBalanceJobConfigPO.getStatus());
|
||||
return configVO;
|
||||
}
|
||||
|
||||
|
||||
public static List<String> convert2HostList(List<Broker> allBrokers, String brokerIdStr){
|
||||
if (allBrokers.isEmpty() || ValidateUtils.isBlank(brokerIdStr)){
|
||||
return new ArrayList<>();
|
||||
}
|
||||
List<Integer> brokerIds = CommonUtils.string2IntList(brokerIdStr);
|
||||
return allBrokers.stream().filter(broker -> brokerIds.contains(broker.getBrokerId()))
|
||||
.map(Broker::getHost).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private static List<HostEnv> convert2ListHostEnv(Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap) {
|
||||
List<HostEnv> hostEnvs = new ArrayList<>();
|
||||
for (Map.Entry<Integer, Broker> entry : brokerMap.entrySet()) {
|
||||
HostEnv hostEnv = new HostEnv();
|
||||
hostEnv.setId(entry.getKey());
|
||||
hostEnv.setHost(entry.getValue().getHost());
|
||||
hostEnv.setRackId(entry.getValue().getRack());
|
||||
BrokerSpec brokerSpec = brokerSpecMap.get(entry.getKey());
|
||||
if (brokerSpec == null){
|
||||
continue;
|
||||
}
|
||||
hostEnv.setCpu(brokerSpec.getCpu().intValue() * Constant.ONE_HUNDRED);
|
||||
hostEnv.setDisk(brokerSpec.getDisk() * Constant.B_TO_GB);
|
||||
hostEnv.setNetwork(brokerSpec.getFlow() * Constant.B_TO_MB);
|
||||
hostEnvs.add(hostEnv);
|
||||
}
|
||||
|
||||
return hostEnvs;
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,218 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignDetail;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignExtendData;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByPartition;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByTopic;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.Job;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.JobStatus;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.detail.JobDetail;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.detail.SubJobReplicaMoveDetail;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.strategy.ReplaceReassignSub;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobClusterBalanceReplicaMoveVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobPartitionDetailVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@EnterpriseLoadReBalance
public class ClusterBalanceReassignConverter {

    // Static utility class — not instantiable.
    private ClusterBalanceReassignConverter() {
    }

    /**
     * Builds the job-detail view for a cluster-balance reassignment job.
     *
     * Basic fields come from the generic {@code Job}; timing, throttle and per-topic
     * sub-jobs come from the reassignment detail. Counts (total/success/fail/doing)
     * are aggregated from the per-topic statuses via {@code JobStatus}.
     */
    public static JobDetail convert2JobDetail(Job job, ClusterBalanceReassignDetail reassignDetail) {
        JobDetail jobDetail = new JobDetail();
        jobDetail.setId(job.getId());
        jobDetail.setJobType(job.getJobType());
        jobDetail.setJobName(job.getJobName());
        jobDetail.setJobStatus(job.getJobStatus());
        jobDetail.setPlanTime(job.getPlanTime());

        jobDetail.setStartTime(reassignDetail.getStartTime());
        jobDetail.setEndTime(reassignDetail.getFinishedTime());
        // NOTE(review): assumes getThrottleUnitB() is non-null here — verify against callers
        jobDetail.setFlowLimit(reassignDetail.getThrottleUnitB().doubleValue());

        // aggregate topic-level statuses into total/success/fail/doing counters
        JobStatus jobStatus = new JobStatus(reassignDetail.getReassignTopicDetailsList().stream().map(elem -> elem.getStatus()).collect(Collectors.toList()));
        jobDetail.setTotal(jobStatus.getTotal());
        jobDetail.setSuccess(jobStatus.getSuccess());
        jobDetail.setFail(jobStatus.getFailed());
        jobDetail.setDoing(jobStatus.getDoing());

        // one replica-move sub-job VO per topic
        List<SubJobVO> subJobDetailList = new ArrayList<>();
        subJobDetailList.addAll(
                ConvertUtil.list2List(convert2SubJobReplicaMoveDetailList(reassignDetail.getReassignTopicDetailsList()), SubJobClusterBalanceReplicaMoveVO.class)
        );
        jobDetail.setSubJobs(subJobDetailList);

        return jobDetail;
    }

    /**
     * Builds the reassignment detail of a balance job: groups the per-partition rows
     * by topic and aggregates each group. The finished time is only set once the job
     * has reached a terminal status.
     */
    public static ClusterBalanceReassignDetail convert2ClusterBalanceReassignDetail(ClusterBalanceJobPO jobPO, List<ClusterBalanceReassignPO> reassignPOS) {
        // group rows by topic
        Map<String, List<ClusterBalanceReassignPO>> topicJobPOMap = new HashMap<>();
        reassignPOS.forEach(elem -> {
            topicJobPOMap.putIfAbsent(elem.getTopicName(), new ArrayList<>());
            topicJobPOMap.get(elem.getTopicName()).add(elem);
        });

        List<ClusterBalanceDetailDataGroupByTopic> reassignTopicDetailsList = new ArrayList<>();
        for (Map.Entry<String, List<ClusterBalanceReassignPO>> entry: topicJobPOMap.entrySet()) {
            reassignTopicDetailsList.add(convert2ClusterBalanceDetailDataGroupByTopic(entry.getValue()));
        }

        ClusterBalanceReassignDetail jobDetail = new ClusterBalanceReassignDetail();
        jobDetail.setThrottleUnitB(jobPO.getThrottleUnitB());
        jobDetail.setReassignTopicDetailsList(reassignTopicDetailsList);
        jobDetail.setStartTime(jobPO.getStartTime());
        // finished time only meaningful for terminal job statuses
        if (JobStatusEnum.isFinished(jobPO.getStatus())) {
            jobDetail.setFinishedTime(jobPO.getFinishedTime());
        }
        return jobDetail;
    }

    /**
     * Aggregates the per-partition reassignment rows of ONE topic into a topic-level
     * detail: collects partition details, unions broker-id sets, sums moved sizes,
     * takes the max remaining time, and derives the topic status from the partition
     * statuses.
     *
     * NOTE(review): assumes {@code reassingns} is non-empty and all rows belong to the
     * same topic — guaranteed by the grouping in the caller above.
     */
    private static ClusterBalanceDetailDataGroupByTopic convert2ClusterBalanceDetailDataGroupByTopic(List<ClusterBalanceReassignPO> reassingns) {
        Set<Integer> originalBrokerIdSet = new HashSet<>();
        Set<Integer> reassignBrokerIdSet = new HashSet<>();

        // partition-level details
        List<ClusterBalanceDetailDataGroupByPartition> partitionDetailList = new ArrayList<>();
        for (ClusterBalanceReassignPO reassignPO : reassingns) {
            ClusterBalanceDetailDataGroupByPartition detail = new ClusterBalanceDetailDataGroupByPartition();
            detail.setPartitionId(reassignPO.getPartitionId());
            detail.setClusterPhyId(reassignPO.getClusterId());
            detail.setTopicName(reassignPO.getTopicName());
            detail.setOriginalBrokerIdList(CommonUtils.string2IntList(reassignPO.getOriginalBrokerIds()));
            detail.setReassignBrokerIdList(CommonUtils.string2IntList(reassignPO.getReassignBrokerIds()));
            detail.setStatus(reassignPO.getStatus());

            // extend-data JSON carries progress info; absent/unparsable JSON leaves those fields null
            ClusterBalanceReassignExtendData extendData = ConvertUtil.str2ObjByJson(reassignPO.getExtendData(), ClusterBalanceReassignExtendData.class);
            if (extendData != null) {
                detail.setNeedReassignLogSizeUnitB(extendData.getNeedReassignLogSizeUnitB());
                detail.setFinishedReassignLogSizeUnitB(extendData.getFinishedReassignLogSizeUnitB());
                detail.setRemainTimeUnitMs(extendData.getRemainTimeUnitMs());
                detail.setPresentReplicaNum(extendData.getOriginReplicaNum());
                detail.setNewReplicaNum(extendData.getReassignReplicaNum());
                detail.setOriginalRetentionTimeUnitMs(extendData.getOriginalRetentionTimeUnitMs());
                detail.setReassignRetentionTimeUnitMs(extendData.getReassignRetentionTimeUnitMs());
            }

            originalBrokerIdSet.addAll(detail.getOriginalBrokerIdList());
            reassignBrokerIdSet.addAll(detail.getReassignBrokerIdList());
            partitionDetailList.add(detail);
        }
        // topic-level aggregation
        ClusterBalanceDetailDataGroupByTopic topicDetail = new ClusterBalanceDetailDataGroupByTopic();
        topicDetail.setPartitionIdList(partitionDetailList.stream().map(elem -> elem.getPartitionId()).collect(Collectors.toList()));
        topicDetail.setReassignPartitionDetailsList(partitionDetailList);
        topicDetail.setClusterPhyId(reassingns.get(0).getClusterId());
        topicDetail.setTopicName(reassingns.get(0).getTopicName());

        topicDetail.setOriginalBrokerIdList(new ArrayList<>(originalBrokerIdSet));
        topicDetail.setReassignBrokerIdList(new ArrayList<>(reassignBrokerIdSet));

        // total bytes to move: sum over partitions that reported it; null when none did
        List<Long> needSizeList = partitionDetailList
                .stream()
                .filter(elem -> elem.getNeedReassignLogSizeUnitB() != null)
                .map(item -> item.getNeedReassignLogSizeUnitB()).collect(Collectors.toList());
        topicDetail.setNeedReassignLogSizeUnitB(needSizeList.isEmpty()? null: needSizeList.stream().reduce(Long::sum).get());

        // bytes already moved: same sum-or-null rule
        List<Long> finishedSizeList = partitionDetailList
                .stream()
                .filter(elem -> elem.getFinishedReassignLogSizeUnitB() != null)
                .map(item -> item.getFinishedReassignLogSizeUnitB()).collect(Collectors.toList());
        topicDetail.setFinishedReassignLogSizeUnitB(finishedSizeList.isEmpty()? null: finishedSizeList.stream().reduce(Long::sum).get());

        // remaining time: the slowest (max) partition dominates
        List<Long> remainList = partitionDetailList
                .stream()
                .filter(elem -> elem.getRemainTimeUnitMs() != null)
                .map(item -> item.getRemainTimeUnitMs()).collect(Collectors.toList());
        topicDetail.setRemainTimeUnitMs(remainList.isEmpty()? null: remainList.stream().reduce(Long::max).get());

        // replica counts / retention taken from the first partition
        // NOTE(review): assumes these values are uniform across partitions of a topic — confirm
        topicDetail.setPresentReplicaNum(partitionDetailList.get(0).getPresentReplicaNum());
        topicDetail.setNewReplicaNum(partitionDetailList.get(0).getNewReplicaNum());
        topicDetail.setOriginalRetentionTimeUnitMs(partitionDetailList.get(0).getOriginalRetentionTimeUnitMs());
        topicDetail.setReassignRetentionTimeUnitMs(partitionDetailList.get(0).getReassignRetentionTimeUnitMs());

        // topic status derived from the aggregate of its partition statuses
        topicDetail.setStatus(
                new JobStatus(
                        partitionDetailList.stream().map(elem -> elem.getStatus()).collect(Collectors.toList())
                ).getStatus()
        );

        return topicDetail;
    }

    /**
     * Flattens a topic-level detail into one VO per partition (source/destination
     * brokers, moved/total size, status, remaining time).
     */
    public static List<SubJobPartitionDetailVO> convert2SubJobPartitionDetailVOList(ClusterBalanceDetailDataGroupByTopic detailDataGroupByTopic) {
        List<SubJobPartitionDetailVO> voList = new ArrayList<>();
        for (ClusterBalanceDetailDataGroupByPartition groupByPartition: detailDataGroupByTopic.getReassignPartitionDetailsList()) {
            SubJobPartitionDetailVO vo = new SubJobPartitionDetailVO();
            vo.setPartitionId(groupByPartition.getPartitionId());
            vo.setSourceBrokerIds(groupByPartition.getOriginalBrokerIdList());
            vo.setDesBrokerIds(groupByPartition.getReassignBrokerIdList());
            // sizes are optional — stay null when the partition reported no progress data
            vo.setTotalSize(groupByPartition.getNeedReassignLogSizeUnitB() != null ? groupByPartition.getNeedReassignLogSizeUnitB().doubleValue(): null);
            vo.setMovedSize(groupByPartition.getFinishedReassignLogSizeUnitB() != null ? groupByPartition.getFinishedReassignLogSizeUnitB().doubleValue(): null);
            vo.setStatus(groupByPartition.getStatus());
            vo.setRemainTime(groupByPartition.getRemainTimeUnitMs());

            voList.add(vo);
        }

        return voList;
    }

    /**
     * Maps each topic-level detail to a replica-move sub-job detail, including
     * aggregated total/success/fail/doing counters over its partitions.
     */
    private static List<SubJobReplicaMoveDetail> convert2SubJobReplicaMoveDetailList(List<ClusterBalanceDetailDataGroupByTopic> reassignTopicDetailsList) {
        List<SubJobReplicaMoveDetail> detailList = new ArrayList<>();

        for (ClusterBalanceDetailDataGroupByTopic detailDataGroupByTopic: reassignTopicDetailsList) {
            SubJobReplicaMoveDetail detail = new SubJobReplicaMoveDetail();
            detail.setTopicName(detailDataGroupByTopic.getTopicName());
            detail.setPartitions(detailDataGroupByTopic.getPartitionIdList());
            detail.setCurrentTimeSpent(detailDataGroupByTopic.getOriginalRetentionTimeUnitMs());
            detail.setMoveTimeSpent(detailDataGroupByTopic.getReassignRetentionTimeUnitMs());
            detail.setSourceBrokers(detailDataGroupByTopic.getOriginalBrokerIdList());
            detail.setDesBrokers(detailDataGroupByTopic.getReassignBrokerIdList());
            detail.setStatus(detailDataGroupByTopic.getStatus());
            // sizes only set when known, so null stays null instead of NPE on unboxing
            if (detailDataGroupByTopic.getNeedReassignLogSizeUnitB() != null) {
                detail.setTotalSize(detailDataGroupByTopic.getNeedReassignLogSizeUnitB().doubleValue());
            }
            if (detailDataGroupByTopic.getFinishedReassignLogSizeUnitB() != null) {
                detail.setMovedSize(detailDataGroupByTopic.getFinishedReassignLogSizeUnitB().doubleValue());
            }
            // aggregate partition statuses into counters
            JobStatus jobStatus = new JobStatus(detailDataGroupByTopic.getReassignPartitionDetailsList().stream().map(elem -> elem.getStatus()).collect(Collectors.toList()));
            detail.setTotal(jobStatus.getTotal());
            detail.setSuccess(jobStatus.getSuccess());
            detail.setFail(jobStatus.getFailed());
            detail.setDoing(jobStatus.getDoing());
            detail.setRemainTime(detailDataGroupByTopic.getRemainTimeUnitMs());
            detailList.add(detail);
        }

        return detailList;
    }

    /**
     * Maps each reassignment row to a {@code ReplaceReassignSub}.
     */
    public static List<ReplaceReassignSub> convert2ReplaceReassignSubList(List<ClusterBalanceReassignPO> reassignPOList) {
        List<ReplaceReassignSub> voList = new ArrayList<>();
        for (ClusterBalanceReassignPO reassignPO: reassignPOList) {
            voList.add(convert2ReplaceReassignSub(reassignPO));
        }
        return voList;
    }

    /**
     * Maps a single reassignment row: cluster/topic/partition identity plus the
     * original and target broker-id lists (parsed from their stored string form).
     */
    public static ReplaceReassignSub convert2ReplaceReassignSub(ClusterBalanceReassignPO reassignPO) {
        ReplaceReassignSub reassignSub = new ReplaceReassignSub();
        reassignSub.setClusterPhyId(reassignPO.getClusterId());
        reassignSub.setOriginalBrokerIdList(CommonUtils.string2IntList(reassignPO.getOriginalBrokerIds()));
        reassignSub.setReassignBrokerIdList(CommonUtils.string2IntList(reassignPO.getReassignBrokerIds()));
        reassignSub.setPartitionId(reassignPO.getPartitionId());
        reassignSub.setTopicName(reassignPO.getTopicName());
        return reassignSub;
    }

}
|
||||
@@ -0,0 +1,31 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Getter;
|
||||
|
||||
/**
 * Cluster balance state of a broker/resource relative to the configured balance range.
 * @author zengqiao
 * @date 22/03/08
 */
@Getter
@EnterpriseLoadReBalance
public enum ClusterBalanceStateEnum {
    // below the balance range
    BELOW_BALANCE(-1, "低于均衡范围"),

    // inside the balance range
    BALANCE(0, "均衡范围内"),

    // above the balance range
    ABOVE_BALANCE(1, "高于均衡范围"),

    // not balanced
    UNBALANCED(2, "不均衡"),
    ;

    // numeric state code; boxed Integer — compare with equals(), never ==
    private final Integer state;

    // user-facing description (Chinese)
    private final String message;

    ClusterBalanceStateEnum(int state, String message) {
        this.state = state;
        this.message = message;
    }
}
|
||||
@@ -0,0 +1,28 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import lombok.Getter;
|
||||
|
||||
/**
 * Cluster balance job type: run once immediately, or on a recurring schedule.
 * @author zengqiao
 * @date 22/03/08
 */
@Getter
@EnterpriseLoadReBalance
public enum ClusterBalanceTypeEnum {

    // one-shot balance, executed immediately
    IMMEDIATELY(1, "立即"),

    // periodic balance, driven by a schedule
    CYCLE(2, "周期"),
    ;

    // numeric type code
    private final int type;

    // user-facing description (Chinese)
    private final String message;

    ClusterBalanceTypeEnum(int type, String message) {
        this.type = type;
        this.message = message;
    }
}
|
||||
@@ -0,0 +1,9 @@
|
||||
/**
|
||||
* Load-reBalance相关功能模块
|
||||
* km-extends/km-rebalance 模块,是依据指标生成迁移 plan 的模块,是底层的一个基础功能
|
||||
* 当前 package 模块是依据产品的要求,依赖 km-extends/km-rebalance 模块,构建产品实际使用功能
|
||||
*/
|
||||
@EnterpriseLoadReBalance
|
||||
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
@@ -10,13 +10,15 @@ import lombok.Getter;
|
||||
public enum HealthCheckDimensionEnum {
|
||||
UNKNOWN(-1, "未知"),
|
||||
|
||||
CLUSTER(0, "Cluster维度"),
|
||||
CLUSTER(0, "Cluster"),
|
||||
|
||||
BROKER(1, "Broker维度"),
|
||||
BROKER(1, "Broker"),
|
||||
|
||||
TOPIC(2, "Topic维度"),
|
||||
TOPIC(2, "Topic"),
|
||||
|
||||
GROUP(3, "消费组维度"),
|
||||
GROUP(3, "Group"),
|
||||
|
||||
ZOOKEEPER(4, "Zookeeper"),
|
||||
|
||||
;
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package com.xiaojukeji.know.streaming.km.common.enums.health;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthAmountRatioConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthDetectedInLatestMinutesConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
@@ -19,7 +20,8 @@ public enum HealthCheckNameEnum {
|
||||
"未知",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "UNKNOWN",
|
||||
"未知",
|
||||
BaseClusterHealthConfig.class
|
||||
BaseClusterHealthConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
CLUSTER_NO_CONTROLLER(
|
||||
@@ -27,7 +29,8 @@ public enum HealthCheckNameEnum {
|
||||
"Controller",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "CLUSTER_NO_CONTROLLER",
|
||||
"集群Controller数正常",
|
||||
HealthCompareValueConfig.class
|
||||
HealthCompareValueConfig.class,
|
||||
true
|
||||
),
|
||||
|
||||
BROKER_REQUEST_QUEUE_FULL(
|
||||
@@ -35,7 +38,8 @@ public enum HealthCheckNameEnum {
|
||||
"RequestQueueSize",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "BROKER_REQUEST_QUEUE_FULL",
|
||||
"Broker-RequestQueueSize指标",
|
||||
HealthCompareValueConfig.class
|
||||
HealthCompareValueConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW(
|
||||
@@ -43,7 +47,8 @@ public enum HealthCheckNameEnum {
|
||||
"NetworkProcessorAvgIdlePercent",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW",
|
||||
"Broker-NetworkProcessorAvgIdlePercent指标",
|
||||
HealthCompareValueConfig.class
|
||||
HealthCompareValueConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
GROUP_RE_BALANCE_TOO_FREQUENTLY(
|
||||
@@ -51,7 +56,8 @@ public enum HealthCheckNameEnum {
|
||||
"Group Re-Balance",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "GROUP_RE_BALANCE_TOO_FREQUENTLY",
|
||||
"Group re-balance频率",
|
||||
HealthDetectedInLatestMinutesConfig.class
|
||||
HealthDetectedInLatestMinutesConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
TOPIC_NO_LEADER(
|
||||
@@ -59,7 +65,8 @@ public enum HealthCheckNameEnum {
|
||||
"NoLeader",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "TOPIC_NO_LEADER",
|
||||
"Topic 无Leader数",
|
||||
HealthCompareValueConfig.class
|
||||
HealthCompareValueConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
TOPIC_UNDER_REPLICA_TOO_LONG(
|
||||
@@ -67,9 +74,66 @@ public enum HealthCheckNameEnum {
|
||||
"UnderReplicaTooLong",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "TOPIC_UNDER_REPLICA_TOO_LONG",
|
||||
"Topic 未同步持续时间",
|
||||
HealthDetectedInLatestMinutesConfig.class
|
||||
HealthDetectedInLatestMinutesConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
ZK_BRAIN_SPLIT(
|
||||
HealthCheckDimensionEnum.ZOOKEEPER,
|
||||
"BrainSplit",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "ZK_BRAIN_SPLIT",
|
||||
"ZK 脑裂",
|
||||
HealthCompareValueConfig.class,
|
||||
true
|
||||
),
|
||||
|
||||
ZK_OUTSTANDING_REQUESTS(
|
||||
HealthCheckDimensionEnum.ZOOKEEPER,
|
||||
"OutstandingRequests",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "ZK_OUTSTANDING_REQUESTS",
|
||||
"ZK Outstanding 请求堆积数",
|
||||
HealthAmountRatioConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
ZK_WATCH_COUNT(
|
||||
HealthCheckDimensionEnum.ZOOKEEPER,
|
||||
"WatchCount",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "ZK_WATCH_COUNT",
|
||||
"ZK WatchCount 数",
|
||||
HealthAmountRatioConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
ZK_ALIVE_CONNECTIONS(
|
||||
HealthCheckDimensionEnum.ZOOKEEPER,
|
||||
"AliveConnections",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "ZK_ALIVE_CONNECTIONS",
|
||||
"ZK 连接数",
|
||||
HealthAmountRatioConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
ZK_APPROXIMATE_DATA_SIZE(
|
||||
HealthCheckDimensionEnum.ZOOKEEPER,
|
||||
"ApproximateDataSize",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "ZK_APPROXIMATE_DATA_SIZE",
|
||||
"ZK 数据大小(Byte)",
|
||||
HealthAmountRatioConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
ZK_SENT_RATE(
|
||||
HealthCheckDimensionEnum.ZOOKEEPER,
|
||||
"SentRate",
|
||||
Constant.HC_CONFIG_NAME_PREFIX + "ZK_SENT_RATE",
|
||||
"ZK 发包数",
|
||||
HealthAmountRatioConfig.class,
|
||||
false
|
||||
),
|
||||
|
||||
|
||||
|
||||
;
|
||||
|
||||
/**
|
||||
@@ -97,12 +161,18 @@ public enum HealthCheckNameEnum {
|
||||
*/
|
||||
private final Class configClazz;
|
||||
|
||||
HealthCheckNameEnum(HealthCheckDimensionEnum dimensionEnum, String configItem, String configName, String configDesc, Class configClazz) {
|
||||
/**
|
||||
* 是可用性检查?
|
||||
*/
|
||||
private final boolean availableChecker;
|
||||
|
||||
HealthCheckNameEnum(HealthCheckDimensionEnum dimensionEnum, String configItem, String configName, String configDesc, Class configClazz, boolean availableChecker) {
|
||||
this.dimensionEnum = dimensionEnum;
|
||||
this.configItem = configItem;
|
||||
this.configName = configName;
|
||||
this.configDesc = configDesc;
|
||||
this.configClazz = configClazz;
|
||||
this.availableChecker = availableChecker;
|
||||
}
|
||||
|
||||
public static HealthCheckNameEnum getByName(String configName) {
|
||||
|
||||
@@ -16,7 +16,7 @@ public enum HealthStateEnum {
|
||||
|
||||
POOR(2, "差"),
|
||||
|
||||
DEAD(3, "宕机"),
|
||||
DEAD(3, "Down"),
|
||||
|
||||
;
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package com.xiaojukeji.know.streaming.km.common.enums.operaterecord;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
|
||||
import java.util.List;
|
||||
@@ -37,6 +38,9 @@ public enum ModuleEnum {
|
||||
|
||||
JOB_KAFKA_REPLICA_REASSIGN(110, "Job-KafkaReplica迁移"),
|
||||
|
||||
@EnterpriseLoadReBalance
|
||||
JOB_CLUSTER_BALANCE(111, "Job-ClusterBalance"),
|
||||
|
||||
;
|
||||
|
||||
ModuleEnum(int code, String desc) {
|
||||
|
||||
@@ -16,7 +16,7 @@ public class ZookeeperUtils {
|
||||
* 解析ZK地址
|
||||
* @see ConnectStringParser
|
||||
*/
|
||||
public static List<Tuple<String, Integer>> connectStringParser(String connectString) throws Exception {
|
||||
public static List<Tuple<String, Integer>> connectStringParser(String connectString) {
|
||||
List<Tuple<String, Integer>> ipPortList = new ArrayList<>();
|
||||
|
||||
if (connectString == null) {
|
||||
@@ -55,5 +55,14 @@ public class ZookeeperUtils {
|
||||
return ipPortList;
|
||||
}
|
||||
|
||||
public static String getNamespace(String zookeeperAddress) {
|
||||
int index = zookeeperAddress.indexOf('/');
|
||||
String namespace = "/";
|
||||
if (index != -1) {
|
||||
namespace = zookeeperAddress.substring(index);
|
||||
}
|
||||
return namespace;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ npm install -g lerna
|
||||
npm run i
|
||||
```
|
||||
|
||||
我们默认保留了 `package-lock.json` 文件,以防止可能的依赖包自动升级导致的问题。依赖默认会通过 taobao 镜像 `https://registry.npmmirror.com/` 服务下载。
|
||||
我们默认保留了 `package-lock.json` 文件,以防止可能的依赖包自动升级导致的问题。依赖默认会通过 taobao 镜像 `https://registry.npmmirror.com/` 服务下载(如需修改下载源,请见当前目录下 package.json 文件)。
|
||||
|
||||
## 三、启动项目(可选,打包构建请直接看步骤三)
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
"prettier": "2.3.2"
|
||||
},
|
||||
"scripts": {
|
||||
"i": "npm install && lerna bootstrap",
|
||||
"i": "npm config set registry https://registry.npmmirror.com/ && npm install && lerna bootstrap",
|
||||
"clean": "rm -rf node_modules package-lock.json packages/*/node_modules packages/*/package-lock.json",
|
||||
"start": "lerna run start",
|
||||
"build": "lerna run build",
|
||||
|
||||
@@ -1345,9 +1345,9 @@
|
||||
}
|
||||
},
|
||||
"@knowdesign/icons": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmmirror.com/@knowdesign/icons/-/icons-1.0.0.tgz",
|
||||
"integrity": "sha512-7c+h2TSbh2ihTkXIivuO+DddNC5wG7hVv9SS4ccmkvTKls2ZTLitPu+U0wpufDxPhkPMaKEQfsECsVJ+7jLMiw==",
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmmirror.com/@knowdesign/icons/-/icons-1.0.2.tgz",
|
||||
"integrity": "sha512-eQuUQZbPRvC1xU4ouzgrk8j6UE39Cui+eEkYkLbfGLpVbGPFKJ7yEmUyKhIjG9zhf1qS7/h08yzq0hAHajBi8g==",
|
||||
"requires": {
|
||||
"@ant-design/colors": "^6.0.0",
|
||||
"@ant-design/icons": "^4.7.0",
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"build": "cross-env NODE_ENV=production webpack --max_old_space_size=8000"
|
||||
},
|
||||
"dependencies": {
|
||||
"@knowdesign/icons": "^1.0.0",
|
||||
"@knowdesign/icons": "^1.0.2",
|
||||
"babel-preset-react-app": "^10.0.0",
|
||||
"classnames": "^2.2.6",
|
||||
"dotenv": "^16.0.1",
|
||||
|
||||
@@ -1388,9 +1388,9 @@
|
||||
}
|
||||
},
|
||||
"@knowdesign/icons": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmmirror.com/@knowdesign/icons/-/icons-1.0.1.tgz",
|
||||
"integrity": "sha512-EI3s25BJt+Slv7/t6B3K3zv7I6TKkk2Wf1y68zuxK80MMkWf8lqqUtyAZbFDoPUfXAjw6vHktMBH44gbMHMRFA==",
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmmirror.com/@knowdesign/icons/-/icons-1.0.2.tgz",
|
||||
"integrity": "sha512-eQuUQZbPRvC1xU4ouzgrk8j6UE39Cui+eEkYkLbfGLpVbGPFKJ7yEmUyKhIjG9zhf1qS7/h08yzq0hAHajBi8g==",
|
||||
"requires": {
|
||||
"@ant-design/colors": "^6.0.0",
|
||||
"@ant-design/icons": "^4.7.0",
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"dependencies": {
|
||||
"@ant-design/compatible": "^1.0.8",
|
||||
"@ant-design/icons": "^4.6.2",
|
||||
"@knowdesign/icons": "^1.0.1",
|
||||
"@knowdesign/icons": "^1.0.2",
|
||||
"@types/react": "^17.0.39",
|
||||
"@types/react-copy-to-clipboard": "^5.0.2",
|
||||
"@types/react-dom": "^17.0.11",
|
||||
|
||||
@@ -24,6 +24,7 @@ const api = {
|
||||
logout: `${securityPrefix}/account/logout`,
|
||||
|
||||
// 全局信息
|
||||
getVersionInfo: () => getApi('/self/version'),
|
||||
getUserInfo: (userId: number) => `${securityPrefix}/user/${userId}`,
|
||||
getPermissionTree: `${securityPrefix}/permission/tree`,
|
||||
getKafkaVersionItems: () => getApi('/kafka-versions-items'),
|
||||
@@ -60,6 +61,7 @@ const api = {
|
||||
phyClustersDashbord: getApi(`/physical-clusters/dashboard`),
|
||||
supportKafkaVersion: getApi(`/support-kafka-versions`),
|
||||
phyClusterState: getApi(`/physical-clusters/state`),
|
||||
phyClusterHealthState: getApi(`/physical-clusters/health-state`),
|
||||
|
||||
getOperatingStateList: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/groups-overview`),
|
||||
getGroupTopicList: (clusterPhyId: number, groupName: string) => getApi(`/clusters/${clusterPhyId}/groups/${groupName}/topics-overview`),
|
||||
@@ -201,6 +203,14 @@ const api = {
|
||||
getJobsTaskData: (clusterPhyId: string, jobId: string | number) => getApi(`/clusters/${clusterPhyId}/jobs/${jobId}/modify-detail`),
|
||||
//编辑任务
|
||||
putJobsTaskData: (clusterPhyId: string) => getApi(`/clusters/${clusterPhyId}/jobs`),
|
||||
|
||||
// Zookeeper 接口
|
||||
getZookeeperState: (clusterPhyId: string) => getApi(`/clusters/${clusterPhyId}/zookeepers-state`),
|
||||
getZookeeperList: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/zookeepers-overview`),
|
||||
getZookeeperNodeChildren: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/znode-children`),
|
||||
getZookeeperNodeData: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/znode-data`),
|
||||
getZookeeperMetricsInfo: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/zookeeper-latest-metrics`),
|
||||
getZookeeperMetrics: (clusterPhyId: string) => getApi(`/clusters/${clusterPhyId}/zookeeper-metrics`),
|
||||
};
|
||||
|
||||
export default api;
|
||||
|
||||
@@ -3,12 +3,13 @@ import '@babel/polyfill';
|
||||
import React, { useState, useEffect, useLayoutEffect } from 'react';
|
||||
import { BrowserRouter, Switch, Route, useLocation, useHistory } from 'react-router-dom';
|
||||
import { get as lodashGet } from 'lodash';
|
||||
import { DProLayout, AppContainer, Menu, Utils, Page403, Page404, Page500, Modal } from 'knowdesign';
|
||||
import { DProLayout, AppContainer, Menu, Utils, Page500, Modal } from 'knowdesign';
|
||||
import { IconFont } from '@knowdesign/icons';
|
||||
import dantdZhCN from 'knowdesign/es/locale/zh_CN';
|
||||
import dantdEnUS from 'knowdesign/es/locale/en_US';
|
||||
import { DotChartOutlined } from '@ant-design/icons';
|
||||
import { licenseEventBus } from './constants/axiosConfig';
|
||||
import { Page403, Page404, NoLicense } from './pages/ErrorPages';
|
||||
import intlZhCN from './locales/zh';
|
||||
import intlEnUS from './locales/en';
|
||||
import registerApps from '../config/registerApps';
|
||||
@@ -18,13 +19,21 @@ import { Login } from './pages/Login';
|
||||
import { getLicenseInfo } from './constants/common';
|
||||
import api from './api';
|
||||
import ClusterContainer from './pages/index';
|
||||
import NoLicense from './pages/NoLicense';
|
||||
import ksLogo from './assets/ks-logo.png';
|
||||
|
||||
interface ILocaleMap {
|
||||
[index: string]: any;
|
||||
}
|
||||
|
||||
interface VersionInfo {
|
||||
'git.branch': string;
|
||||
'git.build.itme': string;
|
||||
'git.build.version': string;
|
||||
'git.commit.id': string;
|
||||
'git.commit.id.abbrev': string;
|
||||
'git.commit.time': string;
|
||||
}
|
||||
|
||||
const localeMap: ILocaleMap = {
|
||||
'zh-CN': {
|
||||
dantd: dantdZhCN,
|
||||
@@ -106,6 +115,7 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => {
|
||||
const history = useHistory();
|
||||
const userInfo = localStorage.getItem('userInfo');
|
||||
const [curActiveAppName, setCurActiveAppName] = useState('');
|
||||
const [versionInfo, setVersionInfo] = useState<VersionInfo>();
|
||||
|
||||
useEffect(() => {
|
||||
if (pathname.startsWith('/config')) {
|
||||
@@ -115,6 +125,13 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => {
|
||||
}
|
||||
}, [pathname]);
|
||||
|
||||
// 获取版本信息
|
||||
useEffect(() => {
|
||||
Utils.request(api.getVersionInfo()).then((res: VersionInfo) => {
|
||||
setVersionInfo(res);
|
||||
});
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<DProLayout.Container
|
||||
headerProps={{
|
||||
@@ -141,7 +158,12 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => {
|
||||
],
|
||||
isFixed: false,
|
||||
userDropMenuItems: [
|
||||
<Menu.Item key={0} onClick={logout}>
|
||||
<Menu.Item key={0}>
|
||||
<a href="https://github.com/didi/KnowStreaming/releases" rel="noreferrer" target="_blank">
|
||||
版本: {versionInfo?.['git.build.version']}
|
||||
</a>
|
||||
</Menu.Item>,
|
||||
<Menu.Item key={1} onClick={logout}>
|
||||
登出
|
||||
</Menu.Item>,
|
||||
],
|
||||
|
||||
BIN
km-console/packages/layout-clusters-fe/src/assets/beta-tag.png
Normal file
|
After Width: | Height: | Size: 6.2 KiB |
|
Before Width: | Height: | Size: 5.9 KiB After Width: | Height: | Size: 5.9 KiB |
|
After Width: | Height: | Size: 26 KiB |
|
Before Width: | Height: | Size: 4.8 KiB After Width: | Height: | Size: 4.8 KiB |
|
After Width: | Height: | Size: 26 KiB |
|
Before Width: | Height: | Size: 4.9 KiB After Width: | Height: | Size: 4.9 KiB |
|
After Width: | Height: | Size: 24 KiB |
|
After Width: | Height: | Size: 24 KiB |
|
After Width: | Height: | Size: 25 KiB |
|
After Width: | Height: | Size: 21 KiB |
|
After Width: | Height: | Size: 21 KiB |
BIN
km-console/packages/layout-clusters-fe/src/assets/page403.png
Normal file
|
After Width: | Height: | Size: 28 KiB |
BIN
km-console/packages/layout-clusters-fe/src/assets/page404.png
Normal file
|
After Width: | Height: | Size: 35 KiB |
@@ -1,11 +1,12 @@
|
||||
/* eslint-disable react/display-name */
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import { useHistory, useLocation, useParams } from 'react-router-dom';
|
||||
import { useLocation, useParams } from 'react-router-dom';
|
||||
import CardBar from '@src/components/CardBar';
|
||||
import { healthDataProps } from '.';
|
||||
import { Tag, Utils } from 'knowdesign';
|
||||
import { Utils } from 'knowdesign';
|
||||
import Api from '@src/api';
|
||||
import { hashDataParse } from '@src/constants/common';
|
||||
import { HealthStateEnum } from '../HealthState';
|
||||
|
||||
export default (props: { record: any }) => {
|
||||
const { record } = props;
|
||||
@@ -14,22 +15,20 @@ export default (props: { record: any }) => {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
score: 0,
|
||||
state: HealthStateEnum.UNKNOWN,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
alive: 0,
|
||||
});
|
||||
const healthItems = ['HealthScore_Topics', 'HealthCheckPassed_Topics', 'HealthCheckTotal_Topics', 'live'];
|
||||
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
Utils.post(Api.getBrokerDetailMetricPoints(hashDataParse(urlLocation.hash)?.brokerId, urlParams?.clusterId), [
|
||||
'Partitions',
|
||||
'Leaders',
|
||||
'PartitionURP',
|
||||
'HealthScore',
|
||||
'HealthCheckPassed',
|
||||
'HealthCheckTotal',
|
||||
'Alive',
|
||||
'HealthState',
|
||||
]).then((data: any) => {
|
||||
setLoading(false);
|
||||
const rightData = JSON.parse(JSON.stringify(data.metrics));
|
||||
@@ -47,14 +46,12 @@ export default (props: { record: any }) => {
|
||||
value: rightData['PartitionURP'] || '-',
|
||||
},
|
||||
];
|
||||
const healthResData: any = {};
|
||||
healthResData.score = data?.metrics?.['HealthScore'] || 0;
|
||||
healthResData.passed = data?.metrics?.['HealthCheckPassed'] || 0;
|
||||
healthResData.total = data?.metrics?.['HealthCheckTotal'] || 0;
|
||||
healthResData.alive = data?.metrics?.['Alive'] || 0;
|
||||
setCardData(cordRightMap);
|
||||
setHealthData(healthResData);
|
||||
// setCardData(data.metrics)
|
||||
setHealthData({
|
||||
state: data?.metrics?.['HealthState'],
|
||||
passed: data?.metrics?.['HealthCheckPassed'] || 0,
|
||||
total: data?.metrics?.['HealthCheckTotal'] || 0,
|
||||
});
|
||||
});
|
||||
}, []);
|
||||
return (
|
||||
|
||||
@@ -6,6 +6,7 @@ import { healthDataProps } from '.';
|
||||
import { Tag, Tooltip, Utils } from 'knowdesign';
|
||||
import api from '@src/api';
|
||||
import { QuestionCircleOutlined } from '@ant-design/icons';
|
||||
import { HealthStateEnum } from '../HealthState';
|
||||
|
||||
export default () => {
|
||||
const routeParams = useParams<{
|
||||
@@ -14,26 +15,21 @@ export default () => {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
score: 0,
|
||||
state: HealthStateEnum.UNKNOWN,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
alive: 0,
|
||||
});
|
||||
const cardItems = ['Partitions', 'PartitionsSkew', 'Leaders', 'LeadersSkew', 'LogSize'];
|
||||
const healthItems = ['HealthScore_Brokers', 'HealthCheckPassed_Brokers', 'HealthCheckTotal_Brokers', 'Alive'];
|
||||
const healthItems = ['HealthCheckPassed_Brokers', 'HealthCheckTotal_Brokers', 'HealthState'];
|
||||
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
// 获取左侧健康度
|
||||
const brokerMetric = Utils.post(api.getBrokerMetricPoints(Number(routeParams.clusterId)), healthItems).then((data: any) => {
|
||||
const healthResData: any = {};
|
||||
// healthResData.score = data?.find((item:any) => item.metricName === 'HealthScore_Brokers')?.value || 0;
|
||||
// healthResData.passed = data?.find((item:any) => item.metricName === 'HealthCheckPassed_Brokers')?.value || 0;
|
||||
// healthResData.total = data?.find((item:any) => item.metricName === 'HealthCheckTotal_Brokers')?.value || 0;
|
||||
healthResData.score = data?.metrics?.['HealthScore_Brokers'] || 0;
|
||||
healthResData.passed = data?.metrics?.['HealthCheckPassed_Brokers'] || 0;
|
||||
healthResData.total = data?.metrics?.['HealthCheckTotal_Brokers'] || 0;
|
||||
healthResData.alive = data?.metrics?.['Alive'] || 0;
|
||||
setHealthData(healthResData);
|
||||
setHealthData({
|
||||
state: data?.metrics?.['HealthState'],
|
||||
passed: data?.metrics?.['HealthCheckPassed_Brokers'] || 0,
|
||||
total: data?.metrics?.['HealthCheckTotal_Brokers'] || 0,
|
||||
});
|
||||
});
|
||||
// 获取右侧状态
|
||||
const brokersState = Utils.request(api.getBrokersState(routeParams?.clusterId)).then((data) => {
|
||||
@@ -115,6 +111,6 @@ export default () => {
|
||||
setLoading(false);
|
||||
});
|
||||
}, [routeParams.clusterId]);
|
||||
// console.log('cardData', cardData, healthData);
|
||||
|
||||
return <CardBar scene="broker" healthData={healthData} cardColumns={cardData} loading={loading}></CardBar>;
|
||||
};
|
||||
|
||||
@@ -4,6 +4,7 @@ import CardBar from '@src/components/CardBar';
|
||||
import { healthDataProps } from '.';
|
||||
import { Utils } from 'knowdesign';
|
||||
import api from '@src/api';
|
||||
import { HealthStateEnum } from '../HealthState';
|
||||
|
||||
export default () => {
|
||||
const routeParams = useParams<{
|
||||
@@ -12,22 +13,17 @@ export default () => {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
score: 0,
|
||||
state: HealthStateEnum.UNKNOWN,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
alive: 0,
|
||||
});
|
||||
const [healthDetail, setHealthDetail] = useState([]);
|
||||
const cardItems = ['Groups', 'GroupActives', 'GroupEmptys', 'GroupRebalances', 'GroupDeads'];
|
||||
const healthItems = ['HealthScore_Groups', 'HealthCheckPassed_Groups', 'HealthCheckTotal_Groups', 'Alive'];
|
||||
const healthItems = ['HealthCheckPassed_Groups', 'HealthCheckTotal_Groups', 'HealthState'];
|
||||
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
Utils.post(api.getMetricPointsLatest(Number(routeParams.clusterId)), cardItems.concat(healthItems)).then((data: any) => {
|
||||
setLoading(false);
|
||||
// setCardData(data
|
||||
// .filter((item: any) => cardItems.indexOf(item.metricName) >= 0)
|
||||
// .map((item: any) => ({ title: item.metricName, value: item.value }))
|
||||
// )
|
||||
setCardData(
|
||||
cardItems.map((item) => {
|
||||
if (item === 'GroupDeads') {
|
||||
@@ -36,12 +32,11 @@ export default () => {
|
||||
return { title: item, value: data.metrics[item] };
|
||||
})
|
||||
);
|
||||
const healthResData: any = {};
|
||||
healthResData.score = data.metrics['HealthScore_Groups'] || 0;
|
||||
healthResData.passed = data.metrics['HealthCheckPassed_Groups'] || 0;
|
||||
healthResData.total = data.metrics['HealthCheckTotal_Groups'] || 0;
|
||||
healthResData.alive = data.metrics['Alive'] || 0;
|
||||
setHealthData(healthResData);
|
||||
setHealthData({
|
||||
state: data?.metrics?.['HealthState'],
|
||||
passed: data?.metrics?.['HealthCheckPassed_Groups'] || 0,
|
||||
total: data?.metrics?.['HealthCheckTotal_Groups'] || 0,
|
||||
});
|
||||
});
|
||||
}, []);
|
||||
return <CardBar scene="group" healthData={healthData} cardColumns={cardData} loading={loading}></CardBar>;
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import { useParams } from 'react-router-dom';
|
||||
import CardBar from '@src/components/CardBar';
|
||||
import { healthDataProps } from '.';
|
||||
import { Tag, Utils } from 'knowdesign';
|
||||
import { Utils } from 'knowdesign';
|
||||
import Api from '@src/api';
|
||||
|
||||
export default () => {
|
||||
@@ -12,14 +11,7 @@ export default () => {
|
||||
}>();
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
score: 0,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
alive: 0,
|
||||
});
|
||||
const cardItems = ['Partitions', 'PartitionsSkew', 'Leaders', 'LeadersSkew', 'LogSize'];
|
||||
const healthItems = ['HealthScore_Brokers', 'HealthCheckPassed_Brokers', 'HealthCheckTotal_Brokers', 'alive'];
|
||||
|
||||
const getCordRightMap = (data: any) => {
|
||||
const cordRightMap = [
|
||||
{
|
||||
@@ -49,6 +41,7 @@ export default () => {
|
||||
];
|
||||
return cordRightMap;
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
// 获取状态
|
||||
|
||||
@@ -32,7 +32,6 @@ const LoadRebalanceCardBar = (props: any) => {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [normsVisible, setNormsVisible] = useState(null);
|
||||
const cardItems = ['AclEnable', 'Acls', 'AclUsers', 'AclTopics', 'AclGroups'];
|
||||
const onClose = () => {
|
||||
setNormsVisible(false);
|
||||
};
|
||||
@@ -45,11 +44,9 @@ const LoadRebalanceCardBar = (props: any) => {
|
||||
// 获取右侧状态
|
||||
getCartInfo()
|
||||
.then((res: any) => {
|
||||
// const { AclEnable, Acls, AclUsers, AclTopics, AclGroups } = res.metrics;
|
||||
const { next, sub, status } = res;
|
||||
const { cpu, disk, bytesIn, bytesOut } = sub;
|
||||
const newNextDate: any = transUnitTimePro(moment(next).valueOf() - moment().valueOf());
|
||||
// const newNextDate = parseInt(`${transUnitTimePro(moment(next).valueOf() - moment().valueOf())}`);
|
||||
const cardMap = [
|
||||
{
|
||||
title() {
|
||||
@@ -80,20 +77,15 @@ const LoadRebalanceCardBar = (props: any) => {
|
||||
>
|
||||
{!status ? '已均衡' : '未均衡'}
|
||||
</Tag>
|
||||
{/* <Tag style={{ padding: '2px 4px', backgroundColor: 'rgba(85,110,230,0.10)', color: '#556EE6' }}>已均衡</Tag> */}
|
||||
</div>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between' }}>
|
||||
<span>
|
||||
周期均衡 <IconFont className="cutomIcon" type={`${!status ? 'icon-zhengchang' : 'icon-warning'}`} />
|
||||
</span>
|
||||
{/* <span>
|
||||
周期均衡 <IconFont className="cutomIcon" type="icon-zhengchang" />
|
||||
</span> */}
|
||||
<span>
|
||||
距下次均衡还剩{newNextDate?.value || 0}
|
||||
{newNextDate?.unit || '分钟'}
|
||||
</span>
|
||||
{/* {<span>距下次均衡还剩{1}小时</span>} */}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
@@ -106,73 +98,6 @@ const LoadRebalanceCardBar = (props: any) => {
|
||||
padding: '12px 12px 8px 12px',
|
||||
},
|
||||
},
|
||||
// {
|
||||
// // title: 'CPU avg',
|
||||
// title() {
|
||||
// return (
|
||||
// <div>
|
||||
// <span style={{ display: 'inline-block', marginRight: '8px' }}>CPU AVG</span>
|
||||
// {!cpu?.interval && cpu?.interval !== 0 && (
|
||||
// <Tooltip overlayClassName="rebalance-tooltip" title="未设置均衡策略">
|
||||
// <QuestionCircleOutlined />
|
||||
// </Tooltip>
|
||||
// )}
|
||||
// {/* <IconFont className="cutomIcon" onClick={() => setNormsVisible(true)} type="icon-shezhi"></IconFont> */}
|
||||
// </div>
|
||||
// );
|
||||
// },
|
||||
// value(visibleType: boolean) {
|
||||
// return (
|
||||
// <div id="CPU" style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'flex-end' }}>
|
||||
// <div style={{ display: 'inline-block' }}>
|
||||
// <div style={{ margin: '5px 0', fontFamily: 'DIDIFD-Medium' }}>
|
||||
// <span style={{ fontSize: '24px' }}>{cpu?.avg || 0}</span>
|
||||
// <span style={{ fontSize: '14px', display: 'inline-block', marginLeft: '4px' }}>%</span>
|
||||
// </div>
|
||||
// <div style={{ marginTop: '-4px', display: 'flex', justifyContent: 'space-between' }}>
|
||||
// <span>均衡区间: ±{cpu?.interval || 0}%</span>
|
||||
// </div>
|
||||
// </div>
|
||||
// <Popover
|
||||
// // visible={visibleType} // 修改为hover柱状图
|
||||
// overlayClassName="custom-popover"
|
||||
// content={
|
||||
// <div style={{ color: '#495057' }}>
|
||||
// <div>
|
||||
// <IconFont className="cutomIcon cutomIcon-red" type="icon-chaoguo" />
|
||||
// 超过均衡区间的有: {cpu?.bigNu || 0}
|
||||
// </div>
|
||||
// <div style={{ margin: '6px 0' }}>
|
||||
// <IconFont className="cutomIcon cutomIcon-green" type="icon-qujian" />
|
||||
// 在均衡区间内的有: {cpu?.betweenNu || 0}
|
||||
// </div>
|
||||
// <div>
|
||||
// <IconFont className="cutomIcon cutomIcon-red" type="icon-diyu" />
|
||||
// 低于均衡区间的有: {cpu?.smallNu || 0}
|
||||
// </div>
|
||||
// </div>
|
||||
// }
|
||||
// getPopupContainer={(triggerNode: any) => {
|
||||
// return triggerNode;
|
||||
// }}
|
||||
// color="#ffffff"
|
||||
// >
|
||||
// <div style={{ width: '44px', height: '30px' }}>
|
||||
// <StateChart
|
||||
// data={[
|
||||
// { name: 'bigNu', value: cpu?.bigNu || 0 },
|
||||
// { name: 'betweenNu', value: cpu?.betweenNu || 0 },
|
||||
// { name: 'smallNu', value: cpu?.smallNu || 0 },
|
||||
// ]}
|
||||
// />
|
||||
// </div>
|
||||
// </Popover>
|
||||
// </div>
|
||||
// );
|
||||
// },
|
||||
// className: 'custom-card-bar',
|
||||
// valueClassName: 'custom-card-bar-value',
|
||||
// },
|
||||
{
|
||||
title() {
|
||||
return (
|
||||
|
||||
@@ -5,8 +5,10 @@ import { healthDataProps } from '.';
|
||||
import { Utils } from 'knowdesign';
|
||||
import { IconFont } from '@knowdesign/icons';
|
||||
import api from '@src/api';
|
||||
import { healthScoreCondition } from './const';
|
||||
import { hashDataParse } from '@src/constants/common';
|
||||
import { HealthStateEnum } from '../HealthState';
|
||||
|
||||
const healthItems = ['HealthCheckPassed', 'HealthCheckTotal', 'HealthState'];
|
||||
|
||||
const renderValue = (v: string | number | ((visibleType?: boolean) => JSX.Element), visibleType?: boolean) => {
|
||||
return typeof v === 'function' ? v(visibleType) : v;
|
||||
@@ -19,14 +21,12 @@ export default (props: { record: any }) => {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
score: 0,
|
||||
state: HealthStateEnum.UNKNOWN,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
alive: 0,
|
||||
});
|
||||
const [healthDetail, setHealthDetail] = useState([]);
|
||||
const [clusterAlive, setClusterAlive] = useState(0);
|
||||
const healthItems = ['HealthScore', 'HealthCheckPassed', 'HealthCheckTotal', 'alive'];
|
||||
|
||||
const getNumAndSubTitles = (cardColumnsItemData: any) => {
|
||||
return (
|
||||
<div style={{ width: '100%', display: 'flex', alignItems: 'end' }}>
|
||||
@@ -40,21 +40,21 @@ export default (props: { record: any }) => {
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
const topicName = hashDataParse(location.hash)['topicName'];
|
||||
let detailHealthPromise = Utils.post(api.getTopicMetricPointsLatest(Number(routeParams.clusterId), topicName), healthItems).then(
|
||||
const detailHealthPromise = Utils.post(api.getTopicMetricPointsLatest(Number(routeParams.clusterId), topicName), healthItems).then(
|
||||
(data: any) => {
|
||||
let healthResData: any = {};
|
||||
healthResData.score = data.metrics['HealthScore'] || 0;
|
||||
healthResData.passed = data.metrics['HealthCheckPassed'] || 0;
|
||||
healthResData.total = data.metrics['HealthCheckTotal'] || 0;
|
||||
// healthResData.alive = data.metrics['alive'] || 0
|
||||
setHealthData(healthResData);
|
||||
setHealthData({
|
||||
state: data.metrics['HealthState'],
|
||||
passed: data.metrics['HealthCheckPassed'] || 0,
|
||||
total: data.metrics['HealthCheckTotal'] || 0,
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
let detailStatePromise = Utils.request(api.getTopicState(Number(routeParams.clusterId), topicName)).then((topicHealthState: any) => {
|
||||
const detailStatePromise = Utils.request(api.getTopicState(Number(routeParams.clusterId), topicName)).then((topicHealthState: any) => {
|
||||
setCardData([
|
||||
{
|
||||
title: 'Partitions',
|
||||
@@ -87,13 +87,12 @@ export default (props: { record: any }) => {
|
||||
]);
|
||||
});
|
||||
// 获取集群维度的指标信息
|
||||
let clusterStatePromise = Utils.post(api.getMetricPointsLatest(Number(routeParams.clusterId)), ['Alive']).then(
|
||||
const clusterStatePromise = Utils.post(api.getMetricPointsLatest(Number(routeParams.clusterId)), ['Alive']).then(
|
||||
(clusterHealthState: any) => {
|
||||
let clusterAlive = clusterHealthState?.metrics?.Alive || 0;
|
||||
setClusterAlive(clusterAlive);
|
||||
setClusterAlive(clusterHealthState?.metrics?.Alive || 0);
|
||||
}
|
||||
);
|
||||
Promise.all([detailHealthPromise, detailStatePromise, clusterStatePromise]).then((res) => {
|
||||
Promise.all([detailHealthPromise, detailStatePromise, clusterStatePromise]).then(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}, []);
|
||||
@@ -101,7 +100,7 @@ export default (props: { record: any }) => {
|
||||
<CardBar
|
||||
record={record}
|
||||
scene="topic"
|
||||
healthData={{ ...healthData, alive: clusterAlive }}
|
||||
healthData={{ ...healthData, state: clusterAlive ? healthData.state : HealthStateEnum.DOWN }}
|
||||
cardColumns={cardData}
|
||||
showCardBg={false}
|
||||
loading={loading}
|
||||
|
||||
@@ -4,6 +4,7 @@ import CardBar from '@src/components/CardBar';
|
||||
import { healthDataProps } from '.';
|
||||
import { Utils } from 'knowdesign';
|
||||
import api from '@src/api';
|
||||
import { HealthStateEnum } from '../HealthState';
|
||||
|
||||
export default () => {
|
||||
const routeParams = useParams<{
|
||||
@@ -12,14 +13,12 @@ export default () => {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
score: 0,
|
||||
state: HealthStateEnum.UNKNOWN,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
alive: 0,
|
||||
});
|
||||
const [healthDetail, setHealthDetail] = useState([]);
|
||||
const cardItems = ['Topics', 'Partitions', 'PartitionNoLeader', 'PartitionMinISR_S', 'PartitionMinISR_E', 'PartitionURP'];
|
||||
const healthItems = ['HealthScore_Topics', 'HealthCheckPassed_Topics', 'HealthCheckTotal_Topics', 'Alive'];
|
||||
const healthItems = ['HealthCheckPassed_Topics', 'HealthCheckTotal_Topics', 'HealthState'];
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
Utils.post(api.getMetricPointsLatest(Number(routeParams.clusterId)), cardItems.concat(healthItems)).then((data: any) => {
|
||||
@@ -42,12 +41,6 @@ export default () => {
|
||||
PartitionURP: 'URP',
|
||||
PartitionNoLeader: 'No Leader',
|
||||
};
|
||||
// setCardData(data
|
||||
// .filter(item => cardItems.indexOf(item.name) >= 0)
|
||||
// .map(item => {
|
||||
// return { title: metricElmMap[item.name] || item.name, value: item.value }
|
||||
// })
|
||||
// )
|
||||
setCardData(
|
||||
cardItems.map((item) => {
|
||||
let title = item;
|
||||
@@ -66,12 +59,11 @@ export default () => {
|
||||
return { title, value: data.metrics[item] };
|
||||
})
|
||||
);
|
||||
const healthResData: any = {};
|
||||
healthResData.score = data.metrics['HealthScore_Topics'] || 0;
|
||||
healthResData.passed = data.metrics['HealthCheckPassed_Topics'] || 0;
|
||||
healthResData.total = data.metrics['HealthCheckTotal_Topics'] || 0;
|
||||
healthResData.alive = data.metrics['Alive'] || 0;
|
||||
setHealthData(healthResData);
|
||||
setHealthData({
|
||||
state: data.metrics['HealthState'],
|
||||
passed: data.metrics['HealthCheckPassed_Topics'] || 0,
|
||||
total: data.metrics['HealthCheckTotal_Topics'] || 0,
|
||||
});
|
||||
});
|
||||
}, []);
|
||||
return <CardBar scene="topic" healthData={healthData} cardColumns={cardData} loading={loading}></CardBar>;
|
||||
|
||||
@@ -0,0 +1,120 @@
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import { useParams } from 'react-router-dom';
|
||||
import CardBar, { healthDataProps } from './index';
|
||||
import { Utils } from 'knowdesign';
|
||||
import api from '@src/api';
|
||||
import { HealthStateEnum } from '../HealthState';
|
||||
|
||||
interface ZookeeperState {
|
||||
aliveFollowerCount: number;
|
||||
aliveObserverCount: number;
|
||||
aliveServerCount: number;
|
||||
healthCheckPassed: number;
|
||||
healthCheckTotal: number;
|
||||
healthState: number;
|
||||
leaderNode: string;
|
||||
totalFollowerCount: number;
|
||||
totalObserverCount: number;
|
||||
totalServerCount: number;
|
||||
watchCount: number;
|
||||
}
|
||||
|
||||
const getVal = (val: string | number | undefined | null) => {
|
||||
return val === undefined || val === null || val === '' ? '-' : val;
|
||||
};
|
||||
|
||||
const ZookeeperCard = () => {
|
||||
const { clusterId } = useParams<{
|
||||
clusterId: string;
|
||||
}>();
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [cardData, setCardData] = useState([]);
|
||||
const [healthData, setHealthData] = useState<healthDataProps>({
|
||||
state: HealthStateEnum.UNKNOWN,
|
||||
passed: 0,
|
||||
total: 0,
|
||||
});
|
||||
|
||||
const getHealthData = () => {
|
||||
return Utils.post(api.getZookeeperMetricsInfo(Number(clusterId)), ['HealthCheckPassed', 'HealthCheckTotal', 'HealthState']).then(
|
||||
(data: any) => {
|
||||
setHealthData({
|
||||
state: data?.metrics?.['HealthState'],
|
||||
passed: data?.metrics?.['HealthCheckPassed'] || 0,
|
||||
total: data?.metrics?.['HealthCheckTotal'] || 0,
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
const getCardInfo = () => {
|
||||
return Utils.request(api.getZookeeperState(clusterId)).then((res: ZookeeperState) => {
|
||||
const {
|
||||
aliveFollowerCount,
|
||||
aliveObserverCount,
|
||||
aliveServerCount,
|
||||
totalFollowerCount,
|
||||
totalObserverCount,
|
||||
totalServerCount,
|
||||
watchCount,
|
||||
leaderNode,
|
||||
} = res || {};
|
||||
const cardMap = [
|
||||
{
|
||||
title: 'Node Count',
|
||||
value() {
|
||||
return (
|
||||
<span>
|
||||
{aliveServerCount || '-'}/{totalServerCount || '-'}
|
||||
</span>
|
||||
);
|
||||
},
|
||||
customStyle: {
|
||||
// 自定义cardbar样式
|
||||
marginLeft: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
title: 'Watch Count',
|
||||
value: getVal(watchCount),
|
||||
},
|
||||
{
|
||||
title: 'Leader',
|
||||
value() {
|
||||
return <span style={{ fontSize: 24 }}>{leaderNode || '-'}</span>;
|
||||
},
|
||||
},
|
||||
{
|
||||
title: 'Follower',
|
||||
value() {
|
||||
return (
|
||||
<span>
|
||||
{getVal(aliveFollowerCount)}/{getVal(totalFollowerCount)}
|
||||
</span>
|
||||
);
|
||||
},
|
||||
},
|
||||
{
|
||||
title: 'Observer',
|
||||
value() {
|
||||
return (
|
||||
<span>
|
||||
{getVal(aliveObserverCount)}/{getVal(totalObserverCount)}
|
||||
</span>
|
||||
);
|
||||
},
|
||||
},
|
||||
];
|
||||
setCardData(cardMap);
|
||||
});
|
||||
};
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
Promise.all([getHealthData(), getCardInfo()]).finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}, [clusterId]);
|
||||
return <CardBar scene="zookeeper" healthData={healthData} cardColumns={cardData} loading={loading}></CardBar>;
|
||||
};
|
||||
|
||||
export default ZookeeperCard;
|
||||
@@ -10,48 +10,15 @@
|
||||
height: 88px;
|
||||
width: 100%;
|
||||
display: flex;
|
||||
// justify-content: space-between;
|
||||
align-items: center;
|
||||
.card-bar-health {
|
||||
width: 240px;
|
||||
height: 70px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
// justify-content: space-between;
|
||||
.card-bar-health-process {
|
||||
height: 100%;
|
||||
margin-right: 24px;
|
||||
.dcloud-progress-inner {
|
||||
border-radius: 50%;
|
||||
}
|
||||
.dcloud-progress-status-normal {
|
||||
.dcloud-progress-inner {
|
||||
background: rgba(85, 110, 230, 0.03);
|
||||
}
|
||||
.dcloud-progress-inner:not(.dcloud-progress-circle-gradient) .dcloud-progress-circle-path {
|
||||
stroke: rgb(85, 110, 230);
|
||||
}
|
||||
}
|
||||
.dcloud-progress-status-success {
|
||||
.dcloud-progress-inner {
|
||||
background: rgba(0, 192, 162, 0.03);
|
||||
}
|
||||
.dcloud-progress-inner:not(.dcloud-progress-circle-gradient) .dcloud-progress-circle-path {
|
||||
stroke: rgb(0, 192, 162);
|
||||
}
|
||||
}
|
||||
.dcloud-progress-status-exception {
|
||||
.dcloud-progress-inner {
|
||||
background: rgba(255, 112, 102, 0.03);
|
||||
}
|
||||
.dcloud-progress-inner:not(.dcloud-progress-circle-gradient) .dcloud-progress-circle-path {
|
||||
stroke: rgb(255, 112, 102);
|
||||
}
|
||||
}
|
||||
.dcloud-progress-inner {
|
||||
font-family: DIDIFD-Regular;
|
||||
font-size: 40px !important;
|
||||
}
|
||||
padding-top: 30px;
|
||||
margin-right: 20px;
|
||||
}
|
||||
.state {
|
||||
font-size: 13px;
|
||||
@@ -61,20 +28,6 @@
|
||||
line-height: 20px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
.health-status-image {
|
||||
width: 15px;
|
||||
height: 15px;
|
||||
background-size: cover;
|
||||
}
|
||||
.health-status-image-success {
|
||||
background-image: url('../../assets/health-status-success.png');
|
||||
}
|
||||
.health-status-image-exception {
|
||||
background-image: url('../../assets/health-status-exception.png');
|
||||
}
|
||||
.health-status-image-normal {
|
||||
background-image: url('../../assets/health-status-normal.png');
|
||||
}
|
||||
}
|
||||
.value-bar {
|
||||
display: flex;
|
||||
|
||||
@@ -1,25 +1,24 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { useParams } from 'react-router-dom';
|
||||
import { Drawer, Select, Spin, Table } from 'knowdesign';
|
||||
import { Drawer, Spin, Table, Utils } from 'knowdesign';
|
||||
import { IconFont } from '@knowdesign/icons';
|
||||
import { Utils, Progress } from 'knowdesign';
|
||||
import './index.less';
|
||||
import api from '@src/api';
|
||||
import moment from 'moment';
|
||||
import TagsWithHide from '../TagsWithHide/index';
|
||||
import { getHealthProcessColor } from '@src/pages/SingleClusterDetail/config';
|
||||
import HealthState, { getHealthStateDesc, getHealthStateEmoji, HealthStateEnum } from '../HealthState';
|
||||
import { getConfigItemDetailDesc } from '@src/pages/SingleClusterDetail/config';
|
||||
|
||||
export interface healthDataProps {
|
||||
score: number;
|
||||
state: HealthStateEnum;
|
||||
passed: number;
|
||||
total: number;
|
||||
alive: number;
|
||||
}
|
||||
export interface CardBarProps {
|
||||
cardColumns?: any[];
|
||||
healthData?: healthDataProps;
|
||||
showCardBg?: boolean;
|
||||
scene: 'topic' | 'broker' | 'group';
|
||||
scene: 'topic' | 'broker' | 'group' | 'zookeeper';
|
||||
record?: any;
|
||||
loading?: boolean;
|
||||
needProgress?: boolean;
|
||||
@@ -27,36 +26,27 @@ export interface CardBarProps {
|
||||
const renderValue = (v: string | number | ((visibleType?: boolean) => JSX.Element), visibleType?: boolean) => {
|
||||
return typeof v === 'function' ? v(visibleType) : v;
|
||||
};
|
||||
const statusTxtEmojiMap = {
|
||||
success: {
|
||||
emoji: '👍',
|
||||
txt: '优异',
|
||||
},
|
||||
normal: {
|
||||
emoji: '😊',
|
||||
txt: '正常',
|
||||
},
|
||||
exception: {
|
||||
emoji: '👻',
|
||||
txt: '异常',
|
||||
},
|
||||
};
|
||||
const sceneCodeMap = {
|
||||
topic: {
|
||||
code: 2,
|
||||
fieldName: 'topicName',
|
||||
alias: 'Topics',
|
||||
},
|
||||
broker: {
|
||||
code: 1,
|
||||
fieldName: 'brokerId',
|
||||
alias: 'Brokers',
|
||||
},
|
||||
topic: {
|
||||
code: 2,
|
||||
fieldName: 'topicName',
|
||||
alias: 'Topics',
|
||||
},
|
||||
group: {
|
||||
code: 3,
|
||||
fieldName: 'groupName',
|
||||
alias: 'Consumers',
|
||||
},
|
||||
zookeeper: {
|
||||
code: 4,
|
||||
fieldName: 'zookeeperId',
|
||||
alias: 'Zookeeper',
|
||||
},
|
||||
};
|
||||
const CardColumnsItem: any = (cardItem: any) => {
|
||||
const { cardColumnsItemData, showCardBg } = cardItem;
|
||||
@@ -92,16 +82,7 @@ const CardBar = (props: CardBarProps) => {
|
||||
}>();
|
||||
const { healthData, cardColumns, showCardBg = true, scene, record, loading, needProgress = true } = props;
|
||||
const [detailDrawerVisible, setDetailDrawerVisible] = useState(false);
|
||||
const [progressStatus, setProgressStatus] = useState<'success' | 'exception' | 'normal'>('success');
|
||||
const [healthCheckDetailList, setHealthCheckDetailList] = useState([]);
|
||||
const [isAlive, setIsAlive] = useState(true);
|
||||
|
||||
useEffect(() => {
|
||||
if (healthData) {
|
||||
setProgressStatus(!isAlive ? 'exception' : healthData.score >= 90 ? 'success' : 'normal');
|
||||
setIsAlive(healthData.alive === 1);
|
||||
}
|
||||
}, [healthData, isAlive]);
|
||||
|
||||
useEffect(() => {
|
||||
const sceneObj = sceneCodeMap[scene];
|
||||
@@ -120,23 +101,24 @@ const CardBar = (props: CardBarProps) => {
|
||||
const columns = [
|
||||
{
|
||||
title: '检查项',
|
||||
dataIndex: 'configDesc',
|
||||
key: 'configDesc',
|
||||
},
|
||||
{
|
||||
title: '权重',
|
||||
dataIndex: 'weightPercent',
|
||||
key: 'weightPercent',
|
||||
},
|
||||
{
|
||||
title: '得分',
|
||||
dataIndex: 'score',
|
||||
key: 'score',
|
||||
dataIndex: 'checkConfig',
|
||||
render(config: any, record: any) {
|
||||
let valueGroup = {};
|
||||
try {
|
||||
valueGroup = JSON.parse(config.value);
|
||||
} catch (e) {
|
||||
//
|
||||
}
|
||||
return getConfigItemDetailDesc(record.configItem, valueGroup) || record.configDesc || '-';
|
||||
},
|
||||
},
|
||||
// {
|
||||
// title: '得分',
|
||||
// dataIndex: 'score',
|
||||
// },
|
||||
{
|
||||
title: '检查时间',
|
||||
dataIndex: 'updateTime',
|
||||
key: 'updateTime',
|
||||
render: (value: number) => {
|
||||
return moment(value).format('YYYY-MM-DD HH:mm:ss');
|
||||
},
|
||||
@@ -144,8 +126,6 @@ const CardBar = (props: CardBarProps) => {
|
||||
{
|
||||
title: '检查结果',
|
||||
dataIndex: 'passed',
|
||||
key: 'passed',
|
||||
width: 280,
|
||||
render(value: boolean, record: any) {
|
||||
const icon = value ? <IconFont type="icon-zhengchang"></IconFont> : <IconFont type="icon-yichang"></IconFont>;
|
||||
const txt = value ? '已通过' : '未通过';
|
||||
@@ -168,40 +148,13 @@ const CardBar = (props: CardBarProps) => {
|
||||
{!loading && healthData && needProgress && (
|
||||
<div className="card-bar-health">
|
||||
<div className="card-bar-health-process">
|
||||
<Progress
|
||||
width={70}
|
||||
type="circle"
|
||||
percent={!isAlive ? 100 : healthData.score}
|
||||
status={progressStatus}
|
||||
format={(percent, successPercent) => {
|
||||
return !isAlive ? (
|
||||
<div
|
||||
style={{
|
||||
fontFamily: 'HelveticaNeue-Medium',
|
||||
fontSize: 22,
|
||||
color: getHealthProcessColor(healthData.score, healthData.alive),
|
||||
}}
|
||||
>
|
||||
Down
|
||||
</div>
|
||||
) : (
|
||||
<div
|
||||
style={{
|
||||
textIndent: Math.round(percent) >= 100 ? '-4px' : '',
|
||||
color: getHealthProcessColor(healthData.score, healthData.alive),
|
||||
}}
|
||||
>
|
||||
{Math.round(percent)}
|
||||
</div>
|
||||
);
|
||||
}}
|
||||
strokeWidth={3}
|
||||
/>
|
||||
<HealthState state={healthData?.state} width={74} height={74} />
|
||||
</div>
|
||||
<div>
|
||||
<div className="state">
|
||||
<div className={`health-status-image health-status-image-${progressStatus}`}></div>
|
||||
{sceneCodeMap[scene].alias}状态{statusTxtEmojiMap[progressStatus].txt}
|
||||
{getHealthStateEmoji(healthData?.state)}
|
||||
{sceneCodeMap[scene].alias}
|
||||
{getHealthStateDesc(healthData?.state)}
|
||||
</div>
|
||||
<div className="value-bar">
|
||||
<div className="value">{`${healthData?.passed}/${healthData?.total}`}</div>
|
||||
|
||||
@@ -26,7 +26,6 @@ const OptionsDefault = [
|
||||
|
||||
const NodeScope = ({ nodeScopeModule, change }: propsType) => {
|
||||
const {
|
||||
hasCustomScope,
|
||||
customScopeList: customList,
|
||||
scopeName = '',
|
||||
scopeLabel = '自定义范围',
|
||||
@@ -129,79 +128,75 @@ const NodeScope = ({ nodeScopeModule, change }: propsType) => {
|
||||
</Space>
|
||||
</Radio.Group>
|
||||
</div>
|
||||
{hasCustomScope && (
|
||||
<div className="flx_r">
|
||||
<h6 className="time_title">{scopeLabel}</h6>
|
||||
<div className="custom-scope">
|
||||
<div className="check-row">
|
||||
<Checkbox className="check-all" indeterminate={indeterminate} onChange={onCheckAllChange} checked={checkAll}>
|
||||
全选
|
||||
</Checkbox>
|
||||
<Input
|
||||
className="search-input"
|
||||
suffix={<IconFont type="icon-fangdajing" style={{ fontSize: '16px' }} />}
|
||||
size="small"
|
||||
placeholder={searchPlaceholder}
|
||||
onChange={(e) => setScopeSearchValue(e.target.value)}
|
||||
/>
|
||||
</div>
|
||||
<div className="fixed-height">
|
||||
<Checkbox.Group style={{ width: '100%' }} onChange={checkChange} value={checkedListTemp}>
|
||||
<Row gutter={[10, 12]}>
|
||||
{customList
|
||||
.filter((item) => item.label.includes(scopeSearchValue))
|
||||
.map((item) => (
|
||||
<Col span={12} key={item.value}>
|
||||
<Checkbox value={item.value}>{item.label}</Checkbox>
|
||||
</Col>
|
||||
))}
|
||||
</Row>
|
||||
</Checkbox.Group>
|
||||
</div>
|
||||
<div className="flx_r">
|
||||
<h6 className="time_title">{scopeLabel}</h6>
|
||||
<div className="custom-scope">
|
||||
<div className="check-row">
|
||||
<Checkbox className="check-all" indeterminate={indeterminate} onChange={onCheckAllChange} checked={checkAll}>
|
||||
全选
|
||||
</Checkbox>
|
||||
<Input
|
||||
className="search-input"
|
||||
suffix={<IconFont type="icon-fangdajing" style={{ fontSize: '16px' }} />}
|
||||
size="small"
|
||||
placeholder={searchPlaceholder}
|
||||
onChange={(e) => setScopeSearchValue(e.target.value)}
|
||||
/>
|
||||
</div>
|
||||
<div className="fixed-height">
|
||||
<Checkbox.Group style={{ width: '100%' }} onChange={checkChange} value={checkedListTemp}>
|
||||
<Row gutter={[10, 12]}>
|
||||
{customList
|
||||
.filter((item) => item.label.includes(scopeSearchValue))
|
||||
.map((item) => (
|
||||
<Col span={12} key={item.value}>
|
||||
<Checkbox value={item.value}>{item.label}</Checkbox>
|
||||
</Col>
|
||||
))}
|
||||
</Row>
|
||||
</Checkbox.Group>
|
||||
</div>
|
||||
|
||||
<div className="btn-con">
|
||||
<Button
|
||||
type="primary"
|
||||
size="small"
|
||||
className="btn-sure"
|
||||
onClick={customSure}
|
||||
disabled={checkedListTemp?.length > 0 ? false : true}
|
||||
>
|
||||
确定
|
||||
</Button>
|
||||
<Button size="small" onClick={customCancel}>
|
||||
取消
|
||||
</Button>
|
||||
</div>
|
||||
<div className="btn-con">
|
||||
<Button
|
||||
type="primary"
|
||||
size="small"
|
||||
className="btn-sure"
|
||||
onClick={customSure}
|
||||
disabled={checkedListTemp?.length > 0 ? false : true}
|
||||
>
|
||||
确定
|
||||
</Button>
|
||||
<Button size="small" onClick={customCancel}>
|
||||
取消
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
return (
|
||||
<>
|
||||
<div id="d-node-scope">
|
||||
<div className="scope-title">{scopeName}筛选:</div>
|
||||
<Popover
|
||||
trigger={['click']}
|
||||
visible={popVisible}
|
||||
content={clickContent}
|
||||
placement="bottomRight"
|
||||
overlayClassName={`d-node-scope-popover ${hasCustomScope ? 'large-size' : ''}`}
|
||||
onVisibleChange={visibleChange}
|
||||
>
|
||||
<span className="input-span">
|
||||
<Input
|
||||
className={isTop ? 'relativeTime d-node-scope-input' : 'absoluteTime d-node-scope-input'}
|
||||
value={inputValue}
|
||||
readOnly={true}
|
||||
suffix={<IconFont type="icon-jiantou1" rotate={90} style={{ color: '#74788D' }}></IconFont>}
|
||||
/>
|
||||
</span>
|
||||
</Popover>
|
||||
</div>
|
||||
</>
|
||||
<div id="d-node-scope">
|
||||
<div className="scope-title">{scopeName}筛选:</div>
|
||||
<Popover
|
||||
trigger={['click']}
|
||||
visible={popVisible}
|
||||
content={clickContent}
|
||||
placement="bottomRight"
|
||||
overlayClassName="d-node-scope-popover large-size"
|
||||
onVisibleChange={visibleChange}
|
||||
>
|
||||
<span className="input-span">
|
||||
<Input
|
||||
className={isTop ? 'relativeTime d-node-scope-input' : 'absoluteTime d-node-scope-input'}
|
||||
value={inputValue}
|
||||
readOnly={true}
|
||||
suffix={<IconFont type="icon-jiantou1" rotate={90} style={{ color: '#74788D' }}></IconFont>}
|
||||
/>
|
||||
</span>
|
||||
</Popover>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -47,7 +47,6 @@ export interface IcustomScope {
|
||||
}
|
||||
|
||||
export interface InodeScopeModule {
|
||||
hasCustomScope: boolean;
|
||||
customScopeList: IcustomScope[];
|
||||
scopeName?: string;
|
||||
scopeLabel?: string;
|
||||
@@ -87,7 +86,6 @@ const GRID_SIZE_OPTIONS = [
|
||||
const MetricOperateBar = ({
|
||||
metricSelect,
|
||||
nodeScopeModule = {
|
||||
hasCustomScope: false,
|
||||
customScopeList: [],
|
||||
},
|
||||
hideNodeScope = false,
|
||||
|
||||
@@ -4,7 +4,7 @@ import { getBasicChartConfig, CHART_COLOR_LIST } from '@src/constants/chartConfi
|
||||
const METRIC_DASHBOARD_REQ_MAP = {
|
||||
[MetricType.Broker]: (clusterId: string) => api.getDashboardMetricChartData(clusterId, MetricType.Broker),
|
||||
[MetricType.Topic]: (clusterId: string) => api.getDashboardMetricChartData(clusterId, MetricType.Topic),
|
||||
[MetricType.Zookeeper]: (clusterId: string) => '',
|
||||
[MetricType.Zookeeper]: (clusterId: string) => api.getZookeeperMetrics(clusterId),
|
||||
};
|
||||
|
||||
export const getMetricDashboardReq = (clusterId: string, type: MetricType.Broker | MetricType.Topic | MetricType.Zookeeper) =>
|
||||
|
||||
@@ -108,10 +108,10 @@ const DraggableCharts = (props: PropsType): JSX.Element => {
|
||||
startTime,
|
||||
endTime,
|
||||
metricsNames: selectedMetricNames,
|
||||
topNu: curHeaderOptions?.scopeData?.isTop ? curHeaderOptions.scopeData.data : null,
|
||||
},
|
||||
dashboardType === MetricType.Broker || dashboardType === MetricType.Topic
|
||||
? {
|
||||
topNu: curHeaderOptions?.scopeData?.isTop ? curHeaderOptions.scopeData.data : null,
|
||||
[dashboardType === MetricType.Broker ? 'brokerIds' : 'topics']: curHeaderOptions?.scopeData?.isTop
|
||||
? null
|
||||
: curHeaderOptions.scopeData.data,
|
||||
@@ -233,8 +233,8 @@ const DraggableCharts = (props: PropsType): JSX.Element => {
|
||||
<div id="dashboard-drag-chart" className="topic-dashboard">
|
||||
<ChartOperateBar
|
||||
onChange={ksHeaderChange}
|
||||
hideNodeScope={dashboardType === MetricType.Zookeeper}
|
||||
nodeScopeModule={{
|
||||
hasCustomScope: !(dashboardType === MetricType.Zookeeper),
|
||||
customScopeList: scopeList,
|
||||
scopeName: dashboardType === MetricType.Broker ? 'Broker' : dashboardType === MetricType.Topic ? 'Topic' : 'Zookeeper',
|
||||
scopeLabel: `自定义 ${
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
// Wrapper for the health-state badge image: the <img> stretches to fill
// whatever width/height the HealthState component sets inline on this div.
.health-state {
  img {
    width: 100%;
    height: 100%;
  }
}
|
||||
@@ -0,0 +1,76 @@
|
||||
import React from 'react';
|
||||
import GoodState from '@src/assets/health-good.png';
|
||||
import MediumState from '@src/assets/health-medium.png';
|
||||
import PoorState from '@src/assets/health-poor.png';
|
||||
import DownState from '@src/assets/health-down.png';
|
||||
import UnknownState from '@src/assets/health-unknown.png';
|
||||
import GoodStateEmoji from '@src/assets/health-good-emoji.png';
|
||||
import MediumStateEmoji from '@src/assets/health-medium-emoji.png';
|
||||
import PoorStateEmoji from '@src/assets/health-poor-emoji.png';
|
||||
import DownStateEmoji from '@src/assets/health-down-emoji.png';
|
||||
import './index.less';
|
||||
|
||||
// Numeric health states as reported by the backend (`HealthState` metric).
// Auto-increment makes the values: UNKNOWN = -1, GOOD = 0, MEDIUM = 1,
// POOR = 2, DOWN = 3 — these numbers must stay in sync with the server side.
export enum HealthStateEnum {
  UNKNOWN = -1,
  GOOD,
  MEDIUM,
  POOR,
  DOWN,
}
|
||||
|
||||
// Props for the <HealthState /> badge component.
interface HealthStateProps {
  // Which state image to show; unknown values fall back to the UNKNOWN image.
  state: HealthStateEnum;
  // Box size, applied as inline style (number = px, string = any CSS length).
  width: string | number;
  height: string | number;
}
|
||||
|
||||
// Large badge image per health state (used by the <HealthState /> component).
const HEALTH_STATE_MAP = {
  [HealthStateEnum.GOOD]: GoodState,
  [HealthStateEnum.MEDIUM]: MediumState,
  [HealthStateEnum.POOR]: PoorState,
  [HealthStateEnum.DOWN]: DownState,
  [HealthStateEnum.UNKNOWN]: UnknownState,
};
|
||||
|
||||
// Small emoji-style icon per health state (used by getHealthStateEmoji).
// NOTE(review): UNKNOWN reuses the DOWN emoji — presumably there is no
// dedicated "unknown" emoji asset; confirm this is intentional.
const HEALTH_STATE_EMOJI_MAP = {
  [HealthStateEnum.GOOD]: GoodStateEmoji,
  [HealthStateEnum.MEDIUM]: MediumStateEmoji,
  [HealthStateEnum.POOR]: PoorStateEmoji,
  [HealthStateEnum.DOWN]: DownStateEmoji,
  [HealthStateEnum.UNKNOWN]: DownStateEmoji,
};
|
||||
|
||||
// User-facing description per health state (Chinese UI strings — runtime
// text, do not translate). UNKNOWN deliberately shares the DOWN wording.
const HEALTH_STATE_DESC_MAP = {
  [HealthStateEnum.GOOD]: '状态优异',
  [HealthStateEnum.MEDIUM]: '状态良好',
  [HealthStateEnum.POOR]: '状态较差',
  [HealthStateEnum.DOWN]: '状态异常',
  [HealthStateEnum.UNKNOWN]: '状态异常',
};
|
||||
|
||||
export const getHealthStateEmoji = (state: HealthStateEnum, width = 16, height = 16) => {
|
||||
return (
|
||||
<img
|
||||
width={width}
|
||||
height={height}
|
||||
style={{ marginTop: -3 }}
|
||||
src={HEALTH_STATE_EMOJI_MAP[state] || HEALTH_STATE_EMOJI_MAP[HealthStateEnum.UNKNOWN]}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export const getHealthStateDesc = (state: HealthStateEnum) => {
|
||||
return HEALTH_STATE_DESC_MAP[state] || HEALTH_STATE_DESC_MAP[HealthStateEnum.UNKNOWN];
|
||||
};
|
||||
|
||||
const HealthState = (props: HealthStateProps) => {
|
||||
const { state, width, height } = props;
|
||||
|
||||
return (
|
||||
<div className="health-state" style={{ width, height }}>
|
||||
<img src={HEALTH_STATE_MAP[state] || UnknownState} />
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default HealthState;
|
||||