Initialize version 3.0.0

zengqiao
2022-08-18 17:04:05 +08:00
parent 462303fca0
commit 51832385b1
2446 changed files with 93177 additions and 127211 deletions

km-biz/pom.xml Normal file

@@ -0,0 +1,98 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-biz</artifactId>
<version>${km.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>km</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${km.revision}</version>
</parent>
<properties>
<!-- maven properties -->
<maven.test.skip>true</maven.test.skip>
<downloadSources>true</downloadSources>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
</properties>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-core</artifactId>
<version>${project.parent.version}</version>
</dependency>
<!-- spring -->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
</dependency>
<!-- javax -->
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>javax.annotation</groupId>
<artifactId>javax.annotation-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka-clients.version}</version>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-util</artifactId>
</dependency>
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
</dependency>
<!-- json -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
</dependency>
</dependencies>
</project>


@@ -0,0 +1,19 @@
package com.xiaojukeji.know.streaming.km.biz.broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.config.KafkaBrokerConfigModifyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.config.kafka.KafkaBrokerConfigVO;
import java.util.List;
public interface BrokerConfigManager {
/**
* Get the detailed configs of a broker
* @param clusterPhyId physical cluster ID
* @param brokerId broker ID
* @return the broker's config details
*/
Result<List<KafkaBrokerConfigVO>> getBrokerConfigDetail(Long clusterPhyId, Integer brokerId);
Result<Void> modifyBrokerConfig(KafkaBrokerConfigModifyParam modifyParam, String operator);
}
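
For context, a minimal caller sketch for this interface, assuming Spring injection; the wrapper class and sample IDs are hypothetical, and only the BrokerConfigManager API above comes from this commit:

import com.xiaojukeji.know.streaming.km.biz.broker.BrokerConfigManager;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.config.kafka.KafkaBrokerConfigVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;

@Component
public class BrokerConfigExampleCaller {
    @Autowired
    private BrokerConfigManager brokerConfigManager;

    /** Print the config names of broker 1 in cluster 100 (sample IDs). */
    public void printBrokerConfigNames() {
        Result<List<KafkaBrokerConfigVO>> result = brokerConfigManager.getBrokerConfigDetail(100L, 1);
        if (result.failed()) {
            return; // the Result carries the error details
        }
        result.getData().forEach(vo -> System.out.println(vo.getName()));
    }
}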


@@ -0,0 +1,13 @@
package com.xiaojukeji.know.streaming.km.biz.broker;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.broker.BrokerBasicVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.log.LogDirVO;
public interface BrokerManager {
Result<BrokerBasicVO> getBrokerBasic(Long clusterPhyId, Integer brokerId);
PaginationResult<LogDirVO> getBrokerLogDirs(Long clusterPhyId, Integer brokerId, PaginationBaseDTO dto);
}
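
A hypothetical usage sketch for the log-dir listing; the pagination DTO would normally arrive from a web layer that is not part of this file:

import com.xiaojukeji.know.streaming.km.biz.broker.BrokerManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.log.LogDirVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;

@Component
public class BrokerLogDirExampleCaller {
    @Autowired
    private BrokerManager brokerManager;

    /** Return the current page of log-dir entries, or an empty list on failure. */
    public List<LogDirVO> listLogDirs(Long clusterPhyId, Integer brokerId, PaginationBaseDTO dto) {
        PaginationResult<LogDirVO> result = brokerManager.getBrokerLogDirs(clusterPhyId, brokerId, dto);
        return result.failed() ? new ArrayList<>() : result.getData().getBizData();
    }
}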


@@ -0,0 +1,97 @@
package com.xiaojukeji.know.streaming.km.biz.broker.impl;
import com.xiaojukeji.know.streaming.km.biz.broker.BrokerConfigManager;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.kafkaconfig.KafkaConfigDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.config.KafkaBrokerConfigModifyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerConfigPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.config.kafka.KafkaBrokerConfigVO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.enums.config.ConfigDiffTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerConfigService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import org.apache.kafka.common.config.ConfigDef;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@Component
public class BrokerConfigManagerImpl implements BrokerConfigManager {
@Autowired
private BrokerService brokerService;
@Autowired
private BrokerConfigService brokerConfigService;
@Override
public Result<List<KafkaBrokerConfigVO>> getBrokerConfigDetail(Long clusterPhyId, Integer brokerId) {
// Get the current broker configs
Result<List<KafkaConfigDetail>> configResult = brokerConfigService.getBrokerConfigDetailFromKafka(clusterPhyId, brokerId);
if (configResult.failed()) {
return Result.buildFromIgnoreData(configResult);
}
// Get the configs that differ
List<BrokerConfigPO> diffPOList = brokerConfigService.getBrokerConfigDiffFromDB(clusterPhyId, brokerId);
// Assemble the data
return Result.buildSuc(this.convert2KafkaBrokerConfigVOList(configResult.getData(), diffPOList));
}
private List<KafkaBrokerConfigVO> convert2KafkaBrokerConfigVOList(List<KafkaConfigDetail> configList, List<BrokerConfigPO> diffPOList) {
if (ValidateUtils.isEmptyList(configList)) {
return new ArrayList<>();
}
Map<String, BrokerConfigPO> poMap = diffPOList.stream().collect(Collectors.toMap(BrokerConfigPO::getConfigName, Function.identity()));
List<KafkaBrokerConfigVO> voList = ConvertUtil.list2List(configList, KafkaBrokerConfigVO.class);
for (KafkaBrokerConfigVO vo: voList) {
BrokerConfigPO po = poMap.get(vo.getName());
if (po != null) {
vo.setExclusive(po.getDiffType().equals(ConfigDiffTypeEnum.ALONE_POSSESS.getCode()));
vo.setDifferentiated(po.getDiffType().equals(ConfigDiffTypeEnum.UN_EQUAL.getCode()));
} else {
vo.setExclusive(false);
vo.setDifferentiated(false);
}
ConfigDef.ConfigKey configKey = KafkaConstant.KAFKA_ALL_CONFIG_DEF_MAP.get(vo.getName());
if (configKey == null) {
continue;
}
try {
vo.setDocumentation(configKey.documentation);
vo.setDefaultValue(configKey.defaultValue.toString());
} catch (Exception e) {
// ignore
}
}
return voList;
}
@Override
public Result<Void> modifyBrokerConfig(KafkaBrokerConfigModifyParam modifyParam, String operator) {
if (modifyParam.getApplyAll() == null || !modifyParam.getApplyAll()) {
return brokerConfigService.modifyBrokerConfig(modifyParam, operator);
}
List<Broker> brokerList = brokerService.listAliveBrokersFromDB(modifyParam.getClusterPhyId());
for (Broker broker: brokerList) {
modifyParam.setBrokerId(broker.getBrokerId());
Result<Void> rv = brokerConfigService.modifyBrokerConfig(modifyParam, operator);
if (rv.failed()) {
return rv;
}
}
return Result.buildSuc();
}
}


@@ -0,0 +1,75 @@
package com.xiaojukeji.know.streaming.km.biz.broker.impl;
import com.xiaojukeji.know.streaming.km.biz.broker.BrokerManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.vo.broker.BrokerBasicVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.log.LogDirVO;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import org.apache.kafka.clients.admin.LogDirDescription;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.*;
@Component
public class BrokerManagerImpl implements BrokerManager {
@Autowired
private BrokerService brokerService;
@Autowired
private ClusterPhyService clusterPhyService;
@Override
public Result<BrokerBasicVO> getBrokerBasic(Long clusterPhyId, Integer brokerId) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
}
Broker broker = brokerService.getBroker(clusterPhyId, brokerId);
if (broker == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getBrokerNotExist(clusterPhyId, brokerId));
}
return Result.buildSuc(new BrokerBasicVO(brokerId, broker.getHost(), clusterPhy.getName()));
}
@Override
public PaginationResult<LogDirVO> getBrokerLogDirs(Long clusterPhyId, Integer brokerId, PaginationBaseDTO dto) {
Result<Map<String, LogDirDescription>> dirDescResult = brokerService.getBrokerLogDirDescFromKafka(clusterPhyId, brokerId);
if (dirDescResult.failed()) {
return PaginationResult.buildFailure(dirDescResult, dto);
}
Map<String, LogDirDescription> dirDescMap = dirDescResult.hasData()? dirDescResult.getData(): new HashMap<>();
List<LogDirVO> voList = new ArrayList<>();
for (Map.Entry<String, LogDirDescription> entry: dirDescMap.entrySet()) {
entry.getValue().replicaInfos().entrySet().stream().forEach(elem -> {
LogDirVO vo = new LogDirVO();
vo.setDir(entry.getKey());
vo.setTopicName(elem.getKey().topic());
vo.setPartitionId(elem.getKey().partition());
vo.setOffsetLag(elem.getValue().offsetLag());
vo.setLogSizeUnitB(elem.getValue().size());
voList.add(vo);
});
}
return PaginationUtil.pageBySubData(
PaginationUtil.pageByFuzzyFilter(voList, dto.getSearchKeywords(), Arrays.asList("topicName")),
dto
);
}
/**************************************************** private method ****************************************************/
}


@@ -0,0 +1,26 @@
package com.xiaojukeji.know.streaming.km.biz.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterBrokersOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersOverviewVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersStateVO;
/**
* Overview and state of the brokers in a cluster
*/
public interface ClusterBrokersManager {
/**
* Get the union of the cached query results and the broker-table query results
* @param clusterPhyId Kafka physical cluster ID
* @param dto pagination query parameters
* @return the union of the cached query results and the broker-table query results
*/
PaginationResult<ClusterBrokersOverviewVO> getClusterPhyBrokersOverview(Long clusterPhyId, ClusterBrokersOverviewDTO dto);
/**
* Get the broker state of the cluster identified by the physical cluster ID
* @param clusterPhyId physical cluster ID
* @return the broker state of the given cluster
*/
ClusterBrokersStateVO getClusterPhyBrokersState(Long clusterPhyId);
}
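
A hypothetical caller sketch; it assumes the usual Lombok-style getter for the configSimilar field that the implementation below sets:

import com.xiaojukeji.know.streaming.km.biz.cluster.ClusterBrokersManager;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersStateVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class ClusterBrokersExampleCaller {
    @Autowired
    private ClusterBrokersManager clusterBrokersManager;

    /** True if all brokers of the cluster share the same effective config. */
    public boolean isConfigSimilar(Long clusterPhyId) {
        ClusterBrokersStateVO stateVO = clusterBrokersManager.getClusterPhyBrokersState(clusterPhyId);
        return Boolean.TRUE.equals(stateVO.getConfigSimilar());
    }
}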


@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.biz.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterTopicsOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterPhyTopicsOverviewVO;
/**
* Overview of the topics in a cluster
*/
public interface ClusterTopicsManager {
PaginationResult<ClusterPhyTopicsOverviewVO> getClusterPhyTopicsOverview(Long clusterPhyId, ClusterTopicsOverviewDTO dto);
}
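
A pass-through usage sketch (hypothetical caller); the DTO's showInternalTopics flag, read by the implementation below, toggles between internal and user topics:

import com.xiaojukeji.know.streaming.km.biz.cluster.ClusterTopicsManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterTopicsOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterPhyTopicsOverviewVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class ClusterTopicsExampleCaller {
    @Autowired
    private ClusterTopicsManager clusterTopicsManager;

    /** One page of the topics overview; paging, sorting and filters ride on the DTO. */
    public PaginationResult<ClusterPhyTopicsOverviewVO> overview(Long clusterPhyId, ClusterTopicsOverviewDTO dto) {
        return clusterTopicsManager.getClusterPhyTopicsOverview(clusterPhyId, dto);
    }
}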


@@ -0,0 +1,24 @@
package com.xiaojukeji.know.streaming.km.biz.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.MultiClusterDashboardDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyDashboardVO;
/**
* Overall state across multiple clusters
*/
public interface MultiClusterPhyManager {
/**
* Get the state of all clusters
* @return the aggregated cluster state
*/
ClusterPhysState getClusterPhysState();
/**
* Query the multi-cluster dashboard
* @param dto pagination info
* @return one page of the dashboard
*/
PaginationResult<ClusterPhyDashboardVO> getClusterPhysDashboard(MultiClusterDashboardDTO dto);
}
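
A minimal caller sketch (hypothetical class); getLiveCount/getDownCount are the getters the implementation below relies on:

import com.xiaojukeji.know.streaming.km.biz.cluster.MultiClusterPhyManager;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class MultiClusterExampleCaller {
    @Autowired
    private MultiClusterPhyManager multiClusterPhyManager;

    /** One-line health summary across all clusters. */
    public String summarize() {
        ClusterPhysState state = multiClusterPhyManager.getClusterPhysState();
        return String.format("alive=%d, down=%d", state.getLiveCount(), state.getDownCount());
    }
}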


@@ -0,0 +1,219 @@
package com.xiaojukeji.know.streaming.km.biz.cluster.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.cluster.ClusterBrokersManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterBrokersOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BrokerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersOverviewVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterBrokersStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.kafkacontroller.KafkaControllerVO;
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerConfigService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@Service
public class ClusterBrokersManagerImpl implements ClusterBrokersManager {
private static final ILog log = LogFactory.getLog(ClusterBrokersManagerImpl.class);
@Autowired
private TopicService topicService;
@Autowired
private BrokerService brokerService;
@Autowired
private BrokerConfigService brokerConfigService;
@Autowired
private BrokerMetricService brokerMetricService;
@Autowired
private KafkaControllerService kafkaControllerService;
@Override
public PaginationResult<ClusterBrokersOverviewVO> getClusterPhyBrokersOverview(Long clusterPhyId, ClusterBrokersOverviewDTO dto) {
// Get the cluster's broker list
List<Broker> brokerList = brokerService.listAllBrokersFromDB(clusterPhyId);
// Fuzzy search
brokerList = PaginationUtil.pageByFuzzyFilter(brokerList, dto.getSearchKeywords(), Arrays.asList("host"));
// Fetch metrics
Result<List<BrokerMetrics>> metricsResult = brokerMetricService.getLatestMetricsFromES(
clusterPhyId,
brokerList.stream().filter(elem1 -> elem1.alive()).map(elem2 -> elem2.getBrokerId()).collect(Collectors.toList())
);
// Pagination + search
PaginationResult<Integer> paginationResult = this.pagingBrokers(brokerList, metricsResult.hasData()? metricsResult.getData(): new ArrayList<>(), dto);
// Get the placement of the internal topics (__consumer_offsets / __transaction_state)
Topic groupTopic = topicService.getTopic(clusterPhyId, org.apache.kafka.common.internals.Topic.GROUP_METADATA_TOPIC_NAME);
Topic transactionTopic = topicService.getTopic(clusterPhyId, org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME);
// Convert the format
return PaginationResult.buildSuc(
this.convert2ClusterBrokersOverviewVOList(
paginationResult.getData().getBizData(),
brokerList,
metricsResult.getData(),
groupTopic,
transactionTopic
),
paginationResult
);
}
@Override
public ClusterBrokersStateVO getClusterPhyBrokersState(Long clusterPhyId) {
ClusterBrokersStateVO clusterBrokersStateVO = new ClusterBrokersStateVO();
// Get the cluster's broker list
List<Broker> allBrokerList = brokerService.listAllBrokersFromDB(clusterPhyId);
if (allBrokerList == null) {
allBrokerList = new ArrayList<>();
}
// Set the broker count
clusterBrokersStateVO.setBrokerCount(allBrokerList.size());
// Set the version info
clusterBrokersStateVO.setBrokerVersionList(
this.getBrokerVersionList(clusterPhyId, allBrokerList.stream().filter(elem -> elem.alive()).collect(Collectors.toList()))
);
// Get the controller info
KafkaController kafkaController = kafkaControllerService.getKafkaControllerFromDB(clusterPhyId);
// Set the kafka-controller info
clusterBrokersStateVO.setKafkaControllerAlive(false);
if(null != kafkaController) {
clusterBrokersStateVO.setKafkaController(
this.convert2KafkaControllerVO(
kafkaController,
brokerService.getBroker(clusterPhyId, kafkaController.getBrokerId())
)
);
clusterBrokersStateVO.setKafkaControllerAlive(true);
}
clusterBrokersStateVO.setConfigSimilar(brokerConfigService.countBrokerConfigDiffsFromDB(clusterPhyId, Arrays.asList("broker.id", "listeners", "name", "value")) <= 0);
return clusterBrokersStateVO;
}
/**************************************************** private method ****************************************************/
private PaginationResult<Integer> pagingBrokers(List<Broker> brokerList, List<BrokerMetrics> metricsList, PaginationSortDTO dto) {
if (ValidateUtils.isBlank(dto.getSortField())) {
// Default sort
return PaginationUtil.pageBySubData(
PaginationUtil.pageBySort(brokerList, "brokerId", SortTypeEnum.ASC.getSortType()).stream().map(elem -> elem.getBrokerId()).collect(Collectors.toList()),
dto
);
}
if (!brokerMetricService.isMetricName(dto.getSortField())) {
// Sort and page by a non-metric field
return PaginationUtil.pageBySubData(
PaginationUtil.pageBySort(brokerList, dto.getSortField(), dto.getSortType()).stream().map(elem -> elem.getBrokerId()).collect(Collectors.toList()),
dto
);
}
// Sort and page by a metric field
Map<Integer, BrokerMetrics> metricsMap = metricsList.stream().collect(Collectors.toMap(BrokerMetrics::getBrokerId, Function.identity()));
brokerList.stream().forEach(elem -> {
metricsMap.putIfAbsent(elem.getBrokerId(), new BrokerMetrics(elem.getClusterPhyId(), elem.getBrokerId()));
});
// Sort
metricsList = (List<BrokerMetrics>) PaginationMetricsUtil.sortMetrics(new ArrayList<>(metricsMap.values()), dto.getSortField(), "brokerId", dto.getSortType());
return PaginationUtil.pageBySubData(
metricsList.stream().map(elem -> elem.getBrokerId()).collect(Collectors.toList()),
dto
);
}
private List<ClusterBrokersOverviewVO> convert2ClusterBrokersOverviewVOList(List<Integer> pagedBrokerIdList,
List<Broker> brokerList,
List<BrokerMetrics> metricsList,
Topic groupTopic,
Topic transactionTopic) {
Map<Integer, BrokerMetrics> metricsMap = metricsList == null? new HashMap<>(): metricsList.stream().collect(Collectors.toMap(BrokerMetrics::getBrokerId, Function.identity()));
Map<Integer, Broker> brokerMap = brokerList == null? new HashMap<>(): brokerList.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
List<ClusterBrokersOverviewVO> voList = new ArrayList<>(pagedBrokerIdList.size());
for (Integer brokerId : pagedBrokerIdList) {
Broker broker = brokerMap.get(brokerId);
BrokerMetrics brokerMetrics = metricsMap.get(brokerId);
voList.add(this.convert2ClusterBrokersOverviewVO(brokerId, broker, brokerMetrics, groupTopic, transactionTopic));
}
return voList;
}
private ClusterBrokersOverviewVO convert2ClusterBrokersOverviewVO(Integer brokerId, Broker broker, BrokerMetrics brokerMetrics, Topic groupTopic, Topic transactionTopic) {
ClusterBrokersOverviewVO clusterBrokersOverviewVO = new ClusterBrokersOverviewVO();
clusterBrokersOverviewVO.setBrokerId(brokerId);
if (broker != null) {
clusterBrokersOverviewVO.setHost(broker.getHost());
clusterBrokersOverviewVO.setRack(broker.getRack());
clusterBrokersOverviewVO.setJmxPort(broker.getJmxPort());
clusterBrokersOverviewVO.setAlive(broker.alive());
clusterBrokersOverviewVO.setStartTimeUnitMs(broker.getStartTimestamp());
}
clusterBrokersOverviewVO.setKafkaRoleList(new ArrayList<>());
if (groupTopic != null && groupTopic.getBrokerIdSet().contains(brokerId)) {
clusterBrokersOverviewVO.getKafkaRoleList().add(groupTopic.getTopicName());
}
if (transactionTopic != null && transactionTopic.getBrokerIdSet().contains(brokerId)) {
clusterBrokersOverviewVO.getKafkaRoleList().add(transactionTopic.getTopicName());
}
clusterBrokersOverviewVO.setLatestMetrics(brokerMetrics);
return clusterBrokersOverviewVO;
}
private KafkaControllerVO convert2KafkaControllerVO(KafkaController kafkaController, Broker kafkaControllerBroker) {
if(null != kafkaController && null != kafkaControllerBroker) {
KafkaControllerVO kafkaControllerVO = new KafkaControllerVO();
kafkaControllerVO.setBrokerId(kafkaController.getBrokerId());
kafkaControllerVO.setBrokerHost(kafkaControllerBroker.getHost());
return kafkaControllerVO;
}
return null;
}
private List<String> getBrokerVersionList(Long clusterPhyId, List<Broker> brokerList) {
Set<String> brokerVersionList = new HashSet<>();
for (Broker broker : brokerList) {
brokerVersionList.add(brokerService.getBrokerVersionFromKafka(clusterPhyId, broker.getBrokerId()));
}
brokerVersionList.remove("");
return new ArrayList<>(brokerVersionList);
}
}


@@ -0,0 +1,112 @@
package com.xiaojukeji.know.streaming.km.biz.cluster.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.cluster.ClusterTopicsManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterTopicsOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricsTopicDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.res.ClusterPhyTopicsOverviewVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
@Service
public class ClusterTopicsManagerImpl implements ClusterTopicsManager {
private static final ILog log = LogFactory.getLog(ClusterTopicsManagerImpl.class);
@Autowired
private TopicService topicService;
@Autowired
private TopicMetricService topicMetricService;
@Override
public PaginationResult<ClusterPhyTopicsOverviewVO> getClusterPhyTopicsOverview(Long clusterPhyId, ClusterTopicsOverviewDTO dto) {
// Get all topics of the cluster
List<Topic> topicList = topicService.listTopicsFromDB(clusterPhyId);
// Get the metrics of all topics of the cluster
Map<String, TopicMetrics> metricsMap = topicMetricService.getLatestMetricsFromCacheFirst(clusterPhyId);
// Convert to VOs
List<ClusterPhyTopicsOverviewVO> voList = TopicVOConverter.convert2ClusterPhyTopicsOverviewVOList(topicList, metricsMap);
// Apply the requested pagination
PaginationResult<ClusterPhyTopicsOverviewVO> voPaginationResult = this.pagingTopicInLocal(voList, dto);
if (voPaginationResult.failed()) {
log.error("method=getClusterPhyTopicsOverview||clusterPhyId={}||result={}||errMsg=pagination in local failed", clusterPhyId, voPaginationResult);
return PaginationResult.buildFailure(voPaginationResult, dto);
}
// Query metrics
Result<List<MetricMultiLinesVO>> metricMultiLinesResult = topicMetricService.listTopicMetricsFromES(
clusterPhyId,
this.buildTopicOverviewMetricsDTO(voPaginationResult.getData().getBizData().stream().map(elem -> elem.getTopicName()).collect(Collectors.toList()), dto.getMetricLines())
);
if (metricMultiLinesResult.failed()) {
// The ES query failed or returned no data, so ES may have a problem; degrade and return only the topics' basic info
log.error("method=getClusterPhyTopicsOverview||clusterPhyId={}||result={}||errMsg=get metrics from es failed", clusterPhyId, metricMultiLinesResult);
}
return PaginationResult.buildSuc(
TopicVOConverter.supplyMetricLines(
voPaginationResult.getData().getBizData(),
metricMultiLinesResult.getData() == null? new ArrayList<>(): metricMultiLinesResult.getData()
),
voPaginationResult
);
}
/**************************************************** private method ****************************************************/
private MetricsTopicDTO buildTopicOverviewMetricsDTO(List<String> topicNameList, MetricDTO metricDTO) {
MetricsTopicDTO dto = ConvertUtil.obj2Obj(metricDTO, MetricsTopicDTO.class);
dto.setTopics(topicNameList == null? new ArrayList<>(): topicNameList);
return dto;
}
private PaginationResult<ClusterPhyTopicsOverviewVO> pagingTopicInLocal(List<ClusterPhyTopicsOverviewVO> voList, ClusterTopicsOverviewDTO dto) {
List<ClusterPhyTopicsOverviewVO> metricsList = voList.stream().filter(elem -> {
if (dto.getShowInternalTopics() != null && dto.getShowInternalTopics()) {
// Show only internal (system) topics
return KafkaConstant.KAFKA_INTERNAL_TOPICS.contains(elem.getTopicName());
} else {
// Show only user topics
return !KafkaConstant.KAFKA_INTERNAL_TOPICS.contains(elem.getTopicName());
}
}).collect(Collectors.toList());
// Search by name
metricsList = PaginationUtil.pageByFuzzyFilter(metricsList, dto.getSearchKeywords(), Arrays.asList("topicName"));
if (!ValidateUtils.isBlank(dto.getSortField()) && !"createTime".equals(dto.getSortField())) {
// Sort by metric
PaginationMetricsUtil.sortMetrics(metricsList, "latestMetrics", dto.getSortField(), "topicName", dto.getSortType());
} else {
// Sort by attribute
PaginationUtil.pageBySort(metricsList, dto.getSortField(), dto.getSortType(), "topicName", dto.getSortType());
}
return PaginationUtil.pageBySubData(metricsList, dto);
}
}


@@ -0,0 +1,181 @@
package com.xiaojukeji.know.streaming.km.biz.cluster.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.cluster.MultiClusterPhyManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricsClusterPhyDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhysState;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.MultiClusterDashboardDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.ClusterPhyDashboardVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.converter.ClusterVOConverter;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ClusterMetricVersionItems;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Service
public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
private static final ILog log = LogFactory.getLog(MultiClusterPhyManagerImpl.class);
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private ClusterMetricService clusterMetricService;
@Autowired
private KafkaControllerService kafkaControllerService;
@Override
public ClusterPhysState getClusterPhysState() {
List<ClusterPhy> clusterPhyList = clusterPhyService.listAllClusters();
Map<Long, KafkaController> controllerMap = kafkaControllerService.getKafkaControllersFromDB(
clusterPhyList.stream().map(elem -> elem.getId()).collect(Collectors.toList()),
false
);
// TODO: on the product side, consider adding an 'unknown' state; data for newly connected clusters is delayed, so their liveness may be misjudged
ClusterPhysState physState = new ClusterPhysState(0, 0, clusterPhyList.size());
for (ClusterPhy clusterPhy: clusterPhyList) {
KafkaController kafkaController = controllerMap.get(clusterPhy.getId());
if (kafkaController != null && !kafkaController.alive()) {
// There is explicit evidence that the controller is down
physState.setDownCount(physState.getDownCount() + 1);
} else if ((System.currentTimeMillis() - clusterPhy.getCreateTime().getTime() >= 5 * 60 * 1000) && kafkaController == null) {
// The cluster was connected more than 5 minutes ago and there is still no kafkaController info, so mark it down
physState.setDownCount(physState.getDownCount() + 1);
} else {
// Mark all other cases as alive
physState.setLiveCount(physState.getLiveCount() + 1);
}
}
return physState;
}
@Override
public PaginationResult<ClusterPhyDashboardVO> getClusterPhysDashboard(MultiClusterDashboardDTO dto) {
// Get the clusters
List<ClusterPhy> clusterPhyList = clusterPhyService.listAllClusters();
// Convert to VO format to ease subsequent pagination, filtering, etc.
List<ClusterPhyDashboardVO> voList = ConvertUtil.list2List(clusterPhyList, ClusterPhyDashboardVO.class);
// TODO: on the product side, consider adding an 'unknown' state; data for newly connected clusters is delayed, so their liveness may be misjudged
// Get the cluster controller info and fill it into the VOs
Map<Long, KafkaController> controllerMap = kafkaControllerService.getKafkaControllersFromDB(clusterPhyList.stream().map(elem -> elem.getId()).collect(Collectors.toList()), false);
for (ClusterPhyDashboardVO vo: voList) {
KafkaController kafkaController = controllerMap.get(vo.getId());
if (kafkaController != null && !kafkaController.alive()) {
// There is explicit evidence that the controller is down
vo.setAlive(Constant.DOWN);
} else if ((System.currentTimeMillis() - vo.getCreateTime().getTime() >= 5 * 60L * 1000L) && kafkaController == null) {
// The cluster was connected more than 5 minutes ago and there is still no kafkaController info, so mark it down
vo.setAlive(Constant.DOWN);
} else {
// Mark all other cases as alive
vo.setAlive(Constant.ALIVE);
}
}
// Local pagination and filtering
voList = this.getAndPagingDataInLocal(voList, dto);
// Pagination and filtering by latest metrics (ES/cache)
PaginationResult<ClusterMetrics> latestMetricsResult = this.getAndPagingClusterWithLatestMetricsFromCache(voList, dto);
if (latestMetricsResult.failed()) {
log.error("method=getClusterPhysDashboard||pagingData={}||result={}||errMsg=search es data failed.", dto, latestMetricsResult);
return PaginationResult.buildFailure(latestMetricsResult, dto);
}
// Get the historical metrics
Result<List<MetricMultiLinesVO>> linesMetricResult = clusterMetricService.listClusterMetricsFromES(
this.buildMetricsClusterPhyDTO(
latestMetricsResult.getData().getBizData().stream().map(elem -> elem.getClusterPhyId()).collect(Collectors.toList()),
dto.getMetricLines()
));
// Assemble the final data
return PaginationResult.buildSuc(
ClusterVOConverter.convert2ClusterPhyDashboardVOList(voList, linesMetricResult.getData(), latestMetricsResult.getData().getBizData()),
latestMetricsResult
);
}
/**************************************************** private method ****************************************************/
private List<ClusterPhyDashboardVO> getAndPagingDataInLocal(List<ClusterPhyDashboardVO> voList, MultiClusterDashboardDTO dto) {
// Sort by time
if ("createTime".equals(dto.getSortField())) {
voList = PaginationUtil.pageBySort(voList, "createTime", dto.getSortType(), "name", dto.getSortType());
}
// Search by name
if (!ValidateUtils.isBlank(dto.getSearchKeywords())) {
voList = PaginationUtil.pageByFuzzyFilter(voList, dto.getSearchKeywords(), Arrays.asList("name"));
}
// Precise filtering
return PaginationUtil.pageByPreciseFilter(voList, dto.getPreciseFilterDTOList());
}
private PaginationResult<ClusterMetrics> getAndPagingClusterWithLatestMetricsFromCache(List<ClusterPhyDashboardVO> voList, MultiClusterDashboardDTO dto) {
// Get all the metrics
List<ClusterMetrics> metricsList = new ArrayList<>();
for (ClusterPhyDashboardVO vo: voList) {
ClusterMetrics clusterMetrics = clusterMetricService.getLatestMetricsFromCache(vo.getId());
if (!clusterMetrics.getMetrics().containsKey(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_SCORE)) {
Float alive = clusterMetrics.getMetrics().get(ClusterMetricVersionItems.CLUSTER_METRIC_ALIVE);
// If the cluster has no health score, set a default value
clusterMetrics.putMetric(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_SCORE,
(alive != null && alive <= 0)? 0.0f: Constant.DEFAULT_CLUSTER_HEALTH_SCORE.floatValue()
);
}
metricsList.add(clusterMetrics);
}
// Range filtering
metricsList = (List<ClusterMetrics>) PaginationMetricsUtil.rangeFilterMetrics(metricsList, dto.getRangeFilterDTOList());
// Precise filtering
metricsList = (List<ClusterMetrics>) PaginationMetricsUtil.preciseFilterMetrics(metricsList, dto.getPreciseFilterDTOList());
// Sort
PaginationMetricsUtil.sortMetrics(metricsList, dto.getSortField(), "clusterPhyId", dto.getSortType());
// Paginate
return PaginationUtil.pageBySubData(metricsList, dto);
}
private MetricsClusterPhyDTO buildMetricsClusterPhyDTO(List<Long> clusterIdList, MetricDTO metricDTO) {
MetricsClusterPhyDTO dto = ConvertUtil.obj2Obj(metricDTO, MetricsClusterPhyDTO.class);
dto.setClusterPhyIds(clusterIdList);
return dto;
}
}


@@ -0,0 +1,34 @@
package com.xiaojukeji.know.streaming.km.biz.group;
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicConsumedDetailVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import java.util.List;
import java.util.Set;
public interface GroupManager {
PaginationResult<GroupTopicOverviewVO> pagingGroupMembers(Long clusterPhyId,
String topicName,
String groupName,
String searchTopicKeyword,
String searchGroupKeyword,
PaginationBaseDTO dto);
PaginationResult<GroupTopicConsumedDetailVO> pagingGroupTopicConsumedMetrics(Long clusterPhyId,
String topicName,
String groupName,
List<String> latestMetricNames,
PaginationSortDTO dto) throws NotExistException, AdminOperateException;
Result<Set<TopicPartitionKS>> listClusterPhyGroupPartitions(Long clusterPhyId, String groupName, Long startTime, Long endTime);
Result<Void> resetGroupOffsets(GroupOffsetResetDTO dto, String operator) throws Exception;
}
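
A hypothetical caller sketch for the offset-reset entry point; the checked exception is folded into a failed Result, mirroring the style used elsewhere in this module:

import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class GroupOffsetResetExampleCaller {
    @Autowired
    private GroupManager groupManager;

    /** dto must already carry the cluster, group, topic and reset mode. */
    public Result<Void> reset(GroupOffsetResetDTO dto, String operator) {
        try {
            return groupManager.resetGroupOffsets(dto, operator);
        } catch (Exception e) {
            return Result.buildFailure(e.getMessage());
        }
    }
}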


@@ -0,0 +1,299 @@
package com.xiaojukeji.know.streaming.km.biz.group.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicConsumedDetailVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.group.GroupTopicOverviewVO;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.GroupOffsetResetEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupMetricService;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.GroupMetricVersionItems;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.GroupMetricESDAO;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.MemberDescription;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.stream.Collectors;
@Component
public class GroupManagerImpl implements GroupManager {
private static final ILog log = LogFactory.getLog(GroupManagerImpl.class);
@Autowired
private TopicService topicService;
@Autowired
private GroupService groupService;
@Autowired
private PartitionService partitionService;
@Autowired
private GroupMetricService groupMetricService;
@Autowired
private GroupMetricESDAO groupMetricESDAO;
@Override
public PaginationResult<GroupTopicOverviewVO> pagingGroupMembers(Long clusterPhyId,
String topicName,
String groupName,
String searchTopicKeyword,
String searchGroupKeyword,
PaginationBaseDTO dto) {
PaginationResult<GroupMemberPO> paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, groupName, searchTopicKeyword, searchGroupKeyword, dto);
if (paginationResult.failed()) {
return PaginationResult.buildFailure(paginationResult, dto);
}
if (!paginationResult.hasData()) {
return PaginationResult.buildSuc(dto);
}
// Fetch metrics
Result<List<GroupMetrics>> metricsListResult = groupMetricService.listLatestMetricsAggByGroupTopicFromES(
clusterPhyId,
paginationResult.getData().getBizData().stream().map(elem -> new GroupTopic(elem.getGroupName(), elem.getTopicName())).collect(Collectors.toList()),
Arrays.asList(GroupMetricVersionItems.GROUP_METRIC_LAG),
AggTypeEnum.MAX
);
if (metricsListResult.failed()) {
// If the query fails, log the error but still return the data we already have
log.error("method=pagingGroupMembers||clusterPhyId={}||topicName={}||groupName={}||result={}||errMsg=search es failed", clusterPhyId, topicName, groupName, metricsListResult);
}
return PaginationResult.buildSuc(
this.convert2GroupTopicOverviewVOList(paginationResult.getData().getBizData(), metricsListResult.getData()),
paginationResult
);
}
@Override
public PaginationResult<GroupTopicConsumedDetailVO> pagingGroupTopicConsumedMetrics(Long clusterPhyId,
String topicName,
String groupName,
List<String> latestMetricNames,
PaginationSortDTO dto) throws NotExistException, AdminOperateException {
// Get the list of TopicPartitions consumed by the group
Map<TopicPartition, Long> consumedOffsetMap = groupService.getGroupOffset(clusterPhyId, groupName);
List<Integer> partitionList = consumedOffsetMap.keySet()
.stream()
.filter(elem -> elem.topic().equals(topicName))
.map(elem -> elem.partition())
.collect(Collectors.toList());
Collections.sort(partitionList);
// Get the group's current runtime info
ConsumerGroupDescription groupDescription = groupService.getGroupDescription(clusterPhyId, groupName);
// Convert the storage format (re-index by TopicPartition)
Map<TopicPartition, MemberDescription> tpMemberMap = new HashMap<>();
for (MemberDescription description: groupDescription.members()) {
for (TopicPartition tp: description.assignment().topicPartitions()) {
tpMemberMap.put(tp, description);
}
}
// Fetch metrics
PaginationResult<GroupMetrics> metricsResult = this.pagingGroupTopicPartitionMetrics(clusterPhyId, groupName, topicName, partitionList, latestMetricNames, dto);
if (metricsResult.failed()) {
return PaginationResult.buildFailure(metricsResult, dto);
}
// Assemble the data
List<GroupTopicConsumedDetailVO> voList = new ArrayList<>();
for (GroupMetrics groupMetrics: metricsResult.getData().getBizData()) {
GroupTopicConsumedDetailVO vo = new GroupTopicConsumedDetailVO();
vo.setTopicName(topicName);
vo.setPartitionId(groupMetrics.getPartitionId());
MemberDescription memberDescription = tpMemberMap.get(new TopicPartition(topicName, groupMetrics.getPartitionId()));
if (memberDescription != null) {
vo.setMemberId(memberDescription.consumerId());
vo.setHost(memberDescription.host());
vo.setClientId(memberDescription.clientId());
}
vo.setLatestMetrics(groupMetrics);
voList.add(vo);
}
return PaginationResult.buildSuc(voList, metricsResult);
}
@Override
public Result<Set<TopicPartitionKS>> listClusterPhyGroupPartitions(Long clusterPhyId, String groupName, Long startTime, Long endTime) {
try {
return Result.buildSuc(groupMetricESDAO.listGroupTopicPartitions(clusterPhyId, groupName, startTime, endTime));
} catch (Exception e) {
return Result.buildFailure(e.getMessage());
}
}
@Override
public Result<Void> resetGroupOffsets(GroupOffsetResetDTO dto, String operator) throws Exception {
Result<Void> rv = this.checkFieldLegal(dto);
if (rv.failed()) {
return rv;
}
ConsumerGroupDescription description = groupService.getGroupDescription(dto.getClusterId(), dto.getGroupName());
if (ConsumerGroupState.DEAD.equals(description.state()) && !dto.isCreateIfNotExist()) {
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, "group does not exist, reset failed");
}
if (!ConsumerGroupState.EMPTY.equals(description.state()) && !ConsumerGroupState.DEAD.equals(description.state())) {
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, String.format("group is in state %s, reset failed (only Empty groups can be reset)", description.state().name()));
}
// Get the offsets
Result<Map<TopicPartition, Long>> offsetMapResult = this.getPartitionOffset(dto);
if (offsetMapResult.failed()) {
return Result.buildFromIgnoreData(offsetMapResult);
}
// Reset the offsets
return groupService.resetGroupOffsets(dto.getClusterId(), dto.getGroupName(), offsetMapResult.getData(), operator);
}
/**************************************************** private method ****************************************************/
private Result<Void> checkFieldLegal(GroupOffsetResetDTO dto) {
if (dto == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "parameter is null");
}
Topic topic = topicService.getTopic(dto.getClusterId(), dto.getTopicName());
if (topic == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(dto.getClusterId(), dto.getTopicName()));
}
if (GroupOffsetResetEnum.PRECISE_OFFSET.getResetType() == dto.getResetType()
&& ValidateUtils.isEmptyList(dto.getOffsetList())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "invalid parameter: resetting to a specific offset requires offset info");
}
if (GroupOffsetResetEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()
&& ValidateUtils.isNull(dto.getTimestamp())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "invalid parameter: resetting to a specific timestamp requires timestamp info");
}
return Result.buildSuc();
}
private Result<Map<TopicPartition, Long>> getPartitionOffset(GroupOffsetResetDTO dto) {
if (GroupOffsetResetEnum.PRECISE_OFFSET.getResetType() == dto.getResetType()) {
return Result.buildSuc(dto.getOffsetList().stream().collect(Collectors.toMap(
elem -> new TopicPartition(dto.getTopicName(), elem.getPartitionId()),
PartitionOffsetDTO::getOffset,
(key1, key2) -> key2
)));
}
OffsetSpec offsetSpec = null;
if (GroupOffsetResetEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()) {
offsetSpec = OffsetSpec.forTimestamp(dto.getTimestamp());
} else if (GroupOffsetResetEnum.EARLIEST.getResetType() == dto.getResetType()) {
offsetSpec = OffsetSpec.earliest();
} else {
offsetSpec = OffsetSpec.latest();
}
return partitionService.getPartitionOffsetFromKafka(dto.getClusterId(), dto.getTopicName(), offsetSpec, dto.getTimestamp());
}
private List<GroupTopicOverviewVO> convert2GroupTopicOverviewVOList(List<GroupMemberPO> poList, List<GroupMetrics> metricsList) {
if (metricsList == null) {
metricsList = new ArrayList<>();
}
// <GroupName, <TopicName, GroupMetrics>>
Map<String, Map<String, GroupMetrics>> metricsMap = new HashMap<>();
metricsList.stream().forEach(elem -> {
metricsMap.putIfAbsent(elem.getGroup(), new HashMap<>());
metricsMap.get(elem.getGroup()).put(elem.getTopic(), elem);
});
List<GroupTopicOverviewVO> voList = new ArrayList<>();
for (GroupMemberPO po: poList) {
GroupTopicOverviewVO vo = ConvertUtil.obj2Obj(po, GroupTopicOverviewVO.class);
if (vo == null) {
continue;
}
GroupMetrics metrics = metricsMap.getOrDefault(po.getGroupName(), new HashMap<>()).get(po.getTopicName());
if (metrics != null) {
vo.setMaxLag(ConvertUtil.Float2Long(metrics.getMetrics().get(GroupMetricVersionItems.GROUP_METRIC_LAG)));
}
voList.add(vo);
}
return voList;
}
private PaginationResult<GroupMetrics> pagingGroupTopicPartitionMetrics(Long clusterPhyId,
String groupName,
String topicName,
List<Integer> partitionIdList,
List<String> latestMetricNames,
PaginationSortDTO dto) {
// Get the group metrics
Result<List<GroupMetrics>> groupMetricsResult = groupMetricService.listPartitionLatestMetricsFromES(
clusterPhyId,
groupName,
topicName,
latestMetricNames == null? Arrays.asList(): latestMetricNames
);
// Convert the group metrics
List<GroupMetrics> esGroupMetricsList = groupMetricsResult.hasData()? groupMetricsResult.getData(): new ArrayList<>();
Map<Integer, GroupMetrics> esMetricsMap = new HashMap<>();
for (GroupMetrics groupMetrics: esGroupMetricsList) {
esMetricsMap.put(groupMetrics.getPartitionId(), groupMetrics);
}
List<GroupMetrics> allPartitionGroupMetrics = new ArrayList<>();
for (Integer partitionId: partitionIdList) {
allPartitionGroupMetrics.add(esMetricsMap.getOrDefault(partitionId, new GroupMetrics(clusterPhyId, groupName, topicName, partitionId)));
}
return PaginationUtil.pageBySubData(
(List<GroupMetrics>)PaginationMetricsUtil.sortMetrics(allPartitionGroupMetrics, dto.getSortField(), "partitionId", dto.getSortType()),
dto
);
}
}


@@ -0,0 +1,13 @@
package com.xiaojukeji.know.streaming.km.biz.kafkaacl;
import com.xiaojukeji.know.streaming.km.common.bean.dto.acl.AclAtomDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
/**
* Kafka ACL management
*/
public interface KafkaAclManager {
Result<Void> batchCreateKafkaAcl(List<AclAtomDTO> dtoList, String operator);
}
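
A thin usage sketch (hypothetical caller); as the implementation below shows, the batch stops at the first ACL that fails to create:

import com.xiaojukeji.know.streaming.km.biz.kafkaacl.KafkaAclManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.acl.AclAtomDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;

@Component
public class KafkaAclExampleCaller {
    @Autowired
    private KafkaAclManager kafkaAclManager;

    /** Create ACLs in bulk; returns the first failure, if any. */
    public Result<Void> createAcls(List<AclAtomDTO> dtoList, String operator) {
        return kafkaAclManager.batchCreateKafkaAcl(dtoList, operator);
    }
}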


@@ -0,0 +1,36 @@
package com.xiaojukeji.know.streaming.km.biz.kafkaacl.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.kafkaacl.KafkaAclManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.acl.AclAtomDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
@Service
public class KafkaAclManagerImpl implements KafkaAclManager {
private static final ILog log = LogFactory.getLog(KafkaAclManagerImpl.class);
@Autowired
private OpKafkaAclService opKafkaAclService;
@Override
public Result<Void> batchCreateKafkaAcl(List<AclAtomDTO> dtoList, String operator) {
log.debug("method=batchCreateKafkaAcl||dtoList={}||operator={}", ConvertUtil.obj2Json(dtoList), operator);
for (AclAtomDTO dto: dtoList) {
Result<Void> rv = opKafkaAclService.createKafkaAcl(KafkaAclConverter.convert2ACLAtomParam(dto), operator);
if (rv.failed()) {
return rv;
}
}
return Result.buildSuc();
}
}


@@ -0,0 +1,23 @@
package com.xiaojukeji.know.streaming.km.biz.kafkauser;
import com.xiaojukeji.know.streaming.km.common.bean.dto.kafkauser.ClusterKafkaUserTokenDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.kafkauser.KafkaUserTokenVO;
public interface KafkaUserManager {
/**
* Create a KafkaUser
*/
Result<Void> createKafkaUserWithTokenEncrypted(ClusterKafkaUserTokenDTO dto, String operator);
/**
* Modify a KafkaUser
*/
Result<Void> modifyKafkaUserWithTokenEncrypted(ClusterKafkaUserTokenDTO dto, String operator);
/**
* View the password (token)
*/
Result<KafkaUserTokenVO> getKafkaUserTokenWithEncrypt(Long clusterPhyId, String kafkaUser);
}
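
A hypothetical caller sketch. Note that the token travels AES-encrypted in both directions: the implementation below decrypts the DTO token with AESUtils before writing, and re-encrypts before returning:

import com.xiaojukeji.know.streaming.km.biz.kafkauser.KafkaUserManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.kafkauser.ClusterKafkaUserTokenDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.kafkauser.KafkaUserTokenVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class KafkaUserExampleCaller {
    @Autowired
    private KafkaUserManager kafkaUserManager;

    /** Create a user, then read back its (still encrypted) token. */
    public Result<KafkaUserTokenVO> createAndFetch(ClusterKafkaUserTokenDTO dto, String operator) {
        Result<Void> rv = kafkaUserManager.createKafkaUserWithTokenEncrypted(dto, operator);
        if (rv.failed()) {
            return Result.buildFromIgnoreData(rv);
        }
        return kafkaUserManager.getKafkaUserTokenWithEncrypt(dto.getClusterId(), dto.getKafkaUser());
    }
}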


@@ -0,0 +1,99 @@
package com.xiaojukeji.know.streaming.km.biz.kafkauser.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.util.PWEncryptUtil;
import com.xiaojukeji.know.streaming.km.biz.kafkauser.KafkaUserManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.kafkauser.ClusterKafkaUserTokenDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkauser.KafkaUser;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.kafkauser.KafkaUserReplaceParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaUserPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.kafkauser.KafkaUserTokenVO;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaUserVOConverter;
import com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.AESUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.kafkauser.KafkaUserService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
public class KafkaUserManagerImpl implements KafkaUserManager {
private static final ILog log = LogFactory.getLog(KafkaUserManagerImpl.class);
@Autowired
private KafkaUserService kafkaUserService;
@Override
public Result<Void> createKafkaUserWithTokenEncrypted(ClusterKafkaUserTokenDTO dto, String operator) {
if (!ClusterAuthTypeEnum.isScram(dto.getAuthType())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "unsupported authentication type");
}
String rawToken = AESUtils.decrypt(dto.getToken());
if (rawToken == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "failed to decrypt the KafkaUser token");
}
return kafkaUserService.createKafkaUser(new KafkaUserReplaceParam(dto.getClusterId(), dto.getKafkaUser(), rawToken), operator);
}
@Override
public Result<Void> modifyKafkaUserWithTokenEncrypted(ClusterKafkaUserTokenDTO dto, String operator) {
if (!ClusterAuthTypeEnum.isScram(dto.getAuthType())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "unsupported authentication type");
}
String rawToken = AESUtils.decrypt(dto.getToken());
if (rawToken == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "failed to decrypt the KafkaUser token");
}
return kafkaUserService.modifyKafkaUser(new KafkaUserReplaceParam(dto.getClusterId(), dto.getKafkaUser(), rawToken), operator);
}
@Override
public Result<KafkaUserTokenVO> getKafkaUserTokenWithEncrypt(Long clusterPhyId, String kafkaUser) {
Result<KafkaUserTokenVO> voResult = this.getKafkaUserToken(clusterPhyId, kafkaUser);
if (voResult.failed() || ValidateUtils.isNull(voResult.getData().getToken())) {
// Fetch failed or no token info: return directly
return voResult;
}
// Encrypt the token
voResult.getData().setToken(AESUtils.encrypt(voResult.getData().getToken()));
return voResult;
}
/**************************************************** private method ****************************************************/
private Result<KafkaUserTokenVO> getKafkaUserToken(Long clusterPhyId, String kafkaUser) {
Result<KafkaUser> kafkaUserResult = kafkaUserService.getKafkaUserFromKafka(clusterPhyId, kafkaUser);
if (kafkaUserResult.failed()) {
return Result.buildFromIgnoreData(kafkaUserResult);
}
KafkaUserPO kafkaUserPO = kafkaUserService.getKafkaUserFromDB(clusterPhyId, kafkaUser);
if (kafkaUserPO == null) {
// No data in the DB: return the data queried from Kafka directly
return Result.buildSuc(KafkaUserVOConverter.convert2KafkaUserTokenVO(kafkaUserResult.getData(), false, null));
}
try {
String rawToken = PWEncryptUtil.decode(kafkaUserPO.getToken());
if (kafkaUserService.isTokenEqual2CredentialProps(clusterPhyId, kafkaUserResult.getData().getProps(), rawToken)) {
// Consistent with the DB data
return Result.buildSuc(KafkaUserVOConverter.convert2KafkaUserTokenVO(kafkaUserResult.getData(), true, rawToken));
} else {
// Inconsistent with the DB data
return Result.buildSuc(KafkaUserVOConverter.convert2KafkaUserTokenVO(kafkaUserResult.getData(), false, rawToken));
}
} catch (Exception e) {
// Decoding the DB data failed: return the data queried from Kafka directly
return Result.buildSuc(KafkaUserVOConverter.convert2KafkaUserTokenVO(kafkaUserResult.getData(), false, null));
}
}
}


@@ -0,0 +1,33 @@
package com.xiaojukeji.know.streaming.km.biz.reassign;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.change.CreateChangeReplicasPlanDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.move.CreateMoveReplicaPlanDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.ReassignTopicOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.reassign.plan.ReassignPlanVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.reassign.ReassignTopicOverviewVO;
import java.util.List;
public interface ReassignManager {
/**
* Create the partition-reassignment plan JSON
* @param dtoList move-replica plan requests
* @return the generated plan
*/
Result<ReassignPlanVO> createReassignmentPlanJson(List<CreateMoveReplicaPlanDTO> dtoList);
/**
* Create the replica-count-change plan JSON
* @param dtoList replica-change plan requests
* @return the generated plan
*/
Result<ReassignPlanVO> createReplicaChangePlanJson(List<CreateChangeReplicasPlanDTO> dtoList);
/**
* Overview of the topics being reassigned
* @param dto query conditions
* @return overview of the reassigned topics
*/
Result<List<ReassignTopicOverviewVO>> getReassignmentTopicsOverview(ReassignTopicOverviewDTO dto);
}
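
A minimal, hypothetical caller sketch; plan generation is all-or-nothing, so one failing topic fails the whole batch (see the implementation below):

import com.xiaojukeji.know.streaming.km.biz.reassign.ReassignManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.move.CreateMoveReplicaPlanDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.reassign.plan.ReassignPlanVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;

@Component
public class ReassignExampleCaller {
    @Autowired
    private ReassignManager reassignManager;

    /** Build the reassignment JSON for a batch of move-replica requests. */
    public Result<ReassignPlanVO> buildPlan(List<CreateMoveReplicaPlanDTO> dtoList) {
        return reassignManager.createReassignmentPlanJson(dtoList);
    }
}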


@@ -0,0 +1,165 @@
package com.xiaojukeji.know.streaming.km.biz.reassign.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.reassign.ReassignManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.change.CreateChangeReplicasPlanDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.move.CreateMoveReplicaPlanDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.reassign.ReassignTopicOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.ReassignPlan;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.reassign.plan.ReassignPlanVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.reassign.ReassignTopicOverviewVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.reassign.plan.ReassignTopicPlanVO;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.converter.ReassignVOConverter;
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@Component
public class ReassignManagerImpl implements ReassignManager {
private static final ILog log = LogFactory.getLog(ReassignManagerImpl.class);
@Autowired
private ReassignService reassignService;
@Autowired
private TopicService topicService;
@Autowired
private TopicMetricService topicMetricService;
@Override
public Result<ReassignPlanVO> createReassignmentPlanJson(List<CreateMoveReplicaPlanDTO> dtoList) {
if (ValidateUtils.isEmptyList(dtoList)) {
return Result.buildSuc(new ReassignPlanVO(new ArrayList<>()));
}
List<ReassignTopicPlanVO> topicPlanList = new ArrayList<>();
for (CreateMoveReplicaPlanDTO planDTO: dtoList) {
Result<ReassignPlan> planResult = reassignService.generateReassignmentJson(
planDTO.getClusterId(),
planDTO.getTopicName(),
planDTO.getPartitionIdList(),
planDTO.getBrokerIdList(),
planDTO.getEnableRackAwareness()
);
if (planResult.failed()) {
                // Return the error directly on failure
return Result.buildFromIgnoreData(planResult);
}
            // Convert to the VO format
topicPlanList.add(ReassignVOConverter.convert2ReassignTopicPlanVO(planResult.getData()));
}
return Result.buildSuc(new ReassignPlanVO(topicPlanList));
}
@Override
public Result<ReassignPlanVO> createReplicaChangePlanJson(List<CreateChangeReplicasPlanDTO> dtoList) {
if (ValidateUtils.isEmptyList(dtoList)) {
return Result.buildSuc(new ReassignPlanVO(new ArrayList<>()));
}
List<ReassignTopicPlanVO> topicPlanList = new ArrayList<>();
for (CreateChangeReplicasPlanDTO planDTO: dtoList) {
Result<ReassignPlan> planResult = reassignService.generateReplicaChangeReassignmentJson(
planDTO.getClusterId(),
planDTO.getTopicName(),
planDTO.getNewReplicaNum(),
planDTO.getBrokerIdList()
);
if (planResult.failed()) {
                // Return the error directly on failure
return Result.buildFromIgnoreData(planResult);
}
            // Convert to the VO format
topicPlanList.add(ReassignVOConverter.convert2ReassignTopicPlanVO(planResult.getData()));
}
return Result.buildSuc(new ReassignPlanVO(topicPlanList));
}
@Override
public Result<List<ReassignTopicOverviewVO>> getReassignmentTopicsOverview(ReassignTopicOverviewDTO dto) {
Map<String, Topic> topicMap = topicService.listTopicsFromDB(dto.getClusterId()).stream().collect(Collectors.toMap(Topic::getTopicName, Function.identity()));
Map<String, ReassignTopicOverviewVO> voMap = new HashMap<>();
for (String topicName: dto.getTopicNameList()) {
Topic topic = topicMap.get(topicName);
if (topic == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(dto.getClusterId(), topicName));
}
ReassignTopicOverviewVO vo = ConvertUtil.obj2Obj(topic, ReassignTopicOverviewVO.class);
vo.setPartitionIdList(new ArrayList<>(topic.getPartitionMap().keySet()));
vo.setRetentionMs(topic.getRetentionMs());
vo.setLatestDaysAvgBytesInList(new ArrayList<>());
vo.setLatestDaysMaxBytesInList(new ArrayList<>());
voMap.put(topicName, vo);
}
Long now = System.currentTimeMillis();
        // Supplement metrics for the last three days
for (int idx = 0; idx < 3; ++idx) {
Long startTime = now - (idx + 1) * 24L * 60L * 60L * 1000L;
Long endTime = now - idx * 24L * 60L * 60L * 1000L;
            // Query the avg metric
Result<Map<String, MetricPointVO>> avgMetricMapResult = topicMetricService.getAggMetricPointFromES(
dto.getClusterId(),
dto.getTopicNameList(),
TopicMetricVersionItems.TOPIC_METRIC_BYTES_IN,
AggTypeEnum.AVG,
startTime,
endTime
);
Map<String, MetricPointVO> avgMetricMap = avgMetricMapResult.hasData()? avgMetricMapResult.getData(): new HashMap<>();
avgMetricMap.values().forEach(elem -> elem.setTimeStamp(endTime));
            // Query the max metric
Result<Map<String, MetricPointVO>> maxMetricMapResult = topicMetricService.getAggMetricPointFromES(
dto.getClusterId(),
dto.getTopicNameList(),
TopicMetricVersionItems.TOPIC_METRIC_BYTES_IN,
AggTypeEnum.MAX,
startTime,
endTime
);
Map<String, MetricPointVO> maxMetricMap = maxMetricMapResult.hasData()? maxMetricMapResult.getData(): new HashMap<>();
            // Fill the metrics into the VOs
this.supplyLatestMetrics(voMap, avgMetricMap, maxMetricMap);
}
return Result.buildSuc(new ArrayList<>(voMap.values()));
}
/**************************************************** private method ****************************************************/
private void supplyLatestMetrics(Map<String, ReassignTopicOverviewVO> voMap,
Map<String, MetricPointVO> avgMetricMap,
Map<String, MetricPointVO> maxMetricMap) {
for (Map.Entry<String, ReassignTopicOverviewVO> entry: voMap.entrySet()) {
entry.getValue().getLatestDaysAvgBytesInList().add(avgMetricMap.get(entry.getKey()));
entry.getValue().getLatestDaysMaxBytesInList().add(maxMetricMap.get(entry.getKey()));
}
}
}
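
The three-day metric loop in getReassignmentTopicsOverview above slices the last 72 hours into three day-wide [startTime, endTime) windows, with idx = 0 being the most recent day; each avg point is then stamped with its window's end time. A standalone check of the window arithmetic:

public class DayWindowSketch {
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long dayMs = 24L * 60L * 60L * 1000L;
        for (int idx = 0; idx < 3; ++idx) {
            long startTime = now - (idx + 1) * dayMs; // idx=0 is the most recent day
            long endTime = now - idx * dayMs;
            System.out.println("window " + idx + ": [" + startTime + ", " + endTime + ")");
        }
    }
}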

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.biz.self;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.self.SelfMetricsVO;
import java.util.Properties;
public interface SelfManager {
Result<SelfMetricsVO> metrics();
Result<Properties> version();
}

View File

@@ -0,0 +1,61 @@
package com.xiaojukeji.know.streaming.km.biz.self.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.vo.user.UserBriefVO;
import com.didiglobal.logi.security.service.UserService;
import com.xiaojukeji.know.streaming.km.biz.self.SelfManager;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.self.SelfMetricsVO;
import com.xiaojukeji.know.streaming.km.common.utils.GitPropUtil;
import com.xiaojukeji.know.streaming.km.common.utils.NetUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.km.KmNodeService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Properties;
@Component
public class SelfManagerImpl implements SelfManager {
private static final ILog log = LogFactory.getLog(SelfManagerImpl.class);
@Autowired
private BrokerService brokerService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private UserService userService;
@Autowired
private KmNodeService kmNodeService;
@Override
public Result<SelfMetricsVO> metrics() {
SelfMetricsVO vo = new SelfMetricsVO();
        // KnowStreaming's own info
vo.setKsIp(NetUtils.localIp());
vo.setKsClusterKey(NetUtils.localMac());
List<UserBriefVO> userBriefVOList = userService.getAllUserBriefList();
vo.setKsUserCount(ValidateUtils.isNull(userBriefVOList)? 0: userBriefVOList.size());
vo.setKsServerIps(kmNodeService.listKmHosts());
        // Managed Kafka cluster info
vo.setKafkaClusterCount(clusterPhyService.listAllClusters().size());
vo.setKafkaBrokerCount(brokerService.countAllBrokers());
return Result.buildSuc(vo);
}
@Override
public Result<Properties> version() {
return Result.buildSuc(GitPropUtil.getProps());
}
}

View File

@@ -0,0 +1,22 @@
package com.xiaojukeji.know.streaming.km.biz.topic;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicExpansionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
public interface OpTopicManager {
/**
     * Create a Topic
*/
Result<Void> createTopic(TopicCreateDTO dto, String operator);
/**
     * Delete a Topic
*/
Result<Void> deleteTopicCombineRes(Long clusterPhyId, String topicName, String operator);
/**
     * Expand partitions
*/
Result<Void> expandTopic(TopicExpansionDTO dto, String operator);
}

View File

@@ -0,0 +1,15 @@
package com.xiaojukeji.know.streaming.km.biz.topic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.kafkaconfig.KafkaTopicDefaultConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
public interface TopicConfigManager {
/**
     * Get the default Topic configs
     * @param clusterPhyId physical cluster ID
* @return
*/
Result<List<KafkaTopicDefaultConfig>> getDefaultTopicConfig(Long clusterPhyId);
}

View File

@@ -0,0 +1,25 @@
package com.xiaojukeji.know.streaming.km.biz.topic;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicRecordDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicBrokersPartitionsSummaryVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.broker.TopicBrokerAllVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.partition.TopicPartitionVO;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import java.util.List;
public interface TopicStateManager {
TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException;
Result<List<TopicRecordVO>> getTopicMessages(Long clusterPhyId, String topicName, TopicRecordDTO dto) throws AdminOperateException;
Result<TopicStateVO> getTopicState(Long clusterPhyId, String topicName);
Result<List<TopicPartitionVO>> getTopicPartitions(Long clusterPhyId, String topicName, List<String> metricsNames);
Result<TopicBrokersPartitionsSummaryVO> getTopicBrokersPartitionsSummary(Long clusterPhyId, String topicName);
}

View File

@@ -0,0 +1,173 @@
package com.xiaojukeji.know.streaming.km.biz.topic.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.topic.OpTopicManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicCreateDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicExpansionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.kafka.KafkaReplicaAssignUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.topic.OpTopicService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import kafka.admin.AdminUtils;
import kafka.admin.BrokerMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import scala.Option;
import scala.collection.Seq;
import scala.jdk.javaapi.CollectionConverters;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Component
public class OpTopicManagerImpl implements OpTopicManager {
private static final ILog log = LogFactory.getLog(OpTopicManagerImpl.class);
@Autowired
private TopicService topicService;
@Autowired
private BrokerService brokerService;
@Autowired
private OpTopicService opTopicService;
@Autowired
private ClusterPhyService clusterPhyService;
@Override
public Result<Void> createTopic(TopicCreateDTO dto, String operator) {
log.info("method=createTopic||param={}||operator={}.", dto, operator);
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(dto.getClusterId());
if (clusterPhy == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(dto.getClusterId()));
}
        // Build the assignmentMap
scala.collection.Map<Object, Seq<Object>> rawAssignmentMap = AdminUtils.assignReplicasToBrokers(
this.buildBrokerMetadataSeq(dto.getClusterId(), dto.getBrokerIdList()),
dto.getPartitionNum(),
dto.getReplicaNum(),
-1,
-1
);
        // Convert the Scala types to Java
Map<Integer, List<Integer>> assignmentMap = new HashMap<>();
        rawAssignmentMap.toStream().foreach(elem -> assignmentMap.put(
                (Integer) elem._1,
                CollectionConverters.asJava(elem._2).stream().map(item -> (Integer) item).collect(Collectors.toList()))
        );
        // Create the Topic
return opTopicService.createTopic(
new TopicCreateParam(
dto.getClusterId(),
dto.getTopicName(),
new HashMap<String, String>((Map) dto.getProperties()),
assignmentMap,
dto.getDescription()
),
operator
);
}
@Override
public Result<Void> deleteTopicCombineRes(Long clusterPhyId, String topicName, String operator) {
        // Delete the Topic
Result<Void> rv = opTopicService.deleteTopic(new TopicParam(clusterPhyId, topicName), operator);
if (rv.failed()) {
return rv;
}
        // Delete the Topic's related ACL info (currently a no-op; success is returned directly)
return Result.buildSuc();
}
@Override
@Transactional
public Result<Void> expandTopic(TopicExpansionDTO dto, String operator) {
Topic topic = topicService.getTopic(dto.getClusterId(), dto.getTopicName());
if (topic == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(dto.getClusterId(), dto.getTopicName()));
}
TopicPartitionExpandParam expandParam = new TopicPartitionExpandParam(
dto.getClusterId(),
dto.getTopicName(),
topic.getPartitionMap(),
this.generateNewPartitionAssignment(dto.getClusterId(), topic, dto.getBrokerIdList(), dto.getIncPartitionNum())
);
        // Update the partition count in DB; other info is refreshed by background tasks
Result<Void> rv = topicService.updatePartitionNum(topic.getClusterPhyId(), topic.getTopicName(), topic.getPartitionNum() + dto.getIncPartitionNum());
if (rv.failed()){
return rv;
}
rv = opTopicService.expandTopic(expandParam, operator);
if (rv.failed()) {
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return rv;
}
return rv;
}
/**************************************************** private method ****************************************************/
private Seq<BrokerMetadata> buildBrokerMetadataSeq(Long clusterPhyId, final List<Integer> selectedBrokerIdList) {
        // Select the broker list
List<Broker> brokerList = brokerService.listAliveBrokersFromDB(clusterPhyId).stream().filter( elem ->
selectedBrokerIdList == null || selectedBrokerIdList.contains(elem.getBrokerId())
).collect(Collectors.toList());
List<BrokerMetadata> brokerMetadataList = new ArrayList<>();
for (Broker broker: brokerList) {
brokerMetadataList.add(new BrokerMetadata(broker.getBrokerId(), Option.apply(broker.getRack())));
}
return CollectionConverters.asScala(brokerMetadataList);
}
private Map<Integer, List<Integer>> generateNewPartitionAssignment(Long clusterPhyId, Topic topic, List<Integer> brokerIdList, Integer incPartitionNum) {
if (ValidateUtils.isEmptyList(brokerIdList)) {
            // If the brokerId list is empty, use the cluster's currently alive brokers
brokerIdList = brokerService.listAliveBrokersFromDB(clusterPhyId).stream().map( elem -> elem.getBrokerId()).collect(Collectors.toList());
}
Map<Integer, String> brokerRackMap = new HashMap<>();
for (Broker broker: brokerService.listAliveBrokersFromDB(clusterPhyId)) {
            if (!brokerIdList.contains(broker.getBrokerId())) { // brokerIdList is non-null after the refill above
continue;
}
brokerRackMap.put(broker.getBrokerId(), broker.getRack() == null? "": broker.getRack());
}
        // Generate the new partition assignment
return KafkaReplicaAssignUtil.generateNewPartitionAssignment(brokerRackMap, topic.getPartitionMap(), incPartitionNum);
}
}
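
AdminUtils.assignReplicasToBrokers above is Kafka's own (rack-aware) assigner. Purely to illustrate the shape of its output map, here is a naive round-robin assignment; this is not Kafka's exact placement algorithm, which also staggers replica offsets per partition and honors racks:

import java.util.*;

public class RoundRobinAssignSketch {
    // Naive round-robin: partition p's replica r goes to brokers[(p + r) % brokers.size()].
    static Map<Integer, List<Integer>> assign(List<Integer> brokers, int partitions, int replicas) {
        Map<Integer, List<Integer>> assignment = new HashMap<>();
        for (int p = 0; p < partitions; p++) {
            List<Integer> replicaBrokers = new ArrayList<>();
            for (int r = 0; r < replicas; r++) {
                replicaBrokers.add(brokers.get((p + r) % brokers.size()));
            }
            assignment.put(p, replicaBrokers);
        }
        return assignment;
    }

    public static void main(String[] args) {
        System.out.println(assign(Arrays.asList(1, 2, 3), 6, 2));
        // e.g. {0=[1, 2], 1=[2, 3], 2=[3, 1], 3=[1, 2], 4=[2, 3], 5=[3, 1]}
    }
}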

View File

@@ -0,0 +1,95 @@
package com.xiaojukeji.know.streaming.km.biz.topic.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.topic.TopicConfigManager;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.kafkaconfig.KafkaConfigDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.kafkaconfig.KafkaTopicDefaultConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.broker.BrokerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaConfigConverter;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerConfigService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*;
@Component
public class TopicConfigManagerImpl extends BaseVersionControlService implements TopicConfigManager {
private static final ILog log = LogFactory.getLog(TopicConfigManagerImpl.class);
private static final String GET_DEFAULT_TOPIC_CONFIG = "getDefaultTopicConfig";
@Autowired
private BrokerService brokerService;
@Autowired
private BrokerConfigService brokerConfigService;
@Autowired
private TopicConfigService topicConfigService;
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.SERVICE_OP_TOPIC_CONFIG;
}
@PostConstruct
private void init() {
registerVCHandler(GET_DEFAULT_TOPIC_CONFIG, V_0_10_0_0, V_0_11_0_0, "getDefaultTopicConfigByLocal", this::getDefaultTopicConfigByLocal);
registerVCHandler(GET_DEFAULT_TOPIC_CONFIG, V_0_11_0_0, V_MAX, "getDefaultTopicConfigByClient", this::getDefaultTopicConfigByClient);
}
@Override
public Result<List<KafkaTopicDefaultConfig>> getDefaultTopicConfig(Long clusterPhyId) {
try {
List<Broker> aliveBrokerList = brokerService.listAliveBrokersFromDB(clusterPhyId);
Integer aliveBrokerId = null;
if (!aliveBrokerList.isEmpty()) {
aliveBrokerId = aliveBrokerList.get(0).getBrokerId();
}
return (Result<List<KafkaTopicDefaultConfig>>) doVCHandler(clusterPhyId, GET_DEFAULT_TOPIC_CONFIG, new BrokerParam(clusterPhyId, aliveBrokerId));
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(e.getResultStatus());
}
}
private Result<List<KafkaTopicDefaultConfig>> getDefaultTopicConfigByLocal(VersionItemParam itemParam) {
BrokerParam brokerParam = (BrokerParam) itemParam;
return Result.buildSuc(KafkaConfigConverter.convert2KafkaTopicDefaultConfigList(
topicConfigService.getConfigNamesAndDocs(brokerParam.getClusterPhyId()),
new HashMap<>()
));
}
private Result<List<KafkaTopicDefaultConfig>> getDefaultTopicConfigByClient(VersionItemParam itemParam) {
BrokerParam brokerParam = (BrokerParam) itemParam;
Result<List<KafkaConfigDetail>> defaultConfigResult = brokerConfigService.getBrokerConfigDetailFromKafka(brokerParam.getClusterPhyId(), brokerParam.getBrokerId());
if (defaultConfigResult.failed()) {
            // Failed to get the broker configs, but do not return the error directly
log.error("method=getDefaultTopicConfigByClient||param={}||result={}.", brokerParam, defaultConfigResult);
}
return Result.buildSuc(KafkaConfigConverter.convert2KafkaTopicDefaultConfigList(
topicConfigService.getConfigNamesAndDocs(brokerParam.getClusterPhyId()),
!defaultConfigResult.hasData()?
new HashMap<>():
defaultConfigResult.getData().stream().filter(elem -> !ValidateUtils.isNull(elem.getValue())).collect(Collectors.toMap(KafkaConfigDetail::getName, KafkaConfigDetail::getValue))
)
);
}
}
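
The registerVCHandler/doVCHandler pair above (inherited from BaseVersionControlService, which is not part of this hunk) implements a version-gated dispatch table: each handler claims a Kafka version range, and the cluster's version selects the handler. A minimal standalone sketch of the idea; the half-open range semantics and the numeric version encoding below are assumptions:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

public class VersionDispatchSketch {
    static class Handler {
        final long minVersion, maxVersion; // assumed half-open range [min, max)
        final Supplier<String> action;
        Handler(long min, long max, Supplier<String> action) {
            this.minVersion = min; this.maxVersion = max; this.action = action;
        }
    }

    private final List<Handler> handlers = new ArrayList<>();

    void register(long min, long max, Supplier<String> action) { handlers.add(new Handler(min, max, action)); }

    String dispatch(long clusterVersion) {
        for (Handler h : handlers) {
            if (clusterVersion >= h.minVersion && clusterVersion < h.maxVersion) {
                return h.action.get();
            }
        }
        throw new IllegalStateException("no handler for version " + clusterVersion); // analogous to VCHandlerNotExistException
    }

    public static void main(String[] args) {
        VersionDispatchSketch d = new VersionDispatchSketch();
        d.register(100_000L, 110_000L, () -> "getDefaultTopicConfigByLocal");    // hypothetical encodings of 0.10.x / 0.11.x
        d.register(110_000L, Long.MAX_VALUE, () -> "getDefaultTopicConfigByClient");
        System.out.println(d.dispatch(250_000L)); // -> getDefaultTopicConfigByClient
    }
}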

View File

@@ -0,0 +1,378 @@
package com.xiaojukeji.know.streaming.km.biz.topic.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.biz.topic.TopicStateManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicRecordDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.PartitionMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.vo.broker.BrokerReplicaSummaryVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicBrokersPartitionsSummaryVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicStateVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.broker.TopicBrokerAllVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.broker.TopicBrokerSingleVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.partition.TopicPartitionVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.converter.PartitionConverter;
import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.TopicConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.time.Duration;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@Component
public class TopicStateManagerImpl implements TopicStateManager {
private static final ILog log = LogFactory.getLog(TopicStateManagerImpl.class);
@Autowired
private TopicService topicService;
@Autowired
private BrokerService brokerService;
@Autowired
private PartitionService partitionService;
@Autowired
private PartitionMetricService partitionMetricService;
@Autowired
private TopicMetricService topicMetricService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private TopicConfigService topicConfigService;
@Override
public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException {
Topic topic = topicService.getTopic(clusterPhyId, topicName);
List<Partition> partitionList = partitionService.listPartitionByTopic(clusterPhyId, topicName);
Map<Integer, List<Partition>> brokerIdPartitionListMap = this.convert2BrokerIdPartitionListMap(partitionList);
Map<Integer, Broker> brokerMap = brokerService.listAllBrokerByTopic(clusterPhyId, topicName).stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
TopicBrokerAllVO allVO = new TopicBrokerAllVO();
allVO.setTotal(topic.getBrokerIdSet().size());
allVO.setLive((int)brokerMap.values().stream().filter(elem -> elem.alive()).count());
allVO.setDead(allVO.getTotal() - allVO.getLive());
allVO.setPartitionCount(topic.getPartitionNum());
allVO.setBrokerPartitionStateList(new ArrayList<>());
allVO.setUnderReplicatedPartitionIdList(new ArrayList<>());
allVO.setNoLeaderPartitionIdList(new ArrayList<>());
        // Fill in the no-leader and under-replicated partitions
for (Partition partition: partitionList) {
if (partition.getLeaderBrokerId() == null || Constant.INVALID_CODE == partition.getLeaderBrokerId()) {
allVO.getNoLeaderPartitionIdList().add(partition.getPartitionId());
}
if (partition.getInSyncReplicaList().size() != partition.getAssignReplicaList().size()) {
allVO.getUnderReplicatedPartitionIdList().add(partition.getPartitionId());
}
}
        // Fill in per-broker partition details
for (Integer brokerId: topic.getBrokerIdSet()) {
Broker broker = brokerMap.get(brokerId);
if (!ValidateUtils.isBlank(searchBrokerHost) && (broker == null || !broker.getHost().contains(searchBrokerHost))) {
                // Skip this broker if it does not match the searched host
continue;
}
allVO.getBrokerPartitionStateList().add(this.getTopicBrokerSingle(clusterPhyId, topicName, brokerIdPartitionListMap, brokerId, broker));
}
return allVO;
}
@Override
public Result<List<TopicRecordVO>> getTopicMessages(Long clusterPhyId, String topicName, TopicRecordDTO dto) throws AdminOperateException {
long startTime = System.currentTimeMillis();
        // Get the cluster
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
}
        // Get the partition end offsets
Result<Map<TopicPartition, Long>> endOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.latest(), null);
if (endOffsetsMapResult.failed()) {
return Result.buildFromIgnoreData(endOffsetsMapResult);
}
List<TopicRecordVO> voList = new ArrayList<>();
KafkaConsumer<String, String> kafkaConsumer = null;
try {
            // Create the kafka-consumer
kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()));
            // Subtract KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS because a single poll itself takes time; without the subtraction, the elapsed time after the poll could exceed the requested timeout
while (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS <= dto.getPullTimeoutUnitMs() && voList.size() < dto.getMaxRecords()) {
for (Map.Entry<TopicPartition, Long> entry: endOffsetsMapResult.getData().entrySet()) {
kafkaConsumer.assign(Arrays.asList(entry.getKey()));
kafkaConsumer.seek(entry.getKey(), Math.max(0, entry.getValue() - dto.getMaxRecords()));
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) {
continue;
}
voList.add(TopicVOConverter.convert2TopicRecordVO(topicName, consumerRecord));
if (voList.size() >= dto.getMaxRecords()) {
break;
}
}
                // Stop once the time budget is used up or enough records have been collected
                if (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS > dto.getPullTimeoutUnitMs()
                        || voList.size() >= dto.getMaxRecords()) {
break;
}
}
}
return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size())));
} catch (Exception e) {
log.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhyId, topicName, dto, e);
throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
} finally {
if (kafkaConsumer != null) {
try {
kafkaConsumer.close(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS));
} catch (Exception e) {
// ignore
}
}
}
}
@Override
public Result<TopicStateVO> getTopicState(Long clusterPhyId, String topicName) {
Topic topic = topicService.getTopic(clusterPhyId, topicName);
if (topic == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(clusterPhyId, topicName));
}
List<Partition> partitionList = partitionService.listPartitionByTopic(clusterPhyId, topicName);
if (partitionList == null) {
partitionList = new ArrayList<>();
}
TopicStateVO vo = new TopicStateVO();
        // Partition info
vo.setPartitionCount(topic.getPartitionNum());
        // A null or INVALID_CODE leader means no leader, matching the checks elsewhere in this class
        vo.setAllPartitionHaveLeader(partitionList.stream().noneMatch(elem -> elem.getLeaderBrokerId() == null || Constant.INVALID_CODE == elem.getLeaderBrokerId()));
        // Replica info
vo.setReplicaFactor(topic.getReplicaNum());
        vo.setAllReplicaInSync(partitionList.stream().noneMatch(elem -> elem.getInSyncReplicaList().size() != topic.getReplicaNum()));
        // Config info
Map<String, String> topicConfigMap = new HashMap<>();
Result<Map<String, String>> configResult = topicConfigService.getTopicConfigFromKafka(clusterPhyId, topicName);
if (configResult.hasData()) {
topicConfigMap = configResult.getData();
}
        // Min in-sync replicas
Integer minIsr = ConvertUtil.string2Integer(topicConfigMap.get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG));
if (minIsr == null) {
vo.setMinimumIsr(null);
vo.setAllPartitionMatchAtMinIsr(null);
} else {
vo.setMinimumIsr(minIsr);
            vo.setAllPartitionMatchAtMinIsr(partitionList.stream().noneMatch(elem -> elem.getInSyncReplicaList().size() < minIsr));
}
        // Cleanup/compaction policy
String cleanupPolicy = topicConfigMap.get(TopicConfig.CLEANUP_POLICY_CONFIG);
if (ValidateUtils.isBlank(cleanupPolicy)) {
vo.setCompacted(null);
} else {
vo.setCompacted(cleanupPolicy.contains(TopicConfig.CLEANUP_POLICY_COMPACT));
}
return Result.buildSuc(vo);
}
@Override
public Result<List<TopicPartitionVO>> getTopicPartitions(Long clusterPhyId, String topicName, List<String> metricsNames) {
List<Partition> partitionList = partitionService.listPartitionByTopic(clusterPhyId, topicName);
if (ValidateUtils.isEmptyList(partitionList)) {
return Result.buildSuc();
}
Result<List<PartitionMetrics>> metricsResult = partitionMetricService.getLatestMetricsFromES(clusterPhyId, topicName, metricsNames);
if (metricsResult.failed()) {
            // Only log the error; do not return it directly
log.error(
"class=TopicStateManagerImpl||method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from es failed",
clusterPhyId, topicName, metricsResult
);
}
        // Convert to a map keyed by partitionId
Map<Integer, PartitionMetrics> metricsMap = new HashMap<>();
if (metricsResult.hasData()) {
for (PartitionMetrics metrics: metricsResult.getData()) {
metricsMap.put(metrics.getPartitionId(), metrics);
}
}
List<TopicPartitionVO> voList = new ArrayList<>();
for (Partition partition: partitionList) {
voList.add(TopicVOConverter.convert2TopicPartitionVO(partition, metricsMap.get(partition.getPartitionId())));
}
return Result.buildSuc(voList);
}
@Override
public Result<TopicBrokersPartitionsSummaryVO> getTopicBrokersPartitionsSummary(Long clusterPhyId, String topicName) {
List<Partition> partitionList = partitionService.listPartitionByTopic(clusterPhyId, topicName);
Map<Integer, Broker> brokerMap = brokerService.listAllBrokerByTopic(clusterPhyId, topicName).stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
TopicBrokersPartitionsSummaryVO vo = new TopicBrokersPartitionsSummaryVO();
        // Broker statistics
vo.setBrokerCount(brokerMap.size());
vo.setLiveBrokerCount((int)brokerMap.values().stream().filter(elem -> elem.alive()).count());
vo.setDeadBrokerCount(vo.getBrokerCount() - vo.getLiveBrokerCount());
        // Partition statistics
vo.setPartitionCount(partitionList.size());
vo.setNoLeaderPartitionCount(0);
vo.setUnderReplicatedPartitionCount(0);
        // Count the no-leader and under-replicated partitions
for (Partition partition: partitionList) {
if (partition.getLeaderBrokerId() == null || Constant.INVALID_CODE == partition.getLeaderBrokerId()) {
vo.setNoLeaderPartitionCount(vo.getNoLeaderPartitionCount() + 1);
}
if (partition.getInSyncReplicaList().size() != partition.getAssignReplicaList().size()) {
vo.setUnderReplicatedPartitionCount(vo.getUnderReplicatedPartitionCount() + 1);
}
}
return Result.buildSuc(vo);
}
/**************************************************** private method ****************************************************/
private boolean checkIfIgnore(ConsumerRecord<String, String> consumerRecord, String filterKey, String filterValue) {
if (filterKey != null && consumerRecord.key() == null) {
// ignore
return true;
}
if (filterKey != null && consumerRecord.key() != null && !consumerRecord.key().contains(filterKey)) {
return true;
}
if (filterValue != null && consumerRecord.value() == null) {
// ignore
return true;
}
if (filterValue != null && consumerRecord.value() != null && !consumerRecord.value().contains(filterValue)) {
return true;
}
return false;
}
private TopicBrokerSingleVO getTopicBrokerSingle(Long clusterPhyId,
String topicName,
Map<Integer, List<Partition>> brokerIdPartitionListMap,
Integer brokerId,
Broker broker) {
TopicBrokerSingleVO singleVO = new TopicBrokerSingleVO();
singleVO.setBrokerId(brokerId);
singleVO.setHost(broker != null? broker.getHost(): null);
singleVO.setAlive(broker != null && broker.alive());
TopicMetrics metrics = topicMetricService.getTopicLatestMetricsFromES(clusterPhyId, brokerId, topicName, Arrays.asList(
TopicMetricVersionItems.TOPIC_METRIC_BYTES_IN,
TopicMetricVersionItems.TOPIC_METRIC_BYTES_OUT
));
if (metrics != null) {
singleVO.setBytesInOneMinuteRate(metrics.getMetrics().get(TopicMetricVersionItems.TOPIC_METRIC_BYTES_IN));
singleVO.setBytesOutOneMinuteRate(metrics.getMetrics().get(TopicMetricVersionItems.TOPIC_METRIC_BYTES_OUT));
}
singleVO.setReplicaList(this.getBrokerReplicaSummaries(brokerId, brokerIdPartitionListMap.getOrDefault(brokerId, new ArrayList<>())));
return singleVO;
}
private List<BrokerReplicaSummaryVO> getBrokerReplicaSummaries(Integer brokerId, List<Partition> partitionList) {
List<BrokerReplicaSummaryVO> voList = new ArrayList<>();
for (Partition partition: partitionList) {
BrokerReplicaSummaryVO summaryVO = new BrokerReplicaSummaryVO();
summaryVO.setTopicName(partition.getTopicName());
summaryVO.setPartitionId(partition.getPartitionId());
summaryVO.setLeaderBrokerId(partition.getLeaderBrokerId());
summaryVO.setIsLeaderReplace(brokerId.equals(partition.getLeaderBrokerId()));
summaryVO.setInSync(partition.getInSyncReplicaList().contains(brokerId));
voList.add(summaryVO);
}
return voList;
}
private Map<Integer, List<Partition>> convert2BrokerIdPartitionListMap(List<Partition> partitionList) {
Map<Integer, List<Partition>> brokerIdPartitionListMap = new HashMap<>();
for (Partition partition: partitionList) {
for (Integer brokerId: partition.getAssignReplicaList()) {
brokerIdPartitionListMap.putIfAbsent(brokerId, new ArrayList<>());
brokerIdPartitionListMap.get(brokerId).add(partition);
}
}
return brokerIdPartitionListMap;
}
private Properties generateClientProperties(ClusterPhy clusterPhy, Integer maxPollRecords) {
Properties props = ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class);
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Math.max(2, Math.min(5, maxPollRecords)));
return props;
}
}
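
getTopicMessages above tail-reads each partition: seek to endOffset - maxRecords, then poll under a time budget. A minimal single-partition version of the same pattern using only the public kafka-clients API; the bootstrap address, topic, and partition below are placeholders:

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class TailReadSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        int maxRecords = 10;
        TopicPartition tp = new TopicPartition("demo-topic", 0); // placeholder topic/partition

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(Collections.singletonList(tp));
            long endOffset = consumer.endOffsets(Collections.singletonList(tp)).get(tp);
            consumer.seek(tp, Math.max(0, endOffset - maxRecords)); // start maxRecords before the tail
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(2000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + ": " + record.value());
            }
        }
    }
}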

View File

@@ -0,0 +1,52 @@
package com.xiaojukeji.know.streaming.km.biz.version;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.UserMetricConfigDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.config.metric.UserMetricConfigVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.version.VersionItemVO;
import java.util.List;
import java.util.Map;
public interface VersionControlManager {
/**
     * Query all current compatibility (metrics, front-end operations) configuration items
* @return
*/
Result<Map<String, VersionItemVO>> listAllVersionItem();
/**
     * Get all Kafka versions supported by the current KS
* @return
*/
Result<Map<String, Long>> listAllVersions();
/**
     * Get all metrics of the given type for cluster clusterId, whether supported or not
* @param clusterId
* @param type
* @return
*/
Result<List<VersionItemVO>> listClusterVersionControlItem(Long clusterId, Integer type);
/**
     * Get the metric display configuration set by the current user
* @param clusterId
* @param type
* @param operator
* @return
*/
Result<List<UserMetricConfigVO>> listUserMetricItem(Long clusterId, Integer type, String operator);
/**
     * Update the user's configured metric items
* @param clusterId
* @param type
* @param userMetricConfigDTO
* @param operator
* @return
*/
Result<Void> updateUserMetricItem(Long clusterId, Integer type,
UserMetricConfigDTO userMetricConfigDTO, String operator);
}
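
A hedged caller sketch for updateUserMetricItem. The implementation only reads dto.getMetricsSet(), so the setter below is an assumption (likely Lombok-generated), and the metric name is hypothetical:

import com.xiaojukeji.know.streaming.km.biz.version.VersionControlManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.UserMetricConfigDTO;
import java.util.HashMap;
import java.util.Map;

public class MetricPanelConfigSketch {
    private final VersionControlManager versionControlManager;

    public MetricPanelConfigSketch(VersionControlManager versionControlManager) {
        this.versionControlManager = versionControlManager;
    }

    public void showOneMetric(Long clusterId, Integer metricType, String operator) {
        UserMetricConfigDTO dto = new UserMetricConfigDTO();
        Map<String, Boolean> metricsSet = new HashMap<>();
        metricsSet.put("HealthScore", Boolean.TRUE); // hypothetical metric name: switch it on
        dto.setMetricsSet(metricsSet);               // assumed setter mirroring getMetricsSet()
        versionControlManager.updateUserMetricItem(clusterId, metricType, dto, operator);
    }
}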

View File

@@ -0,0 +1,247 @@
package com.xiaojukeji.know.streaming.km.biz.version.impl;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.TypeReference;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.config.ConfigDTO;
import com.didiglobal.logi.security.service.ConfigService;
import com.xiaojukeji.know.streaming.km.biz.version.VersionControlManager;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.UserMetricConfigDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.metric.UserMetricConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionControlItem;
import com.xiaojukeji.know.streaming.km.common.bean.vo.config.metric.UserMetricConfigVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.version.VersionItemVO;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.VersionUtil;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.V_MAX;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.GroupMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems.*;
@Service
public class VersionControlManagerImpl implements VersionControlManager {
protected static final ILog LOGGER = LogFactory.getLog(VersionControlManagerImpl.class);
private static final String NOT_SUPPORT_DESC = "(该指标只支持%s及以上的版本)";
private static final String NOT_SUPPORT_DESC1 = "(该指标只支持%s及以上和%s以下的版本)";
private static final String CONFIG_GROUP = "UserMetricConfig";
Set<UserMetricConfig> defaultMetrics = new HashSet<>();
@PostConstruct
public void init(){
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_HEALTH_SCORE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_TOTAL_PRODUCE_REQUESTS, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_FETCH_REQ, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_FAILED_PRODUCE_REQ, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGE_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_UNDER_REPLICA_PARTITIONS, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_OUT, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_REJECTED, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_HEALTH_SCORE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_PRODUCE_REQ, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_TOTAL_LOG_SIZE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_CONNECTIONS, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_MESSAGES_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_BYTES_OUT, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_GROUP_REBALANCES, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_JOB_RUNNING, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITIONS_NO_LEADER, true));
defaultMetrics.add(new UserMetricConfig(METRIC_CLUSTER.getCode(), CLUSTER_METRIC_PARTITION_URP, true));
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_OFFSET_CONSUMED, true));
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_LAG, true));
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_STATE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_GROUP.getCode(), GROUP_METRIC_HEALTH_SCORE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_HEALTH_SCORE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_REQ_QUEUE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_RES_QUEUE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_MESSAGE_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_TOTAL_PRODUCE_REQ, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_NETWORK_RPO_AVG_IDLE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_REQ_AVG_IDLE, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_CONNECTION_COUNT, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_BYTES_OUT, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_PARTITIONS_SKEW, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_LEADERS_SKEW, true));
defaultMetrics.add(new UserMetricConfig(METRIC_BROKER.getCode(), BROKER_METRIC_UNDER_REPLICATE_PARTITION, true));
}
@Autowired
private VersionControlService versionControlService;
@Autowired
private ConfigService configService;
@Override
public Result<Map<String, VersionItemVO>> listAllVersionItem() {
List<VersionItemVO> allVersionItemVO = new ArrayList<>();
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_TOPIC.getCode()), VersionItemVO.class));
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_CLUSTER.getCode()), VersionItemVO.class));
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_GROUP.getCode()), VersionItemVO.class));
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_BROKER.getCode()), VersionItemVO.class));
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_PARTITION.getCode()), VersionItemVO.class));
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(METRIC_REPLICATION.getCode()), VersionItemVO.class));
allVersionItemVO.addAll(ConvertUtil.list2List(versionControlService.listVersionControlItem(WEB_OP.getCode()), VersionItemVO.class));
Map<String, VersionItemVO> map = allVersionItemVO.stream().collect(
Collectors.toMap(u -> u.getType() + "@" + u.getName(), Function.identity() ));
return Result.buildSuc(map);
}
@Override
public Result<Map<String, Long>> listAllVersions() {
return Result.buildSuc(VersionEnum.allVersionsWithOutMax());
}
@Override
public Result<List<VersionItemVO>> listClusterVersionControlItem(Long clusterId, Integer type) {
List<VersionControlItem> allItem = versionControlService.listVersionControlItem(type);
List<VersionItemVO> versionItemVOS = new ArrayList<>();
for (VersionControlItem item : allItem){
VersionItemVO itemVO = ConvertUtil.obj2Obj(item, VersionItemVO.class);
boolean support = versionControlService.isClusterSupport(clusterId, item);
itemVO.setSupport(support);
itemVO.setDesc(itemSupportDesc(item, support));
versionItemVOS.add(itemVO);
}
return Result.buildSuc(versionItemVOS);
}
@Override
public Result<List<UserMetricConfigVO>> listUserMetricItem(Long clusterId, Integer type, String operator) {
Result<List<VersionItemVO>> ret = listClusterVersionControlItem(clusterId, type);
if(null == ret || ret.failed()){
return Result.buildFail();
}
List<UserMetricConfigVO> userMetricConfigVOS = new ArrayList<>();
List<VersionItemVO> allVersionItemVOs = ret.getData();
Set<UserMetricConfig> userMetricConfigs = getUserMetricConfig(operator);
Map<String, UserMetricConfig> userMetricConfigMap = userMetricConfigs.stream().collect(
Collectors.toMap(u -> u.getType() + "@" + u.getMetric(), Function.identity() ));
for(VersionItemVO itemVO : allVersionItemVOs){
UserMetricConfigVO userMetricConfigVO = new UserMetricConfigVO();
int itemType = itemVO.getType();
String metric = itemVO.getName();
UserMetricConfig umc = userMetricConfigMap.get(itemType + "@" + metric);
userMetricConfigVO.setSet(null != umc && umc.isSet());
userMetricConfigVO.setName(itemVO.getName());
userMetricConfigVO.setType(itemVO.getType());
userMetricConfigVO.setDesc(itemVO.getDesc());
userMetricConfigVO.setMinVersion(itemVO.getMinVersion());
userMetricConfigVO.setMaxVersion(itemVO.getMaxVersion());
userMetricConfigVO.setSupport(itemVO.getSupport());
userMetricConfigVOS.add(userMetricConfigVO);
}
LOGGER.debug("method=listUserMetricItem||clusterId={}||type={}||operator={}||userMetricConfigs={}||userMetricConfigVO={}",
clusterId, type, operator, JSON.toJSONString(userMetricConfigs), JSON.toJSONString(userMetricConfigVOS));
return Result.buildSuc(userMetricConfigVOS);
}
@Override
public Result<Void> updateUserMetricItem(Long clusterId, Integer type, UserMetricConfigDTO dto, String operator) {
Map<String, Boolean> metricsSetMap = dto.getMetricsSet();
if(null == metricsSetMap || metricsSetMap.isEmpty()){
return Result.buildSuc();
}
Set<UserMetricConfig> userMetricConfigs = getUserMetricConfig(operator);
for(Map.Entry<String, Boolean> metricAndShowEntry : metricsSetMap.entrySet()){
UserMetricConfig userMetricConfig = new UserMetricConfig(type, metricAndShowEntry.getKey(), metricAndShowEntry.getValue());
userMetricConfigs.remove(userMetricConfig);
userMetricConfigs.add(userMetricConfig);
}
ConfigDTO configDTO = new ConfigDTO();
configDTO.setValueGroup(CONFIG_GROUP);
configDTO.setValueName(operator);
configDTO.setValue(JSON.toJSONString(userMetricConfigs));
configDTO.setOperator(operator);
configDTO.setStatus(1);
com.didiglobal.logi.security.common.Result<Void> result = configService.editConfig(configDTO, operator);
LOGGER.debug("method=updateUserMetricItem||clusterId={}||type={}||operator={}||userMetricConfigs={}||metricsSetMap={}",
clusterId, type, operator, JSON.toJSONString(userMetricConfigs), JSON.toJSONString(metricsSetMap));
return Result.build(result.successed());
}
/**************************************************** private method ****************************************************/
private String itemSupportDesc(VersionControlItem item, boolean support){
if(support){return item.getDesc();}
boolean bMaxVersion = (item.getMaxVersion() == V_MAX.getVersionL().longValue());
String minVersion = VersionUtil.dNormailze(item.getMinVersion());
String maxVersion = VersionUtil.dNormailze(item.getMaxVersion());
if(bMaxVersion){
return item.getDesc() + String.format(NOT_SUPPORT_DESC, minVersion);
}
return item.getDesc() + String.format(NOT_SUPPORT_DESC1, minVersion, maxVersion);
}
private Set<UserMetricConfig> getUserMetricConfig(String operator){
String value = configService.stringSetting(CONFIG_GROUP, operator, "");
if(StringUtils.isEmpty(value)){
return defaultMetrics;
}
return JSON.parseObject(value, new TypeReference<Set<UserMetricConfig>>(){});
}
    // Ad-hoc local test of the UserMetricConfig JSON serialization round-trip
    public static void main(String[] args){
Set<UserMetricConfig> defaultMetrics = new HashSet<>();
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_BYTES_IN, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGES, true));
defaultMetrics.add(new UserMetricConfig(METRIC_TOPIC.getCode(), TOPIC_METRIC_MESSAGES, true));
String value = JSON.toJSONString(defaultMetrics);
Set<UserMetricConfig> userMetricConfigs = JSON.parseObject(value, new TypeReference<Set<UserMetricConfig>>(){});
System.out.println(value);
}
}