Connect-related code

This commit is contained in:
zengqiao
2022-12-06 18:13:44 +08:00
committed by EricZeng
parent fb5964af84
commit e8652d5db5
169 changed files with 6472 additions and 317 deletions

View File

@@ -24,6 +24,17 @@ public class CollectedMetricsLocalCache {
.maximumSize(10000)
.build();
private static final Cache<String, Float> connectClusterMetricsCache = Caffeine.newBuilder()
.expireAfterWrite(90, TimeUnit.SECONDS)
.maximumSize(10000)
.build();
private static final Cache<String, Float> connectorMetricsCache = Caffeine.newBuilder()
.expireAfterWrite(90, TimeUnit.SECONDS)
.maximumSize(10000)
.build();
public static Float getBrokerMetrics(String brokerMetricKey) {
return brokerMetricsCache.getIfPresent(brokerMetricKey);
}
@@ -59,6 +70,28 @@ public class CollectedMetricsLocalCache {
partitionMetricsCache.put(partitionMetricsKey, metricsList);
}
public static void putConnectClusterMetrics(String connectClusterMetricKey, Float value) {
if (value == null) {
return;
}
connectClusterMetricsCache.put(connectClusterMetricKey, value);
}
public static Float getConnectClusterMetrics(String connectClusterMetricKey) {
return connectClusterMetricsCache.getIfPresent(connectClusterMetricKey);
}
public static void putConnectorMetrics(String connectorMetricKey, Float value) {
if (value == null) {
return;
}
connectorMetricsCache.put(connectorMetricKey, value);
}
public static Float getConnectorMetrics(String connectorMetricKey) {
return connectorMetricsCache.getIfPresent(connectorMetricKey);
}
public static String genBrokerMetricKey(Long clusterPhyId, Integer brokerId, String metricName) {
return clusterPhyId + "@" + brokerId + "@" + metricName;
}
@@ -71,6 +104,16 @@ public class CollectedMetricsLocalCache {
return clusterPhyId + "@" + brokerId + "@" + topicName + "@" + partitionId + "@" + metricName;
}
public static String genConnectClusterMetricCacheKey(Long connectClusterId, String metricName) {
return connectClusterId + "@" + metricName;
}
public static String genConnectorMetricCacheKey(Long connectClusterId, String connectorName, String metricName) {
return connectClusterId + "@" + connectorName + '@' + metricName;
}
/**************************************************** private method ****************************************************/
private CollectedMetricsLocalCache() {
}
}
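The two new caches follow the same read-through pattern as the existing broker and partition caches: generate a composite key, probe the cache, and fall back to collection on a miss, writing the result back. A minimal self-contained sketch of that pattern with Caffeine (the class name and the JMX stand-in are illustrative, not part of this commit):

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.concurrent.TimeUnit;

public class ConnectMetricCacheSketch {
    // Same policy as CollectedMetricsLocalCache: 90s TTL, bounded size
    private static final Cache<String, Float> cache = Caffeine.newBuilder()
            .expireAfterWrite(90, TimeUnit.SECONDS)
            .maximumSize(10000)
            .build();

    // Read-through: probe the cache, collect on a miss, then populate
    public static Float getOrCollect(Long connectClusterId, String metricName) {
        String key = connectClusterId + "@" + metricName; // same scheme as genConnectClusterMetricCacheKey
        Float cached = cache.getIfPresent(key);
        if (cached != null) {
            return cached;
        }
        Float collected = collectFromJmx(connectClusterId, metricName); // hypothetical collector
        if (collected != null) {
            cache.put(key, collected);
        }
        return collected;
    }

    // Stand-in for the real JMX read done by the metric services
    private static Float collectFromJmx(Long connectClusterId, String metricName) {
        return 1.0F;
    }
}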

View File

@@ -17,6 +17,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
@@ -47,7 +48,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class KafkaAclServiceImpl extends BaseVersionControlService implements KafkaAclService {
public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implements KafkaAclService {
private static final ILog log = LogFactory.getLog(KafkaAclServiceImpl.class);
private static final String ACL_GET_FROM_KAFKA = "getAclFromKafka";

View File

@@ -19,6 +19,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
@@ -47,7 +48,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class OpKafkaAclServiceImpl extends BaseVersionControlService implements OpKafkaAclService {
public class OpKafkaAclServiceImpl extends BaseKafkaVersionControlService implements OpKafkaAclService {
private static final ILog log = LogFactory.getLog(OpKafkaAclServiceImpl.class);
private static final String ACL_CREATE = "createKafkaAcl";

View File

@@ -22,6 +22,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerConfigService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
@@ -42,7 +43,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class BrokerConfigServiceImpl extends BaseVersionControlService implements BrokerConfigService {
public class BrokerConfigServiceImpl extends BaseKafkaVersionControlService implements BrokerConfigService {
private static final ILog log = LogFactory.getLog(BrokerConfigServiceImpl.class);
private static final String GET_BROKER_CONFIG = "getBrokerConfig";

View File

@@ -8,8 +8,8 @@ import com.github.benmanes.caffeine.cache.Caffeine;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.broker.BrokerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.broker.BrokerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
@@ -26,12 +26,12 @@ import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO;
import kafka.zk.BrokerIdsZNode;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.Node;
@@ -54,7 +54,7 @@ import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.VERSION;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.JMX_SERVER_APP_INFO;
@Service
public class BrokerServiceImpl extends BaseVersionControlService implements BrokerService {
public class BrokerServiceImpl extends BaseKafkaVersionControlService implements BrokerService {
private static final ILog log = LogFactory.getLog(BrokerServiceImpl.class);
private static final String BROKER_LOG_DIR = "getLogDir";

View File

@@ -16,7 +16,6 @@ import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl.KafkaZKDAOImpl;
import kafka.server.KafkaConfig;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -35,7 +34,6 @@ import java.util.*;
* @author zengqiao
* @date 22/02/28
*/
@Slf4j
@Service
public class ClusterValidateServiceImpl implements ClusterValidateService {
private static final ILog logger = LogFactory.getLog(KafkaZKDAOImpl.class);

View File

@@ -4,7 +4,6 @@ import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSor
import com.xiaojukeji.know.streaming.km.common.bean.po.ControllerChangeLogPO;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ControllerChangeLogService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.ControllerChangeLogDAO;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -12,7 +11,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Slf4j
@Service
public class ControllerChangeLogServiceImpl implements ControllerChangeLogService {

View File

@@ -0,0 +1,27 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectClustersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import java.util.List;
/**
* @author didi
*/
public interface ConnectClusterMetricService {
/**
* Collect metrics from Kafka
*/
Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String metricName);
Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String metricName);
/**
* Fetch aggregated metric lines over a time range from ES
*/
Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectClustersDTO dto);
boolean isMetricName(String str);
}
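Callers are expected to prefer the cache-first variant and handle the Result wrapper defensively. A usage sketch against this interface (the helper class is illustrative):

import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService;

public class ConnectClusterMetricReadSketch {
    // Cache-first read; returns null when the metric is unavailable
    public static Float readMetric(ConnectClusterMetricService service, Long connectClusterPhyId, String metricName) {
        Result<ConnectClusterMetrics> ret =
                service.collectConnectClusterMetricsFromKafkaWithCacheFirst(connectClusterPhyId, metricName);
        if (ret == null || !ret.hasData()) {
            return null;
        }
        return ret.getData().getMetrics().get(metricName);
    }
}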

View File

@@ -0,0 +1,34 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
/**
* Connect-Cluster
*/
public interface ConnectClusterService {
Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata);
List<ConnectCluster> listByKafkaCluster(Long kafkaClusterPhyId);
List<ConnectCluster> listAllClusters();
ConnectCluster getById(Long connectClusterId);
ConnectCluster getByName(Long clusterPhyId, String connectClusterName);
String getClusterVersion(Long connectClusterId);
String getClusterName(Long connectClusterId);
Result<Void> deleteInDB(Long connectClusterId, String operator);
Result<Void> batchModifyInDB(List<ConnectClusterDTO> dtoList, String operator);
Boolean existConnectClusterDown(Long kafkaClusterPhyId);
}

View File

@@ -0,0 +1,270 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectClustersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectWorkerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.connect.ConnectClusterMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.ConnectClusterMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
/**
* @author didi
*/
@Service
public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService implements ConnectClusterMetricService {
protected static final ILog LOGGER = LogFactory.getLog(ConnectClusterMetricServiceImpl.class);
public static final String CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG = "getWorkerMetricAvg";
public static final String CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM = "getWorkerMetricSum";
public static final String CONNECT_CLUSTER_METHOD_DO_NOTHING = "doNothing";
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private ConnectClusterMetricESDAO connectClusterMetricESDAO;
@Autowired
private ConnectJMXClient connectJMXClient;
@Autowired
private WorkerService workerService;
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.METRIC_CONNECT_CLUSTER;
}
@Override
protected List<String> listMetricPOFields() {
return BeanUtil.listBeanFields(BrokerMetricPO.class);
}
@Override
protected void initRegisterVCHandler() {
registerVCHandler(CONNECT_CLUSTER_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG, this::getConnectWorkerMetricAvg);
registerVCHandler(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum);
}
@Override
public Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String metric) {
String connectClusterMetricKey = CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterPhyId, metric);
Float keyValue = CollectedMetricsLocalCache.getConnectClusterMetrics(connectClusterMetricKey);
if (keyValue != null) {
ConnectClusterMetrics connectClusterMetrics = ConnectClusterMetrics.initWithMetric(connectClusterPhyId, metric, keyValue);
return Result.buildSuc(connectClusterMetrics);
}
Result<ConnectClusterMetrics> ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, metric);
if (ret == null || !ret.hasData()) {
return ret;
}
Map<String, Float> metricsMap = ret.getData().getMetrics();
for (Map.Entry<String, Float> entry : metricsMap.entrySet()) {
    // cache under the same composite key used for lookups, not the bare metric name
    CollectedMetricsLocalCache.putConnectClusterMetrics(
            CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterPhyId, entry.getKey()), entry.getValue());
}
return ret;
}
@Override
public Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String metric) {
try {
ConnectClusterMetricParam metricParam = new ConnectClusterMetricParam(connectClusterPhyId, metric);
return (Result<ConnectClusterMetrics>) doVCHandler(connectClusterPhyId, metric, metricParam);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectClustersDTO dto) {
Long startTime = dto.getStartTime();
Long endTime = dto.getEndTime();
Integer topN = dto.getTopNu();
String aggType = dto.getAggType();
List<Long> connectClusterIdList = dto.getConnectClusterIdList();
List<String> metricNameList = dto.getMetricsNames();
Table<String, Long, List<MetricPointVO>> retTable;
if (ValidateUtils.isEmptyList(connectClusterIdList)) {
// fetch by TopN
List<Long> defaultConnectClusterIdList = this.listTopNConnectClusterIdList(clusterPhyId, topN);
retTable = connectClusterMetricESDAO.listMetricsByTop(clusterPhyId, defaultConnectClusterIdList, metricNameList, aggType, topN, startTime, endTime);
} else {
// fetch by the specified connect cluster IDs
retTable = connectClusterMetricESDAO.listMetricsByConnectClusterIdList(clusterPhyId, metricNameList, aggType, connectClusterIdList, startTime, endTime);
}
return Result.buildSuc(this.metricMap2VO(clusterPhyId, retTable.rowMap()));
}
@Override
public boolean isMetricName(String str) {
return super.isMetricName(str);
}
/**************************************************** private method ****************************************************/
private Result<ConnectClusterMetrics> doNothing(VersionItemParam metricParam) {
ConnectClusterMetricParam param = (ConnectClusterMetricParam) metricParam;
return Result.buildSuc(new ConnectClusterMetrics(null, param.getConnectClusterId()));
}
private Result<ConnectClusterMetrics> getConnectWorkerMetricAvg(VersionItemParam metricParam) {
ConnectClusterMetricParam param = (ConnectClusterMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String metric = param.getMetric();
Result<List<ConnectWorkerMetrics>> ret = this.getConnectWorkerMetricsByJMX(connectClusterId, metric);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
// average across workers: sum here, divide by worker count below
Float value = ret.getData().stream().map(elem -> elem.getMetric(metric) == null ? 0 : elem.getMetric(metric)).reduce(Float::sum).get();
ConnectClusterMetrics connectClusterMetrics = new ConnectClusterMetrics(null, connectClusterId);
connectClusterMetrics.putMetric(metric, value / ret.getData().size());
return Result.buildSuc(connectClusterMetrics);
}
private Result<ConnectClusterMetrics> getConnectWorkerMetricSum(VersionItemParam metricParam) {
ConnectClusterMetricParam param = (ConnectClusterMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String metric = param.getMetric();
Result<List<ConnectWorkerMetrics>> ret = this.getConnectWorkerMetricsByJMX(connectClusterId, metric);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
// sum across workers
Float value = ret.getData().stream().map(elem -> elem.getMetric(metric) == null ? 0 : elem.getMetric(metric)).reduce(Float::sum).get();
ConnectClusterMetrics connectClusterMetrics = new ConnectClusterMetrics(null, connectClusterId);
connectClusterMetrics.putMetric(metric, value);
return Result.buildSuc(connectClusterMetrics);
}
// fetch the per-worker metrics list via JMX
private Result<List<ConnectWorkerMetrics>> getConnectWorkerMetricsByJMX(Long connectClusterId, String metric) {
List<String> workerIdList = workerService.listFromDB(connectClusterId).stream().map(elem -> elem.getWorkerId()).collect(Collectors.toList());
List<ConnectWorkerMetrics> workerMetricsList = new ArrayList<>();
for (String workerId : workerIdList) {
Result<ConnectWorkerMetrics> ret = this.getConnectWorkerMetricByJMX(connectClusterId, workerId, metric);
if (ret == null || !ret.hasData() || ret.getData().getMetric(metric) == null) {
continue;
}
workerMetricsList.add(ret.getData());
}
return Result.buildSuc(workerMetricsList);
}
private Result<ConnectWorkerMetrics> getConnectWorkerMetricByJMX(Long connectClusterId, String workerId, String metric) {
VersionJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
// fetch the JMX metric value
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxInfo.getJmxObjectName()), jmxInfo.getJmxAttribute()).toString();
ConnectWorkerMetrics connectWorkerMetrics = ConnectWorkerMetrics.initWithMetric(connectClusterId, workerId, metric, Float.valueOf(value));
return Result.buildSuc(connectWorkerMetrics);
} catch (Exception e) {
LOGGER.error("method=getConnectWorkerMetricsByJMX||connectClusterId={}||workerId={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, metric, jmxInfo.getJmxObjectName(), e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
private List<Long> listTopNConnectClusterIdList(Long clusterPhyId, Integer topN) {
List<ConnectCluster> connectClusters = connectClusterService.listByKafkaCluster(clusterPhyId);
if (CollectionUtils.isEmpty(connectClusters)) {
return new ArrayList<>();
}
return connectClusters.subList(0, Math.min(topN, connectClusters.size()))
.stream()
.map(b -> b.getId().longValue())
.collect(Collectors.toList());
}
protected List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Long, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
// return immediately if empty
return multiLinesVOS;
}
for(String metric : map.keySet()){
try {
MetricMultiLinesVO multiLinesVO = new MetricMultiLinesVO();
multiLinesVO.setMetricName(metric);
List<MetricLineVO> metricLines = new ArrayList<>();
Map<Long, List<MetricPointVO>> metricPointMap = map.get(metric);
if (null == metricPointMap || metricPointMap.isEmpty()) { continue; }
for(Map.Entry<Long, List<MetricPointVO>> entry : metricPointMap.entrySet()){
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(entry.getKey().toString());
metricLineVO.setMetricName(metric);
metricLineVO.setMetricPoints(entry.getValue());
metricLines.add(metricLineVO);
}
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e);
}
}
return multiLinesVOS;
}
}
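The per-worker reads above go through the project's JmxConnectorWrap; stripped of that wrapper, the underlying round-trip is a plain JMX attribute read. A sketch with the standard javax.management API (host, port, and the connector-count attribute are assumptions; kafka.connect:type=connect-worker-metrics is the standard Connect worker MBean):

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class WorkerJmxReadSketch {
    // Read one MBean attribute from a Connect worker's JMX endpoint and coerce it to Float
    public static Float readWorkerAttribute(String host, int jmxPort, String objectName, String attribute) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
                String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, jmxPort));
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection conn = connector.getMBeanServerConnection();
            Object value = conn.getAttribute(new ObjectName(objectName), attribute);
            return Float.valueOf(value.toString());
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(readWorkerAttribute("localhost", 9999,
                "kafka.connect:type=connect-worker-metrics", "connector-count"));
    }
}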

View File

@@ -0,0 +1,243 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectClusterPO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectClusterDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import java.util.List;
@Service
public class ConnectClusterServiceImpl implements ConnectClusterService {
private static final ILog LOGGER = LogFactory.getLog(ConnectClusterServiceImpl.class);
@Autowired
private ConnectClusterDAO connectClusterDAO;
@Autowired
private OpLogWrapService opLogWrapService;
@Override
public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) {
// strip the trailing slash from the URL
String clusterUrl = metadata.getMemberLeaderUrl();
if (clusterUrl.charAt(clusterUrl.length() - 1) == '/') {
clusterUrl = clusterUrl.substring(0, clusterUrl.length() - 1);
}
ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());
if (oldPO == null) {
oldPO = new ConnectClusterPO();
oldPO.setKafkaClusterPhyId(metadata.getKafkaClusterPhyId());
oldPO.setGroupName(metadata.getGroupName());
oldPO.setName(metadata.getGroupName());
oldPO.setState(metadata.getState().getCode());
oldPO.setMemberLeaderUrl(metadata.getMemberLeaderUrl());
oldPO.setClusterUrl(clusterUrl);
oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION);
connectClusterDAO.insert(oldPO);
oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());
return oldPO == null ? null : oldPO.getId();
}
oldPO.setKafkaClusterPhyId(metadata.getKafkaClusterPhyId());
oldPO.setGroupName(metadata.getGroupName());
oldPO.setState(metadata.getState().getCode());
oldPO.setMemberLeaderUrl(metadata.getMemberLeaderUrl());
if (ValidateUtils.isBlank(oldPO.getVersion())) {
oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION);
}
if (ValidateUtils.isBlank(oldPO.getClusterUrl())) {
oldPO.setClusterUrl(metadata.getMemberLeaderUrl());
}
connectClusterDAO.updateById(oldPO);
return oldPO.getId();
}
@Override
public List<ConnectCluster> listByKafkaCluster(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return ConvertUtil.list2List(connectClusterDAO.selectList(lambdaQueryWrapper), ConnectCluster.class);
}
@Override
public List<ConnectCluster> listAllClusters() {
List<ConnectClusterPO> connectClusterPOList = connectClusterDAO.selectList(null);
return ConvertUtil.list2List(connectClusterPOList, ConnectCluster.class);
}
@Override
public ConnectCluster getById(Long connectClusterId) {
return ConvertUtil.obj2Obj(connectClusterDAO.selectById(connectClusterId), ConnectCluster.class);
}
@Override
public ConnectCluster getByName(Long clusterPhyId, String connectClusterName) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
lambdaQueryWrapper.eq(ConnectClusterPO::getName, connectClusterName);
return ConvertUtil.obj2Obj(connectClusterDAO.selectOne(lambdaQueryWrapper), ConnectCluster.class);
}
@Override
public String getClusterVersion(Long connectClusterId) {
ConnectClusterPO connectClusterPO = connectClusterDAO.selectById(connectClusterId);
return null != connectClusterPO ? connectClusterPO.getVersion() : "";
}
@Override
public String getClusterName(Long connectClusterId) {
ConnectClusterPO connectClusterPO = connectClusterDAO.selectById(connectClusterId);
return null != connectClusterPO ? connectClusterPO.getName() : "";
}
@Override
public Result<Void> deleteInDB(Long connectClusterId, String operator) {
ConnectCluster connectCluster = this.getById(connectClusterId);
if (connectCluster == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
if (!GroupStateEnum.DEAD.getCode().equals(connectCluster.getState())) {
return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FORBIDDEN, "Only clusters in the DEAD state can be deleted");
}
connectClusterDAO.deleteById(connectClusterId);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectClusterBizStr(connectCluster.getId(), connectCluster.getName()),
ConvertUtil.obj2Json(connectCluster)
));
return Result.buildSuc();
}
@Override
@Transactional
public Result<Void> batchModifyInDB(List<ConnectClusterDTO> dtoList, String operator) {
LOGGER.info("method=batchModifyInDB||data={}||operator={}", dtoList, operator);
for (ConnectClusterDTO dto: dtoList) {
if (dto.getClusterUrl() != null && !dto.getClusterUrl().startsWith("http://") && !dto.getClusterUrl().startsWith("https://")) {
    return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "clusterUrl must start with http:// or https://");
}
}
for (ConnectClusterDTO dto: dtoList) {
try {
ConnectClusterPO po = this.getRowById(dto.getId());
if (po == null) {
// roll back the transaction
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(dto.getId()));
}
if (!ValidateUtils.isNull(dto.getName())) {
po.setName(dto.getName());
}
if (!ValidateUtils.isNull(dto.getClusterUrl())) {
String clusterUrl = dto.getClusterUrl();
if (clusterUrl.charAt(clusterUrl.length() - 1) == '/') {
clusterUrl = clusterUrl.substring(0, clusterUrl.length() - 1);
}
po.setClusterUrl(clusterUrl);
}
if (!ValidateUtils.isNull(dto.getVersion())) {
po.setVersion(dto.getVersion());
}
if (!ValidateUtils.isNull(dto.getJmxProperties())) {
po.setJmxProperties(dto.getJmxProperties());
}
connectClusterDAO.updateById(po);
// record the operation log
opLogWrapService.saveOplogAndIgnoreException(
new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CLUSTER.getDesc(),
MsgConstant.getConnectClusterBizStr(dto.getId(), dto.getName()),
ConvertUtil.obj2Json(po)
)
);
} catch (DuplicateKeyException dke) {
LOGGER.error(
"method=batchModifyInDB||data={}||operator={}||errMsg=connectCluster name duplicate",
dtoList, operator
);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "duplicate connect cluster name");
} catch (Exception e) {
LOGGER.error(
"method=batchModifyInDB||data={}||operator={}||errMsg=exception",
dtoList, operator, e
);
// roll back the transaction
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
return Result.buildSuc();
}
@Override
public Boolean existConnectClusterDown(Long kafkaClusterPhyId) {
List<ConnectCluster> connectClusters = this.listByKafkaCluster(kafkaClusterPhyId);
for (ConnectCluster connectCluster : connectClusters) {
if (GroupStateEnum.getByState(String.valueOf(connectCluster.getState())) == GroupStateEnum.DEAD) {
    return true;
}
}
return false;
}
/**************************************************** private method ****************************************************/
private ConnectClusterPO getPOFromDB(Long kafkaClusterPhyId, String groupName) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getGroupName, groupName);
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return connectClusterDAO.selectOne(lambdaQueryWrapper);
}
public ConnectClusterPO getRowById(Long connectClusterId) {
return connectClusterDAO.selectById(connectClusterId);
}
}
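batchModifyInDB marks the surrounding transaction rollback-only instead of throwing, so it can return a typed Result while still undoing the updates already applied. A minimal sketch of that Spring pattern (the service and the Runnable stand-ins are illustrative):

import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;

@Service
public class RollbackOnlySketch {
    @Transactional
    public boolean updateAll(Iterable<Runnable> updates) {
        for (Runnable update : updates) {
            try {
                update.run(); // stand-in for a DAO updateById call
            } catch (Exception e) {
                // Undo everything applied so far, but return normally instead of propagating
                TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
                return false;
            }
        }
        return true;
    }
}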

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectorsDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import java.util.List;
/**
* @author didi
*/
public interface ConnectorMetricService {
/**
* Collect metrics from Kafka
*/
Result<ConnectorMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String connectorName, String metricName);
Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName);
Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName, ConnectorTypeEnum connectorType);
/**
* Fetch aggregated metric lines over a time range from ES
*/
Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectorsDTO dto);
Result<List<ConnectorMetrics>> getLatestMetricsFromES(Long clusterPhyId, List<ClusterConnectorDTO> connectorNameList, List<String> metricNameList);
Result<ConnectorMetrics> getLatestMetricsFromES(Long connectClusterId, String connectorName, List<String> metricsNames);
boolean isMetricName(String str);
}

View File

@@ -0,0 +1,59 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import java.util.List;
import java.util.Properties;
import java.util.Set;
/**
* Connector operations
*/
public interface ConnectorService {
Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator);
/**
* List the names of all connectors
*/
Result<List<String>> listConnectorsFromCluster(Long connectClusterId);
/**
* Get the info of a single connector
*/
Result<KSConnectorInfo> getConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName);
Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName);
Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);
void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet);
void addNewToDB(KSConnector connector);
List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
List<ConnectorPO> listByConnectClusterIdFromDB(Long connectClusterId);
int countByConnectClusterIdFromDB(Long connectClusterId);
ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName);
ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName);
}
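The implementation that follows drives these operations through the standard Kafka Connect REST API; GET /connectors, for instance, returns a JSON array of connector names. A sketch with the JDK HTTP client (the worker URL is an assumption; 8083 is the default Connect REST port):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ListConnectorsSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8083/connectors")) // default Connect REST endpoint
                .GET()
                .build();
        // The body is a JSON array of connector names, e.g. ["file-source","es-sink"]
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}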

View File

@@ -0,0 +1,443 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectorsDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorTaskMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.connect.ConnectorMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.connect.ConnectorMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.ConnectorMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
/**
* @author didi
*/
@Service
public class ConnectorMetricServiceImpl extends BaseConnectorMetricService implements ConnectorMetricService {
protected static final ILog LOGGER = LogFactory.getLog(ConnectorMetricServiceImpl.class);
public static final String CONNECTOR_METHOD_DO_NOTHING = "doNothing";
public static final String CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM = "getConnectWorkerMetricSum";
public static final String CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG = "getConnectorTaskMetricsAvg";
public static final String CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX = "getConnectorTaskMetricsMax";
public static final String CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM = "getConnectorTaskMetricsSum";
public static final String CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE = "getMetricHealthScore";
@Autowired
private ConnectorMetricESDAO connectorMetricESDAO;
@Autowired
private ConnectJMXClient connectJMXClient;
@Autowired
private WorkerService workerService;
@Autowired
private ConnectorService connectorService;
@Autowired
private WorkerConnectorService workerConnectorService;
@Autowired
private HealthStateService healthStateService;
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.METRIC_CONNECT_CONNECTOR;
}
@Override
protected List<String> listMetricPOFields() {
return BeanUtil.listBeanFields(BrokerMetricPO.class);
}
@Override
protected void initRegisterVCHandler() {
registerVCHandler(CONNECTOR_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, this::getConnectorTaskMetricsAvg);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, this::getConnectorTaskMetricsMax);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, this::getConnectorTaskMetricsSum);
registerVCHandler(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE, this::getMetricHealthScore);
}
@Override
public Result<ConnectorMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String connectorName, String metric) {
String connectorMetricKey = CollectedMetricsLocalCache.genConnectorMetricCacheKey(connectClusterPhyId, connectorName, metric);
Float keyValue = CollectedMetricsLocalCache.getConnectorMetrics(connectorMetricKey);
if (null != keyValue) {
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterPhyId, connectorName, metric, keyValue);
return Result.buildSuc(connectorMetrics);
}
Result<ConnectorMetrics> ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, connectorName, metric);
if (ret == null || !ret.hasData()) {
return ret;
}
Map<String, Float> metricMap = ret.getData().getMetrics();
for (Map.Entry<String, Float> entry : metricMap.entrySet()) {
    // cache under the same composite key used for lookups, not the bare metric name
    CollectedMetricsLocalCache.putConnectorMetrics(
            CollectedMetricsLocalCache.genConnectorMetricCacheKey(connectClusterPhyId, connectorName, entry.getKey()), entry.getValue());
}
return ret;
}
@Override
public Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName) {
try {
ConnectorMetricParam metricParam = new ConnectorMetricParam(connectClusterPhyId, connectorName, metricName, null);
return (Result<ConnectorMetrics>) doVCHandler(connectClusterPhyId, metricName, metricParam);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName, ConnectorTypeEnum connectorType) {
try {
ConnectorMetricParam metricParam = new ConnectorMetricParam(connectClusterPhyId, connectorName, metricName, connectorType);
return (Result<ConnectorMetrics>) doVCHandler(connectClusterPhyId, metricName, metricParam);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectorsDTO dto) {
Long startTime = dto.getStartTime();
Long endTime = dto.getEndTime();
Integer topN = dto.getTopNu();
String aggType = dto.getAggType();
List<String> metricNameList = dto.getMetricsNames();
List<Tuple<Long, String>> connectorList = new ArrayList<>();
if(!CollectionUtils.isEmpty(dto.getConnectorNameList())){
connectorList = dto.getConnectorNameList().stream()
.map(c -> new Tuple<>(c.getConnectClusterId(), c.getConnectorName()))
.collect(Collectors.toList());
}
Table<String/*metric*/, Tuple<Long, String>, List<MetricPointVO>> retTable;
if(ValidateUtils.isEmptyList(connectorList)) {
// fetch by TopN
List<Tuple<Long, String>> defaultConnectorList = this.listTopNConnectorList(clusterPhyId, topN);
retTable = connectorMetricESDAO.listMetricsByTopN(clusterPhyId, defaultConnectorList, metricNameList, aggType, topN, startTime, endTime);
} else {
// fetch by the specified connectors
retTable = connectorMetricESDAO.listMetricsByConnectors(clusterPhyId, metricNameList, aggType, connectorList, startTime, endTime);
}
return Result.buildSuc(this.metricMap2VO(clusterPhyId, retTable.rowMap()));
}
@Override
public Result<List<ConnectorMetrics>> getLatestMetricsFromES(Long clusterPhyId, List<ClusterConnectorDTO> connectorNameList, List<String> metricsNames) {
List<Tuple<Long, String>> connectClusterIdAndConnectorNameList = connectorNameList
.stream()
.map(elem -> new Tuple<>(elem.getConnectClusterId(), elem.getConnectorName()))
.collect(Collectors.toList());
List<ConnectorMetricPO> poList =
connectorMetricESDAO.getConnectorLatestMetric(clusterPhyId, connectClusterIdAndConnectorNameList, metricsNames);
return Result.buildSuc(ConvertUtil.list2List(poList, ConnectorMetrics.class));
}
@Override
public Result<ConnectorMetrics> getLatestMetricsFromES(Long connectClusterId, String connectorName, List<String> metricsNames) {
ConnectorMetricPO connectorMetricPO = connectorMetricESDAO.getConnectorLatestMetric(
null, connectClusterId, connectorName, metricsNames);
return Result.buildSuc(ConvertUtil.obj2Obj(connectorMetricPO, ConnectorMetrics.class));
}
@Override
public boolean isMetricName(String str) {
return super.isMetricName(str);
}
/**************************************************** private method ****************************************************/
private Result<ConnectorMetrics> doNothing(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
return Result.buildSuc(new ConnectorMetrics(param.getConnectClusterId(), param.getConnectorName()));
}
private Result<ConnectorMetrics> getMetricHealthScore(VersionItemParam metricParam) {
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
ConnectorMetrics metrics = healthStateService.calConnectorHealthMetrics(connectClusterId, connectorName);
return Result.buildSuc(metrics);
}
private Result<ConnectorMetrics> getConnectWorkerMetricSum(VersionItemParam metricParam) {
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
float sum = 0;
boolean isCollected = false;
// fetch the worker ID list for this connect cluster
List<String> workerIdList = workerService.listFromDB(connectClusterId).stream().map(elem -> elem.getWorkerId()).collect(Collectors.toList());
for (String workerId : workerIdList) {
Result<ConnectorMetrics> ret = this.getConnectorMetric(connectClusterId, workerId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().getMetric(metric) == null) {
continue;
}
isCollected = true;
sum += ret.getData().getMetric(metric);
}
if (!isCollected) {
return Result.buildFailure(NOT_EXIST);
}
return Result.buildSuc(ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum));
}
// kafka.connect:type=connect-worker-metrics,connector="{connector}" metrics
private Result<ConnectorMetrics> getConnectorMetric(Long connectClusterId, String workerId, String connectorName, String metric, ConnectorTypeEnum connectorType) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
// check for null before dereferencing jmxInfo
if (null == jmxInfo) {
    return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
if (jmxInfo.getType() != null) {
    if (connectorType == null) {
        connectorType = connectorService.getConnectorType(connectClusterId, connectorName);
    }
    if (connectorType != jmxInfo.getType()) {
        return Result.buildFailure(VC_JMX_INSTANCE_NOT_FOUND);
    }
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
// fetch the JMX metric value
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, Float.valueOf(value));
return Result.buildSuc(connectorMetrics);
} catch (InstanceNotFoundException e) {
// ignore this error: it occurs when the metric does not exist in JMX
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName));
} catch (Exception e) {
LOGGER.error("method=getConnectorMetric||connectClusterId={}||workerId={}||connectorName={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, connectorName, metric, jmxObjectName, e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
private Result<ConnectorMetrics> getConnectorTaskMetricsAvg(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
Result<List<ConnectorTaskMetrics>> ret = this.getConnectorTaskMetricList(connectClusterId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum / ret.getData().size());
return Result.buildSuc(connectorMetrics);
}
private Result<ConnectorMetrics> getConnectorTaskMetricsMax(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
Result<List<ConnectorTaskMetrics>> ret = this.getConnectorTaskMetricList(connectClusterId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
// take the maximum across tasks; unlike avg, it must not be divided by the task count
Float max = ret.getData().stream().max((a, b) -> a.getMetric(metric).compareTo(b.getMetric(metric))).get().getMetric(metric);
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, max);
return Result.buildSuc(connectorMetrics);
}
private Result<ConnectorMetrics> getConnectorTaskMetricsSum(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
Result<List<ConnectorTaskMetrics>> ret = this.getConnectorTaskMetricList(connectClusterId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum);
return Result.buildSuc(connectorMetrics);
}
private Result<List<ConnectorTaskMetrics>> getConnectorTaskMetricList(Long connectClusterId, String connectorName, String metricName, ConnectorTypeEnum connectorType) {
List<ConnectorTaskMetrics> connectorTaskMetricsList = new ArrayList<>();
List<WorkerConnector> workerConnectorList = workerConnectorService.listFromDB(connectClusterId).stream().filter(elem -> elem.getConnectorName().equals(connectorName)).collect(Collectors.toList());
if (workerConnectorList.isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
for (WorkerConnector workerConnector : workerConnectorList) {
Result<ConnectorTaskMetrics> ret = getConnectorTaskMetric(connectClusterId, workerConnector.getWorkerId(), connectorName, workerConnector.getTaskId(), metricName, connectorType);
if (ret == null || !ret.hasData() || ret.getData().getMetric(metricName) == null) {
continue;
}
connectorTaskMetricsList.add(ret.getData());
}
return Result.buildSuc(connectorTaskMetricsList);
}
private Result<ConnectorTaskMetrics> getConnectorTaskMetric(Long connectClusterId, String workerId, String connectorName, Integer taskId, String metric, ConnectorTypeEnum connectorType) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
// check for null before dereferencing jmxInfo
if (null == jmxInfo) {
    return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
if (jmxInfo.getType() != null) {
    if (connectorType == null) {
        connectorType = connectorService.getConnectorType(connectClusterId, connectorName);
    }
    if (connectorType != jmxInfo.getType()) {
        return Result.buildFailure(VC_JMX_INSTANCE_NOT_FOUND);
    }
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName, taskId);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
// fetch the JMX metric value
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
ConnectorTaskMetrics connectorTaskMetrics = ConnectorTaskMetrics.initWithMetric(connectClusterId, connectorName, taskId, metric, Float.valueOf(value));
return Result.buildSuc(connectorTaskMetrics);
} catch (Exception e) {
LOGGER.error("method=getConnectorTaskMetric||connectClusterId={}||workerId={}||connectorName={}||taskId={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, connectorName, taskId, metric, jmxObjectName, e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
private List<Tuple<Long, String>> listTopNConnectorList(Long clusterPhyId, Integer topN) {
List<ConnectorPO> connectorPOS = connectorService.listByKafkaClusterIdFromDB(clusterPhyId);
if (CollectionUtils.isEmpty(connectorPOS)) {
return new ArrayList<>();
}
return connectorPOS.subList(0, Math.min(topN, connectorPOS.size()))
.stream()
.map( c -> new Tuple<>(c.getId(), c.getConnectorName()) )
.collect(Collectors.toList());
}
protected List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Tuple<Long, String>, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
// return immediately if empty
return multiLinesVOS;
}
for(String metric : map.keySet()){
try {
MetricMultiLinesVO multiLinesVO = new MetricMultiLinesVO();
multiLinesVO.setMetricName(metric);
List<MetricLineVO> metricLines = new ArrayList<>();
Map<Tuple<Long, String>, List<MetricPointVO>> metricPointMap = map.get(metric);
if (null == metricPointMap || metricPointMap.isEmpty()) { continue; }
for(Map.Entry<Tuple<Long, String>, List<MetricPointVO>> entry : metricPointMap.entrySet()){
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(entry.getKey().getV1() + "#" + entry.getKey().getV2());
metricLineVO.setMetricName(metric);
metricLineVO.setMetricPoints(entry.getValue());
metricLines.add(metricLineVO);
}
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e);
}
}
return multiLinesVOS;
}
}
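The three task-level aggregations above differ only in the fold applied to the collected values; note in particular that a max, unlike an avg, is never divided by the task count. A compact standalone sketch of the same avg/max/sum logic over plain floats (illustrative only):

import java.util.List;

public class TaskMetricAggregationSketch {
    static float sum(List<Float> values) {
        float s = 0F;
        for (Float v : values) { s += v; }
        return s;
    }

    static float avg(List<Float> values) {
        return sum(values) / values.size(); // callers guarantee a non-empty list, as the service does
    }

    static float max(List<Float> values) {
        float m = Float.NEGATIVE_INFINITY;
        for (Float v : values) { m = Math.max(m, v); }
        return m; // returned as-is, never divided by the task count
    }

    public static void main(String[] args) {
        List<Float> taskValues = List.of(1F, 2F, 3F);
        System.out.printf("sum=%.1f avg=%.1f max=%.1f%n", sum(taskValues), avg(taskValues), max(taskValues));
    }
}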

View File

@@ -0,0 +1,581 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.converter.ConnectConverter;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR;
@Service
public class ConnectorServiceImpl extends BaseVersionControlService implements ConnectorService {
private static final ILog LOGGER = LogFactory.getLog(ConnectorServiceImpl.class);
@Autowired
private RestTool restTool;
@Autowired
private ConnectorDAO connectorDAO;
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private OpLogWrapService opLogWrapService;
private static final String LIST_CONNECTORS_URI = "/connectors";
private static final String GET_CONNECTOR_INFO_PREFIX_URI = "/connectors";
private static final String GET_CONNECTOR_TOPICS_URI = "/connectors/%s/topics";
private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status";
private static final String CREATE_CONNECTOR_URI = "/connectors";
private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume";
private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart";
private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause";
private static final String DELETE_CONNECTOR_URI = "/connectors/%s";
private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config";
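// the URIs above map to the standard Kafka Connect REST API:
// GET /connectors lists connector names, POST /connectors creates one,
// PUT /connectors/{name}/pause|resume changes its target state,
// POST /connectors/{name}/restart restarts it, DELETE /connectors/{name} removes it,
// and PUT /connectors/{name}/config submits a new configuration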
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_CONNECTOR;
}
@Override
public Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// build the request payload
Properties props = new Properties();
props.put("name", connectorName);
props.put("config", configs);
ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
connectCluster.getClusterUrl() + CREATE_CONNECTOR_URI,
props,
ConnectorInfo.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ADD.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectClusterId);
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception",
connectClusterId, connectorName, configs, operator, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<List<String>> listConnectorsFromCluster(Long connectClusterId) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
List<String> nameList = restTool.getArrayObjectWithJsonContent(
connectCluster.getClusterUrl() + LIST_CONNECTORS_URI,
new HashMap<>(),
String.class
);
return Result.buildSuc(nameList);
} catch (Exception e) {
LOGGER.error(
"method=listConnectorsFromCluster||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<KSConnectorInfo> getConnectorInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorInfoFromCluster(connectCluster, connectorName);
}
@Override
public Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorTopicsFromCluster(connectCluster, connectorName);
}
@Override
public Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
}
@Override
public Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
Result<KSConnectorInfo> connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName);
if (connectorResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
return Result.buildFromIgnoreData(connectorResult);
}
Result<List<String>> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName);
if (topicNameListResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, topicNameListResult
);
}
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, stateInfoResult
);
}
return Result.buildSuc(ConnectConverter.convert2KSConnector(
connectCluster.getKafkaClusterPhyId(),
connectCluster.getId(),
connectorResult.getData(),
stateInfoResult.getData(),
topicNameListResult.getData()
));
}
@Override
public Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ENABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=ConnectorServiceImpl||method=resumeConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.postObjectWithJsonContent(
connectCluster.getClusterUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.RESTART.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=restartConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DISABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=stopConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.deleteWithParamsAndHeader(
connectCluster.getClusterUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
new HashMap<>(),
new HashMap<>(),
String.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
this.deleteConnectorInDB(connectClusterId, connectorName);
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=deleteConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
ConnectorInfo connectorInfo = restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
configs,
ConnectorInfo.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=updateConnectorConfig||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet) {
List<ConnectorPO> poList = this.listByConnectClusterIdFromDB(connectClusterId);
Map<String, ConnectorPO> oldPOMap = new HashMap<>();
poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem));
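// diff-style replace: upsert every connector reported by the cluster,
// then delete whatever is left in oldPOMap (rows no longer reported)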
for (KSConnector connector: connectorList) {
try {
ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName());
if (oldPO == null) {
oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
connectorDAO.insert(oldPO);
} else {
ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
newPO.setId(oldPO.getId());
connectorDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldPOMap.values().forEach(elem -> {
if (allConnectorNameSet.contains(elem.getConnectorName())) {
// the connector still exists, keep it
return;
}
// the connector no longer exists, delete its row
connectorDAO.deleteById(elem.getId());
});
} catch (Exception e) {
// ignore
}
}
@Override
public void addNewToDB(KSConnector connector) {
try {
connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class));
} catch (DuplicateKeyException dke) {
// ignore
}
}
@Override
public List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return connectorDAO.selectList(lambdaQueryWrapper);
}
@Override
public List<ConnectorPO> listByConnectClusterIdFromDB(Long connectClusterId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
return connectorDAO.selectList(lambdaQueryWrapper);
}
@Override
public int countByConnectClusterIdFromDB(Long connectClusterId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
return connectorDAO.selectCount(lambdaQueryWrapper);
}
@Override
public ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
return connectorDAO.selectOne(lambdaQueryWrapper);
}
@Override
public ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName) {
ConnectorTypeEnum connectorType = ConnectorTypeEnum.UNKNOWN;
ConnectorPO connector = this.getConnectorFromDB(connectClusterId, connectorName);
if (connector != null) {
connectorType = ConnectorTypeEnum.getByName(connector.getConnectorType());
}
return connectorType;
}
/**************************************************** private method ****************************************************/
private int deleteConnectorInDB(Long connectClusterId, String connectorName) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
return connectorDAO.delete(lambdaQueryWrapper);
}
private Result<KSConnectorInfo> getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
ConnectorInfo connectorInfo = restTool.getForObject(
connectCluster.getClusterUrl() + GET_CONNECTOR_INFO_PREFIX_URI + "/" + connectorName,
new HashMap<>(),
ConnectorInfo.class
);
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectCluster.getId());
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=getConnectorInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
private Result<List<String>> getConnectorTopicsFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
Properties properties = restTool.getForObject(
connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_TOPICS_URI, connectorName),
new HashMap<>(),
Properties.class
);
ActiveTopicsInfo activeTopicsInfo = ConvertUtil.toObj(ConvertUtil.obj2Json(properties.get(connectorName)), ActiveTopicsInfo.class);
return Result.buildSuc(new ArrayList<>(activeTopicsInfo.topics()));
} catch (Exception e) {
LOGGER.error(
"method=getConnectorTopicsFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
private Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
KSConnectorStateInfo connectorStateInfo = restTool.getForObject(
connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName),
new HashMap<>(),
KSConnectorStateInfo.class
);
return Result.buildSuc(connectorStateInfo);
} catch (Exception e) {
LOGGER.error(
"method=getConnectorStateInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) {
try {
// back off for 2 seconds so the cluster-side state change can take effect
BackoffUtils.backoff(2000);
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
return;
}
ConnectorPO po = new ConnectorPO();
po.setConnectClusterId(connectClusterId);
po.setConnectorName(connectorName);
po.setState(stateInfoResult.getData().getConnector().getState());
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
connectorDAO.update(po, lambdaQueryWrapper);
} catch (Exception e) {
LOGGER.error(
"method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception",
connectClusterId, connectorName, e
);
}
}
}
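
A minimal usage sketch of the service above, assuming a Spring-injected ConnectorService and an existing Connect cluster with id 1L; the FileStreamSource connector and its config keys are standard Kafka Connect, while the cluster id, connector name, topic, and file path here are illustrative only:

Properties configs = new Properties();
configs.put("connector.class", "org.apache.kafka.connect.file.FileStreamSourceConnector");
configs.put("tasks.max", "1");
configs.put("file", "/tmp/source.txt");
configs.put("topic", "connect-demo");
Result<KSConnectorInfo> ret = connectorService.createConnector(1L, "demo-file-source", configs, "admin");
if (ret.failed()) {
// inspect ret.getMessage() and retry as needed
}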

View File

@@ -0,0 +1,20 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.plugin;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.config.ConnectConfigInfos;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.plugin.ConnectPluginBasic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
import java.util.Properties;
/**
* Connector plugin query and validation
*/
public interface PluginService {
Result<ConnectConfigInfos> getConfig(Long connectClusterId, String pluginName);
Result<ConnectConfigInfos> validateConfig(Long connectClusterId, Properties props);
Result<List<ConnectPluginBasic>> listPluginsFromCluster(Long connectClusterId);
}

View File

@@ -0,0 +1,112 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.plugin.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.config.ConnectConfigInfos;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.plugin.ConnectPluginBasic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_PLUGIN;
@Service
public class PluginServiceImpl extends BaseVersionControlService implements PluginService {
private static final ILog LOGGER = LogFactory.getLog(PluginServiceImpl.class);
@Autowired
private RestTool restTool;
@Autowired
private ConnectClusterService connectClusterService;
private static final String GET_PLUGIN_CONFIG_DESC_URI = "/connector-plugins/%s/config/validate";
private static final String GET_ALL_PLUGINS_URI = "/connector-plugins";
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_PLUGIN;
}
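/**
 * Fetch a plugin's config definition. The Connect REST API has no dedicated
 * "describe config" endpoint, so this reuses the validate endpoint below with a
 * minimal config (connector.class plus a placeholder topics value).
 */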
@Override
public Result<ConnectConfigInfos> getConfig(Long connectClusterId, String pluginName) {
Properties props = new Properties();
props.put(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME, pluginName);
props.put(KafkaConnectConstant.CONNECTOR_TOPICS_FILED_NAME, KafkaConnectConstant.CONNECTOR_TOPICS_FILED_ERROR_VALUE);
return this.validateConfig(connectClusterId, props);
}
@Override
public Result<ConnectConfigInfos> validateConfig(Long connectClusterId, Properties props) {
try {
if (ValidateUtils.isBlank(props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "Parameter error: the connector.class field must be present and non-empty");
}
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// fetch the plugin's config definition via the config-validation endpoint
ConfigInfos configInfos = restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(GET_PLUGIN_CONFIG_DESC_URI, props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME)),
props,
ConfigInfos.class
);
return Result.buildSuc(new ConnectConfigInfos(configInfos));
} catch (Exception e) {
LOGGER.error(
"method=validateConfig||connectClusterId={}||pluginName={}||errMsg=exception",
connectClusterId,
props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME),
e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<List<ConnectPluginBasic>> listPluginsFromCluster(Long connectClusterId) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// fetch the full plugin list from the Connect cluster
List<ConnectPluginBasic> pluginList = restTool.getArrayObjectWithJsonContent(
connectCluster.getClusterUrl() + GET_ALL_PLUGINS_URI,
new HashMap<>(),
ConnectPluginBasic.class
);
return Result.buildSuc(pluginList);
} catch (Exception e) {
LOGGER.error(
"method=listPluginsFromCluster||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
}
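
A similar sketch for plugin validation, assuming a Spring-injected PluginService; the plugin class, topic, and cluster id are illustrative:

Properties props = new Properties();
props.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
props.put("topics", "connect-demo");
Result<ConnectConfigInfos> infos = pluginService.validateConfig(1L, props);
if (infos.hasData()) {
// each config key's definition and any validation errors are available here
}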

View File

@@ -0,0 +1,23 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.task.TaskActionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
/**
* Worker connector (connector tasks distributed across workers)
*/
public interface WorkerConnectorService {
void batchReplaceInDB(Long connectClusterId, List<WorkerConnector> workerList);
List<WorkerConnector> listFromDB(Long connectClusterId);
List<WorkerConnector> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
Result<Void> actionTask(TaskActionDTO dto);
List<WorkerConnector> getWorkerConnectorListFromCluster(ConnectCluster connectCluster, String connectorName);
}

View File

@@ -0,0 +1,38 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.connector.ClusterWorkerOverviewVO;
import java.util.List;
/**
* Worker
* @author didi
*/
public interface WorkerService {
/**
* Batch replace workers in the database
* @param connectClusterId
* @param workerList
*/
void batchReplaceInDB(Long connectClusterId, List<ConnectWorker> workerList);
/**
* Fetch workers from the database
* @param connectClusterId
* @return
*/
List<ConnectWorker> listFromDB(Long connectClusterId);
/**
* Paginated worker overview for a Kafka cluster
* @param kafkaClusterPhyId
* @param dto
* @return
*/
PaginationResult<ClusterWorkerOverviewVO> pageWorkByKafkaClusterPhy(Long kafkaClusterPhyId, PaginationBaseDTO dto);
List<ConnectWorker> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
}

View File

@@ -0,0 +1,143 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.task.TaskActionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSTaskState;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.WorkerConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.WorkerConnectorDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectActionEnum.RESTART;
@Service
public class WorkerConnectorServiceImpl implements WorkerConnectorService {
protected static final ILog LOGGER = LogFactory.getLog(WorkerConnectorServiceImpl.class);
@Autowired
private WorkerConnectorDAO workerConnectorDAO;
@Autowired
private RestTool restTool;
@Autowired
private ConnectorService connectorService;
@Autowired
private WorkerService workerService;
private static final String RESTART_TASK_URI = "%s/connectors/%s/tasks/%d/restart";
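// maps to the Connect REST endpoint: POST /connectors/{connectorName}/tasks/{taskId}/restart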
@Override
public void batchReplaceInDB(Long connectClusterId, List<WorkerConnector> workerList) {
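// worker-connector rows are treated as immutable snapshots, so the diff key includes the state:
// a state change shows up as one delete plus one insert rather than an in-place update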
Map<String, WorkerConnectorPO> oldMap = new HashMap<>();
for (WorkerConnectorPO oldPO : this.listPOSFromDB(connectClusterId)) {
oldMap.put(oldPO.getConnectorName() + "@" + oldPO.getWorkerId() + "@" + oldPO.getTaskId() + "@" + oldPO.getState(), oldPO);
}
for (WorkerConnector workerConnector : workerList) {
try {
String key = workerConnector.getConnectorName() + "@" + workerConnector.getWorkerId() + "@" + workerConnector.getTaskId() + "@" + workerConnector.getState();
WorkerConnectorPO oldPO = oldMap.remove(key);
if (oldPO == null) {
workerConnectorDAO.insert(ConvertUtil.obj2Obj(workerConnector, WorkerConnectorPO.class));
} else {
// the identical row already exists, nothing to update
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldMap.values().forEach(elem -> workerConnectorDAO.deleteById(elem.getId()));
} catch (Exception e) {
// ignore
}
}
@Override
public List<WorkerConnector> listFromDB(Long connectClusterId) {
return ConvertUtil.list2List(this.listPOSFromDB(connectClusterId), WorkerConnector.class);
}
@Override
public List<WorkerConnector> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<WorkerConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(WorkerConnectorPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return ConvertUtil.list2List(workerConnectorDAO.selectList(lambdaQueryWrapper), WorkerConnector.class);
}
@Override
public Result<Void> actionTask(TaskActionDTO dto) {
if (!dto.getAction().equals(RESTART.getValue())) {
return Result.buildFailure(ResultStatus.OPERATION_FORBIDDEN);
}
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(dto.getConnectClusterId());
if (connectCluster == null) {
return Result.buildFailure(ResultStatus.NOT_EXIST);
}
String url = String.format(RESTART_TASK_URI, connectCluster.getClusterUrl(), dto.getConnectorName(), dto.getTaskId());
try {
restTool.postObjectWithJsonContent(url, null, String.class);
} catch (Exception e) {
LOGGER.error("method=actionTask||connectClusterId={}||connectorName={}||taskId={}||restart failed||msg=exception",
dto.getConnectClusterId(), dto.getConnectorName(), dto.getTaskId(), e);
}
return Result.buildSuc();
}
@Override
public List<WorkerConnector> getWorkerConnectorListFromCluster(ConnectCluster connectCluster, String connectorName) {
Map<String, ConnectWorker> workerMap = workerService.listFromDB(connectCluster.getId()).stream().collect(Collectors.toMap(elem -> elem.getWorkerId(), Function.identity()));
List<WorkerConnector> workerConnectorList = new ArrayList<>();
Result<KSConnectorStateInfo> ret = connectorService.getConnectorStateInfoFromCluster(connectCluster.getId(), connectorName);
if (!ret.hasData()) {
return workerConnectorList;
}
KSConnectorStateInfo ksConnectorStateInfo = ret.getData();
for (KSTaskState task : ksConnectorStateInfo.getTasks()) {
ConnectWorker worker = workerMap.get(task.getWorkerId());
if (worker == null) {
// the task's worker is not in the DB snapshot yet; skip it to avoid an NPE
continue;
}
WorkerConnector workerConnector = new WorkerConnector(connectCluster.getKafkaClusterPhyId(), connectCluster.getId(), ksConnectorStateInfo.getName(), worker.getMemberId(), task.getId(), task.getState(), task.getWorkerId(), task.getTrace());
workerConnectorList.add(workerConnector);
}
return workerConnectorList;
}
private List<WorkerConnectorPO> listPOSFromDB(Long connectClusterId) {
LambdaQueryWrapper<WorkerConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(WorkerConnectorPO::getConnectClusterId, connectClusterId);
return workerConnectorDAO.selectList(lambdaQueryWrapper);
}
}

View File

@@ -0,0 +1,114 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectWorkerPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.connector.ClusterWorkerOverviewVO;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectWorkerDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Service
public class WorkerServiceImpl implements WorkerService {
@Autowired
private ConnectWorkerDAO connectWorkerDAO;
@Autowired
private ConnectorService connectorService;
@Autowired
private ConnectClusterService connectClusterService;
@Override
public void batchReplaceInDB(Long connectClusterId, List<ConnectWorker> workerList) {
Map<String, ConnectWorkerPO> oldMap = new HashMap<>();
for (ConnectWorkerPO oldPO: this.listPOSFromDB(connectClusterId)) {
oldMap.put(oldPO.getMemberId(), oldPO);
}
for (ConnectWorker worker: workerList) {
try {
ConnectWorkerPO newPO = ConvertUtil.obj2Obj(worker, ConnectWorkerPO.class);
ConnectWorkerPO oldPO = oldMap.remove(newPO.getMemberId());
if (oldPO == null) {
connectWorkerDAO.insert(newPO);
} else {
newPO.setId(oldPO.getId());
connectWorkerDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldMap.values().forEach(elem -> connectWorkerDAO.deleteById(elem.getId()));
} catch (Exception e) {
// ignore
}
}
@Override
public List<ConnectWorker> listFromDB(Long connectClusterId) {
return ConvertUtil.list2List(this.listPOSFromDB(connectClusterId), ConnectWorker.class);
}
@Override
public PaginationResult<ClusterWorkerOverviewVO> pageWorkByKafkaClusterPhy(Long kafkaClusterPhyId, PaginationBaseDTO dto) {
IPage<ConnectWorkerPO> pageInfo = new Page<>(dto.getPageNo(), dto.getPageSize());
LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectWorkerPO::getKafkaClusterPhyId, kafkaClusterPhyId);
lambdaQueryWrapper.like(!ValidateUtils.isBlank(dto.getSearchKeywords()), ConnectWorkerPO::getHost, dto.getSearchKeywords());
connectWorkerDAO.selectPage(pageInfo, lambdaQueryWrapper);
List<ConnectWorkerPO> connectWorkerPOS = pageInfo.getRecords();
List<ClusterWorkerOverviewVO> clusterWorkerOverviewVOS = new ArrayList<>();
for(ConnectWorkerPO connectWorkerPO : connectWorkerPOS){
Long connectClusterId = connectWorkerPO.getConnectClusterId();
ClusterWorkerOverviewVO clusterWorkerOverviewVO = new ClusterWorkerOverviewVO();
clusterWorkerOverviewVO.setConnectClusterId(connectClusterId);
clusterWorkerOverviewVO.setWorkerHost(connectWorkerPO.getHost());
clusterWorkerOverviewVO.setConnectorCount(connectorService.countByConnectClusterIdFromDB(connectClusterId));
clusterWorkerOverviewVO.setConnectClusterName(connectClusterService.getClusterName(connectClusterId));
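// NOTE: the task count below is currently a hardcoded placeholder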
clusterWorkerOverviewVO.setTaskCount(1);
clusterWorkerOverviewVOS.add(clusterWorkerOverviewVO);
}
return PaginationResult.buildSuc(clusterWorkerOverviewVOS, pageInfo);
}
@Override
public List<ConnectWorker> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectWorkerPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return ConvertUtil.list2List(connectWorkerDAO.selectList(lambdaQueryWrapper), ConnectWorker.class);
}
/**************************************************** private method ****************************************************/
private List<ConnectWorkerPO> listPOSFromDB(Long connectClusterId) {
LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectWorkerPO::getConnectClusterId, connectClusterId);
return connectWorkerDAO.selectList(lambdaQueryWrapper);
}
}

View File

@@ -1,14 +1,15 @@
package com.xiaojukeji.know.streaming.km.core.service.group;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.Group;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSGroupDescription;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.common.TopicPartition;
import java.util.Date;
@@ -19,16 +20,16 @@ public interface GroupService {
/**
* 从Kafka中获取消费组名称列表
*/
List<String> listGroupsFromKafka(Long clusterPhyId) throws NotExistException, AdminOperateException;
List<String> listGroupsFromKafka(ClusterPhy clusterPhy) throws AdminOperateException;
/**
* 从Kafka中获取消费组详细信息
*/
Group getGroupFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException;
Group getGroupFromKafka(ClusterPhy clusterPhy, String groupName) throws NotExistException, AdminOperateException;
Map<TopicPartition, Long> getGroupOffsetFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException;
ConsumerGroupDescription getGroupDescriptionFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException;
KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, String groupName) throws AdminOperateException;
Result<Void> resetGroupOffsets(Long clusterPhyId, String groupName, Map<TopicPartition, Long> offsetMap, String operator) throws NotExistException, AdminOperateException;

View File

@@ -7,8 +7,10 @@ import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.Group;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopicMember;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.*;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
@@ -17,6 +19,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupPO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.converter.GroupConverter;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
@@ -24,9 +27,10 @@ import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.kafka.KSPartialKafkaAdminClient;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupMemberDAO;
@@ -36,6 +40,7 @@ import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.time.Duration;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -43,7 +48,7 @@ import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_SEARCH_GROUP;
@Service
public class GroupServiceImpl extends BaseVersionControlService implements GroupService {
public class GroupServiceImpl extends BaseKafkaVersionControlService implements GroupService {
private static final ILog log = LogFactory.getLog(GroupServiceImpl.class);
@Autowired
@@ -64,11 +69,18 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
}
@Override
public List<String> listGroupsFromKafka(Long clusterPhyId) throws NotExistException, AdminOperateException {
AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId);
public List<String> listGroupsFromKafka(ClusterPhy clusterPhy) throws AdminOperateException {
KSPartialKafkaAdminClient adminClient = null;
try {
ListConsumerGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups(
Properties props = ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class);
if (props == null) {
props = new Properties();
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
adminClient = KSPartialKafkaAdminClient.create(props);
KSListGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups(
new ListConsumerGroupsOptions()
.timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
);
@@ -80,33 +92,46 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
return groupNameList;
} catch (Exception e) {
log.error("method=getGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
log.error("method=listGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
} finally {
if (adminClient != null) {
try {
adminClient.close(Duration.ofSeconds(10));
} catch (Exception e) {
// ignore
}
}
}
}
@Override
public Group getGroupFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException {
public Group getGroupFromKafka(ClusterPhy clusterPhy, String groupName) throws NotExistException, AdminOperateException {
// fetch the group's detailed description
ConsumerGroupDescription groupDescription = this.getGroupDescriptionFromKafka(clusterPhyId, groupName);
KSGroupDescription groupDescription = this.getGroupDescriptionFromKafka(clusterPhy, groupName);
if (groupDescription == null) {
return null;
}
Group group = new Group(clusterPhyId, groupName, groupDescription);
Group group = new Group(clusterPhy.getId(), groupName, groupDescription);
// find which topics the group has consumed offsets for
Map<String, GroupTopicMember> memberMap = new HashMap<>();
for (TopicPartition tp : this.getGroupOffsetFromKafka(clusterPhyId, groupName).keySet()) {
for (TopicPartition tp : this.getGroupOffsetFromKafka(clusterPhy.getId(), groupName).keySet()) {
memberMap.putIfAbsent(tp.topic(), new GroupTopicMember(tp.topic(), 0));
}
// record member information
for (MemberDescription memberDescription : groupDescription.members()) {
for (KSMemberDescription memberDescription : groupDescription.members()) {
if (group.getType() == GroupTypeEnum.CONNECT_CLUSTER) {
continue;
}
Set<TopicPartition> partitionList = new HashSet<>();
if (!ValidateUtils.isNull(memberDescription.assignment().topicPartitions())) {
partitionList = memberDescription.assignment().topicPartitions();
KSMemberConsumerAssignment assignment = (KSMemberConsumerAssignment) memberDescription.assignment();
if (!ValidateUtils.isNull(assignment.topicPartitions())) {
partitionList = assignment.topicPartitions();
}
Set<String> topicNameSet = partitionList.stream().map(elem -> elem.topic()).collect(Collectors.toSet());
@@ -143,20 +168,36 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
}
@Override
public ConsumerGroupDescription getGroupDescriptionFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException {
AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId);
public KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, String groupName) throws AdminOperateException {
KSPartialKafkaAdminClient adminClient = null;
try {
DescribeConsumerGroupsResult describeConsumerGroupsResult = adminClient.describeConsumerGroups(
Properties props = ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class);
if (props == null) {
props = new Properties();
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
adminClient = KSPartialKafkaAdminClient.create(props);
KSDescribeGroupsResult describeGroupsResult = adminClient.describeConsumerGroups(
Arrays.asList(groupName),
new DescribeConsumerGroupsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS).includeAuthorizedOperations(false)
);
return describeConsumerGroupsResult.all().get().get(groupName);
return describeGroupsResult.all().get().get(groupName);
} catch(Exception e){
log.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e);
log.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhy.getId(), groupName, e);
throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
} finally {
if (adminClient != null) {
try {
adminClient.close(Duration.ofSeconds(10));
} catch (Exception e) {
// ignore
}
}
}
}

View File

@@ -24,7 +24,7 @@ public abstract class AbstractHealthCheckService {
Function<Tuple<ClusterParam, BaseClusterHealthConfig>, HealthCheckResult>
> functionMap = new ConcurrentHashMap<>();
public abstract List<ClusterParam> getResList(Long clusterPhyId);
public abstract List<ClusterParam> getResList(Long clusterId);
public abstract HealthCheckDimensionEnum getHealthCheckDimensionEnum();

View File

@@ -0,0 +1,95 @@
package com.xiaojukeji.know.streaming.km.core.service.health.checker.connect;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ConnectClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectClusterMetricVersionItems.CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE;
/**
* @author wyb
* @date 2022/11/9
*/
@Service
public class HealthCheckConnectClusterService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckConnectClusterService.class);
@Autowired
private ConnectClusterMetricService connectClusterMetricService;
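// init() registers one check function per health-check config name;
// the abstract parent looks these up and invokes them through functionMap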
@PostConstruct
private void init() {
functionMap.putIfAbsent(HealthCheckNameEnum.CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE.getConfigName(), this::checkStartupFailurePercentage);
}
@Override
public List<ClusterParam> getResList(Long connectClusterId) {
List<ClusterParam> paramList = new ArrayList<>();
if (LoadedConnectClusterCache.containsByPhyId(connectClusterId)) {
paramList.add(new ConnectClusterParam(connectClusterId));
}
return paramList;
}
@Override
public HealthCheckDimensionEnum getHealthCheckDimensionEnum() {
return HealthCheckDimensionEnum.CONNECT_CLUSTER;
}
private HealthCheckResult checkStartupFailurePercentage(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
ConnectClusterParam param = (ConnectClusterParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String metricName = CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE;
Result<ConnectClusterMetrics> ret = connectClusterMetricService.collectConnectClusterMetricsFromKafka(connectClusterId, metricName);
if (!ret.hasData()) {
log.error("method=checkStartupFailurePercentage||connectClusterId={}||metricName={}||errMsg=get metrics failed",
param.getConnectClusterId(), metricName);
return null;
}
Float value = ret.getData().getMetric(metricName);
if (value == null) {
log.error("method=checkStartupFailurePercentage||connectClusterId={}||metricName={}||errMsg=get metrics failed",
param.getConnectClusterId(), metricName);
return null;
}
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectClusterId);
if (connectCluster == null) {
return null;
}
HealthCheckResult checkResult = new HealthCheckResult(
HealthCheckDimensionEnum.CONNECT_CLUSTER.getDimension(),
HealthCheckNameEnum.CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE.getConfigName(),
connectCluster.getKafkaClusterPhyId(),
String.valueOf(connectClusterId)
);
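// passed when the startup failure percentage is at or below the configured threshold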
checkResult.setPassed(value <= compareConfig.getValue() ? Constant.YES : Constant.NO);
return checkResult;
}
}

View File

@@ -0,0 +1,122 @@
package com.xiaojukeji.know.streaming.km.core.service.health.checker.connect;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.connect.ConnectorParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT;
/**
* @author wyb
* @date 2022/11/8
*/
@Service
public class HealthCheckConnectorService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckConnectorService.class);
@Autowired
private ConnectorService connectorService;
@Autowired
private ConnectorMetricService connectorMetricService;
@PostConstruct
private void init() {
functionMap.putIfAbsent(HealthCheckNameEnum.CONNECTOR_FAILED_TASK_COUNT.getConfigName(), this::checkFailedTaskCount);
functionMap.putIfAbsent(HealthCheckNameEnum.CONNECTOR_UNASSIGNED_TASK_COUNT.getConfigName(), this::checkUnassignedTaskCount);
}
@Override
public List<ClusterParam> getResList(Long connectClusterId) {
List<ClusterParam> paramList = new ArrayList<>();
Result<List<String>> ret = connectorService.listConnectorsFromCluster(connectClusterId);
if (!ret.hasData()) {
return paramList;
}
for (String connectorName : ret.getData()) {
paramList.add(new ConnectorParam(connectClusterId, connectorName));
}
return paramList;
}
@Override
public HealthCheckDimensionEnum getHealthCheckDimensionEnum() {
return HealthCheckDimensionEnum.CONNECTOR;
}
private HealthCheckResult checkFailedTaskCount(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
ConnectorParam param = (ConnectorParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
Double compareValue = compareConfig.getValue();
return this.getHealthCompareResult(connectClusterId, connectorName, CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT, HealthCheckNameEnum.CONNECTOR_FAILED_TASK_COUNT, compareValue);
}
private HealthCheckResult checkUnassignedTaskCount(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
ConnectorParam param = (ConnectorParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
Double compareValue = compareConfig.getValue();
return this.getHealthCompareResult(connectClusterId, connectorName, CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT, HealthCheckNameEnum.CONNECTOR_UNASSIGNED_TASK_COUNT, compareValue);
}
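/**
 * Collect the given metric for the connector and compare it against the configured
 * threshold: the check passes when the collected value does not exceed compareValue.
 */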
private HealthCheckResult getHealthCompareResult(Long connectClusterId, String connectorName, String metricName, HealthCheckNameEnum healthCheckNameEnum, Double compareValue) {
Result<ConnectorMetrics> ret = connectorMetricService.collectConnectClusterMetricsFromKafka(connectClusterId, connectorName, metricName);
if (!ret.hasData()) {
log.error("method=getHealthCompareResult||connectClusterId={}||connectorName={}||metricName={}||errMsg=get metrics failed",
connectClusterId, connectorName, metricName);
return null;
}
Float value = ret.getData().getMetric(metricName);
if (value == null) {
log.error("method=getHealthCompareResult||connectClusterId={}||connectorName={}||metricName={}||errMsg=get metrics failed",
connectClusterId, connectorName, metricName);
return null;
}
HealthCheckResult checkResult = new HealthCheckResult(
HealthCheckDimensionEnum.CONNECTOR.getDimension(),
healthCheckNameEnum.getConfigName(),
connectClusterId,
connectorName
);
checkResult.setPassed(compareValue >= value ? Constant.YES : Constant.NO);
return checkResult;
}
}
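For orientation, a minimal sketch (hypothetical, not part of this commit) of how a checker like the one above is driven: a caller asks getResList() for the resources, then feeds each resource plus its threshold config through the function registered in functionMap. The method name and its placement inside an AbstractHealthCheckService subclass are assumptions; the functionMap value type is inferred from the method references registered in init().

    // Hypothetical driver inside an AbstractHealthCheckService subclass;
    // functionMap is the checker's own registry, as populated in init() above.
    protected List<HealthCheckResult> runAllChecks(Long connectClusterId,
                                                   Map<String, BaseClusterHealthConfig> configByName) {
        List<HealthCheckResult> results = new ArrayList<>();
        for (ClusterParam res : this.getResList(connectClusterId)) {
            for (Map.Entry<String, BaseClusterHealthConfig> e : configByName.entrySet()) {
                Function<Tuple<ClusterParam, BaseClusterHealthConfig>, HealthCheckResult> check =
                        functionMap.get(e.getKey());
                if (check == null) {
                    continue; // this checker does not handle the config item
                }
                HealthCheckResult r = check.apply(new Tuple<>(res, e.getValue()));
                if (r != null) {
                    results.add(r); // null means the metric was unavailable; skip it
                }
            }
        }
        return results;
    }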

View File

@@ -9,6 +9,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.Cluster
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.GroupParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
@@ -78,7 +79,7 @@ public class HealthCheckGroupService extends AbstractHealthCheckService {
return null;
}
checkResult.setPassed(countResult.getData() >= singleConfig.getDetectedTimes()? 0: 1);
checkResult.setPassed(countResult.getData() >= singleConfig.getDetectedTimes() ? Constant.NO : Constant.YES);
return checkResult;
}

View File

@@ -109,7 +109,7 @@ public class HealthCheckTopicService extends AbstractHealthCheckService {
param.getTopicName()
);
checkResult.setPassed(partitionList.stream().filter(elem -> elem.getLeaderBrokerId().equals(Constant.INVALID_CODE)).count() >= valueConfig.getValue()? 0: 1);
checkResult.setPassed(partitionList.stream().filter(elem -> elem.getLeaderBrokerId().equals(Constant.INVALID_CODE)).count() >= valueConfig.getValue() ? Constant.NO : Constant.YES);
return checkResult;
}

View File

@@ -24,4 +24,6 @@ public interface HealthCheckResultService {
Map<String, BaseClusterHealthConfig> getClusterHealthConfig(Long clusterPhyId);
void batchReplace(Long clusterPhyId, Integer dimension, List<HealthCheckResult> healthCheckResults);
List<HealthCheckResultPO> getConnectorHealthCheckResult(Long clusterPhyId);
}

View File

@@ -7,20 +7,26 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.Ba
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckAggResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.po.config.PlatformClusterConfigPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectClusterPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.enums.config.ConfigGroupEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.config.PlatformClusterConfigService;
import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectClusterDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.health.HealthCheckResultDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum.CONNECTOR;
@Service
public class HealthCheckResultServiceImpl implements HealthCheckResultService {
@@ -29,6 +35,9 @@ public class HealthCheckResultServiceImpl implements HealthCheckResultService {
@Autowired
private HealthCheckResultDAO healthCheckResultDAO;
@Autowired
private ConnectClusterDAO connectClusterDAO;
@Autowired
private PlatformClusterConfigService platformClusterConfigService;
@@ -122,6 +131,25 @@ public class HealthCheckResultServiceImpl implements HealthCheckResultService {
return configMap;
}
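/**
 * Connector check results are stored under the Connect cluster id, so first map the
 * Kafka cluster to its Connect clusters, then load their CONNECTOR-dimension results.
 */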
@Override
public List<HealthCheckResultPO> getConnectorHealthCheckResult(Long clusterPhyId) {
List<HealthCheckResultPO> resultPOList = new ArrayList<>();
// look up the Connect clusters under this Kafka cluster
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
List<Long> connectClusterIdList = connectClusterDAO.selectList(lambdaQueryWrapper).stream().map(elem -> elem.getId()).collect(Collectors.toList());
if (ValidateUtils.isEmptyList(connectClusterIdList)) {
return resultPOList;
}
LambdaQueryWrapper<HealthCheckResultPO> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(HealthCheckResultPO::getDimension, CONNECTOR.getDimension());
wrapper.in(HealthCheckResultPO::getClusterPhyId, connectClusterIdList);
resultPOList.addAll(healthCheckResultDAO.selectList(wrapper));
return resultPOList;
}
@Override
public void batchReplace(Long clusterPhyId, Integer dimension, List<HealthCheckResult> healthCheckResults) {
List<HealthCheckResultPO> inDBList = this.listCheckResult(clusterPhyId, dimension);

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.know.streaming.km.core.service.health.state;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthScoreResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.*;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import java.util.List;
@@ -16,11 +17,13 @@ public interface HealthStateService {
TopicMetrics calTopicHealthMetrics(Long clusterPhyId, String topicName);
GroupMetrics calGroupHealthMetrics(Long clusterPhyId, String groupName);
ZookeeperMetrics calZookeeperHealthMetrics(Long clusterPhyId);
ConnectorMetrics calConnectorHealthMetrics(Long connectClusterId, String connectorName);
/**
* Get the cluster health check results
*/
List<HealthScoreResult> getClusterHealthResult(Long clusterPhyId);
List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum);
List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Integer dimension, String resNme);
List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, List<Integer> dimensionCodeList);
List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Long clusterId, Integer dimension, String resName);
}

View File

@@ -2,24 +2,31 @@ package com.xiaojukeji.know.streaming.km.core.service.health.state.impl;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckAggResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthScoreResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.*;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthStateEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems.*;
@@ -38,6 +45,9 @@ public class HealthStateServiceImpl implements HealthStateService {
@Autowired
private BrokerService brokerService;
@Autowired
private ConnectClusterService connectClusterService;
@Override
public ClusterMetrics calClusterHealthMetrics(Long clusterPhyId) {
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
@@ -59,6 +69,7 @@ public class HealthStateServiceImpl implements HealthStateService {
metrics.putMetric(this.calClusterTopicsHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calClusterGroupsHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calZookeeperHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calClusterConnectsHealthMetrics(clusterPhyId).getMetrics());
// tally the final results
Float passed = 0.0f;
@@ -67,6 +78,7 @@ public class HealthStateServiceImpl implements HealthStateService {
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_BROKERS);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_GROUPS);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR);
Float total = 0.0f;
total += metrics.getMetric(ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL);
@@ -74,6 +86,7 @@ public class HealthStateServiceImpl implements HealthStateService {
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_BROKERS);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_GROUPS);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR);
// overall state
Float state = 0.0f;
@@ -82,6 +95,7 @@ public class HealthStateServiceImpl implements HealthStateService {
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_BROKERS));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_GROUPS));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_CLUSTER));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_CONNECTOR));
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED, passed);
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL, total);
@@ -184,6 +198,31 @@ public class HealthStateServiceImpl implements HealthStateService {
return metrics;
}
@Override
public ConnectorMetrics calConnectorHealthMetrics(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectClusterId);
ConnectorMetrics metrics = new ConnectorMetrics(connectClusterId, connectorName);
// Connect cluster not found
if (connectCluster == null) {
metrics.putMetric(CONNECTOR_METRIC_HEALTH_STATE, (float) HealthStateEnum.DEAD.getDimension());
return metrics;
}
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(connectClusterId, HealthCheckDimensionEnum.CONNECTOR, connectorName);
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_TOTAL, (float) resultList.size());
}
metrics.putMetric(CONNECTOR_METRIC_HEALTH_STATE, (float) this.calHealthState(resultList).getDimension());
return metrics;
}
@Override
public List<HealthScoreResult> getClusterHealthResult(Long clusterPhyId) {
List<HealthCheckResultPO> poList = healthCheckResultService.listCheckResult(clusterPhyId);
@@ -199,8 +238,36 @@ public class HealthStateServiceImpl implements HealthStateService {
}
@Override
public List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Integer dimension, String resNme) {
List<HealthCheckResultPO> poList = healthCheckResultService.listCheckResult(clusterPhyId, dimension, resNme);
public List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, List<Integer> dimensionCodeList) {
// look up the health check results
List<HealthCheckResultPO> poList = new ArrayList<>();
for (Integer dimensionCode : dimensionCodeList) {
HealthCheckDimensionEnum dimensionEnum = HealthCheckDimensionEnum.getByCode(dimensionCode);
if (dimensionEnum.equals(HealthCheckDimensionEnum.UNKNOWN)) {
continue;
}
if (dimensionEnum.equals(HealthCheckDimensionEnum.CONNECTOR)) {
poList.addAll(healthCheckResultService.getConnectorHealthCheckResult(clusterPhyId));
} else {
poList.addAll(healthCheckResultService.listCheckResult(clusterPhyId, dimensionEnum.getDimension()));
}
}
List<HealthScoreResult> resultList = this.getResHealthResult(clusterPhyId, dimensionCodeList, poList);
return resultList;
}
@Override
public List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Long clusterId, Integer dimension, String resName) {
List<HealthCheckResultPO> poList = healthCheckResultService.listCheckResult(clusterId, dimension, resName);
return this.convert2HealthScoreResultList(clusterPhyId, poList, dimension);
}
@@ -272,6 +339,36 @@ public class HealthStateServiceImpl implements HealthStateService {
return metrics;
}
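/**
 * Aggregate connector and Connect-cluster check results into the Kafka cluster's
 * CONNECTOR metrics; any down Connect cluster forces the state to POOR.
 */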
private ClusterMetrics calClusterConnectsHealthMetrics(Long clusterPhyId) {
// fetch the health check results
List<HealthCheckResultPO> connectHealthCheckResult = healthCheckResultService.getConnectorHealthCheckResult(clusterPhyId);
connectHealthCheckResult.addAll(healthCheckResultService.listCheckResult(clusterPhyId, CONNECT_CLUSTER.getDimension()));
List<Integer> dimensionCodeList = Arrays.asList(CONNECTOR.getDimension(), CONNECT_CLUSTER.getDimension());
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(connectHealthCheckResult, dimensionCodeList);
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR, 0.0f);
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR, 0.0f);
} else {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR, (float) resultList.size());
}
// judge by the Connect cluster state first
if (connectClusterService.existConnectClusterDown(clusterPhyId)) {
metrics.putMetric(CLUSTER_METRIC_HEALTH_STATE_CONNECTOR, (float) HealthStateEnum.POOR.getDimension());
return metrics;
}
metrics.putMetric(CLUSTER_METRIC_HEALTH_STATE_CONNECTOR, (float) this.calHealthState(resultList).getDimension());
return metrics;
}
/**************************************************** aggregate data ****************************************************/
@@ -305,6 +402,61 @@ public class HealthStateServiceImpl implements HealthStateService {
/**************************************************** calculate metrics ****************************************************/
private List<HealthCheckAggResult> getDimensionHealthCheckAggResult(List<HealthCheckResultPO> poList, List<Integer> dimensionCodeList) {
Map<String /* check name */, List<HealthCheckResultPO> /* check results */> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po : poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
}
List<HealthCheckAggResult> stateList = new ArrayList<>();
for (Integer dimensionCode : dimensionCodeList) {
HealthCheckDimensionEnum dimensionEnum = HealthCheckDimensionEnum.getByCode(dimensionCode);
if (dimensionEnum.equals(UNKNOWN)) {
continue;
}
for (HealthCheckNameEnum nameEnum : HealthCheckNameEnum.getByDimension(dimensionEnum)) {
stateList.add(new HealthCheckAggResult(nameEnum, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
}
return stateList;
}
private List<HealthScoreResult> getResHealthResult(Long clusterPhyId, List<Integer> dimensionCodeList, List<HealthCheckResultPO> poList) {
Map<String /* check name */, List<HealthCheckResultPO> /* check results */> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po : poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
}
Map<String, BaseClusterHealthConfig> configMap = healthCheckResultService.getClusterHealthConfig(clusterPhyId);
List<HealthScoreResult> healthScoreResultList = new ArrayList<>();
for (Integer dimensionCode : dimensionCodeList) {
HealthCheckDimensionEnum dimensionEnum = HealthCheckDimensionEnum.getByCode(dimensionCode);
// skip if the dimension is unknown
if (dimensionEnum.equals(HealthCheckDimensionEnum.UNKNOWN)){
continue;
}
for (HealthCheckNameEnum nameEnum : HealthCheckNameEnum.getByDimension(dimensionEnum)) {
BaseClusterHealthConfig baseConfig = configMap.get(nameEnum.getConfigName());
if (baseConfig == null) {
continue;
}
healthScoreResultList.add(new HealthScoreResult(nameEnum, baseConfig, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
}
return healthScoreResultList;
}
private float getHealthCheckPassed(List<HealthCheckAggResult> aggResultList){
if(ValidateUtils.isEmptyList(aggResultList)) {
return 0f;

View File

@@ -27,6 +27,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.kafkauser.KafkaUserService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
@@ -54,7 +55,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class KafkaUserServiceImpl extends BaseVersionControlService implements KafkaUserService {
public class KafkaUserServiceImpl extends BaseKafkaVersionControlService implements KafkaUserService {
private static final ILog log = LogFactory.getLog(KafkaUserServiceImpl.class);
private static final String KAFKA_USER_REPLACE = "replaceKafkaUser";

View File

@@ -10,7 +10,7 @@ import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import kafka.zk.KafkaZkClient;
@@ -36,7 +36,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
* @author didi
*/
@Service
public class OpPartitionServiceImpl extends BaseVersionControlService implements OpPartitionService {
public class OpPartitionServiceImpl extends BaseKafkaVersionControlService implements OpPartitionService {
private static final ILog LOGGER = LogFactory.getLog(OpPartitionServiceImpl.class);
@Autowired

View File

@@ -3,7 +3,6 @@ package com.xiaojukeji.know.streaming.km.core.service.partition.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.PartitionMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.TopicMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;

View File

@@ -26,7 +26,7 @@ import com.xiaojukeji.know.streaming.km.core.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionMap;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionState;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaConsumerClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.partition.PartitionDAO;
@@ -57,7 +57,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
* @author didi
*/
@Service
public class PartitionServiceImpl extends BaseVersionControlService implements PartitionService {
public class PartitionServiceImpl extends BaseKafkaVersionControlService implements PartitionService {
private static final ILog log = LogFactory.getLog(PartitionServiceImpl.class);
private static final String PARTITION_OFFSET_GET = "getPartitionOffset";

View File

@@ -19,7 +19,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import kafka.admin.ReassignPartitionsCommand;
@@ -42,7 +42,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_REASSIGNMENT;
@Service
public class ReassignServiceImpl extends BaseVersionControlService implements ReassignService {
public class ReassignServiceImpl extends BaseKafkaVersionControlService implements ReassignService {
private static final ILog log = LogFactory.getLog(ReassignServiceImpl.class);
private static final String EXECUTE_TASK = "executeTask";

View File

@@ -20,7 +20,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.topic.OpTopicService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
@@ -48,7 +48,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
* @author didi
*/
@Service
public class OpTopicServiceImpl extends BaseVersionControlService implements OpTopicService {
public class OpTopicServiceImpl extends BaseKafkaVersionControlService implements OpTopicService {
private static final ILog log = LogFactory.getLog(TopicConfigServiceImpl.class);
private static final String TOPIC_CREATE = "createTopic";

View File

@@ -26,7 +26,7 @@ import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
@@ -46,7 +46,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class TopicConfigServiceImpl extends BaseVersionControlService implements TopicConfigService {
public class TopicConfigServiceImpl extends BaseKafkaVersionControlService implements TopicConfigService {
private static final ILog log = LogFactory.getLog(TopicConfigServiceImpl.class);
private static final String GET_TOPIC_CONFIG = "getTopicConfig";

View File

@@ -0,0 +1,74 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchQuery;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import org.springframework.util.CollectionUtils;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* @author wyb
* @date 2022/11/9
*/
public abstract class BaseConnectorMetricService extends BaseConnectorVersionControlService{
private static final ILog LOGGER = LogFactory.getLog(BaseConnectorMetricService.class);
private List<String> metricNames = new ArrayList<>();
private List<String> metricFields = new ArrayList<>();
@PostConstruct
public void init(){
initMetricFieldAndNameList();
initRegisterVCHandler();
}
protected void initMetricFieldAndNameList(){
metricNames = listVersionControlItems().stream().map(v -> v.getName()).collect(Collectors.toList());
metricFields = listMetricPOFields();
}
protected abstract List<String> listMetricPOFields();
protected abstract void initRegisterVCHandler();
/**
* Check whether str is a metric name
* @param str the candidate name
* @return true if str is a registered metric name
*/
protected boolean isMetricName(String str){
return metricNames.contains(str);
}
/**
* Check whether str is a metric PO field name
* @param str the candidate name
* @return true if str is a metric PO field
*/
protected boolean isMetricField(String str){
return metricFields.contains(str);
}
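/**
 * Mark whether the query name refers to a metric and/or a PO field,
 * so downstream search logic can route it accordingly.
 */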
protected void setQueryMetricFlag(SearchQuery query){
if(null == query){return;}
String fieldName = query.getQueryName();
query.setMetric(isMetricName(fieldName));
query.setField(isMetricField(fieldName));
}
protected <T extends SearchQuery> void setQueryMetricFlag(List<T> matches){
if(CollectionUtils.isEmpty(matches)){return;}
for (SearchQuery match : matches){
setQueryMetricFlag(match);
}
}
}

View File

@@ -0,0 +1,55 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import org.springframework.beans.factory.annotation.Autowired;
import javax.annotation.Nullable;
/**
* @author wyb
* @date 2022/11/8
*/
public abstract class BaseConnectorVersionControlService extends BaseVersionControlService {
@Autowired
ConnectClusterService connectClusterService;
@Nullable
protected Object doVCHandler(Long connectClusterId, String action, VersionItemParam param) throws VCHandlerNotExistException {
String versionStr = connectClusterService.getClusterVersion(connectClusterId);
LOGGER.debug(
"method=doVCHandler||connectClusterId={}||action={}||type={}||param={}",
connectClusterId, action, getVersionItemType().getMessage(), ConvertUtil.obj2Json(param)
);
Tuple<Object, String> ret = doVCHandler(versionStr, action, param);
LOGGER.debug(
"method=doVCHandler||clusterId={}||action={}||methodName={}||type={}||param={}||ret={}!",
connectClusterId, action, ret != null ?ret.getV2(): "", getVersionItemType().getMessage(), ConvertUtil.obj2Json(param), ConvertUtil.obj2Json(ret)
);
return ret == null? null: ret.getV1();
}
@Nullable
protected String getMethodName(Long connectClusterId, String action) {
String versionStr = connectClusterService.getClusterVersion(connectClusterId);
return getMethodName(versionStr, action);
}
@Nullable
protected VersionConnectJmxInfo getJMXInfo(Long connectClusterId, String action) {
String versionStr = connectClusterService.getClusterVersion(connectClusterId);
return (VersionConnectJmxInfo) getJMXInfo(versionStr, action);
}
}

View File

@@ -0,0 +1,52 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import org.springframework.beans.factory.annotation.Autowired;
import javax.annotation.Nullable;
/**
* @author didi
*/
public abstract class BaseKafkaVersionControlService extends BaseVersionControlService{
@Autowired
private ClusterPhyService clusterPhyService;
@Nullable
protected Object doVCHandler(Long clusterPhyId, String action, VersionItemParam param) throws VCHandlerNotExistException {
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
LOGGER.info(
"method=doVCHandler||clusterId={}||action={}||type={}||param={}",
clusterPhyId, action, getVersionItemType().getMessage(), ConvertUtil.obj2Json(param)
);
Tuple<Object, String> ret = doVCHandler(versionStr, action, param);
LOGGER.debug(
"method=doVCHandler||clusterId={}||action={}||methodName={}||type={}||param={}||ret={}!",
clusterPhyId, action, ret != null ?ret.getV2(): "", getVersionItemType().getMessage(), ConvertUtil.obj2Json(param), ConvertUtil.obj2Json(ret)
);
return ret == null? null: ret.getV1();
}
@Nullable
protected String getMethodName(Long clusterPhyId, String action) {
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
return getMethodName(versionStr, action);
}
@Nullable
protected VersionJmxInfo getJMXInfo(Long clusterPhyId, String action){
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
return getJMXInfo(versionStr, action);
}
}
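Both base classes above share one shape: resolve an id to a version string, then dispatch through a version-keyed handler table. A condensed standalone sketch of that pattern, with simplified types that are not the project's real signatures:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Standalone sketch of the "resolve version, then dispatch" pattern;
    // all names here are illustrative, not the project's real API.
    class VersionDispatchSketch {
        private final Map<String, Function<Object, Object>> handlers = new ConcurrentHashMap<>();
        private final Map<Long, String> clusterVersions = new ConcurrentHashMap<>();

        void register(String methodName, Function<Object, Object> handler) {
            handlers.put(methodName, handler);
        }

        // Mirrors doVCHandler(Long clusterPhyId, ...): id -> version -> handler.
        Object dispatch(Long clusterId, String methodName, Object param) {
            String version = clusterVersions.getOrDefault(clusterId, "unknown");
            System.out.println("version=" + version + ", method=" + methodName);
            Function<Object, Object> handler = handlers.get(methodName);
            return handler == null ? null : handler.apply(param);
        }

        public static void main(String[] args) {
            VersionDispatchSketch s = new VersionDispatchSketch();
            s.clusterVersions.put(1L, "3.1.0");
            s.register("getTopicConfig", p -> "config-for-" + p);
            System.out.println(s.dispatch(1L, "getTopicConfig", "topicA")); // config-for-topicA
        }
    }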

View File

@@ -17,7 +17,7 @@ import java.util.stream.Collectors;
/**
* @author didi
*/
public abstract class BaseMetricService extends BaseVersionControlService {
public abstract class BaseMetricService extends BaseKafkaVersionControlService {
private static final ILog LOGGER = LogFactory.getLog(BaseMetricService.class);
private List<String> metricNames = new ArrayList<>();

View File

@@ -1,6 +1,5 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.alibaba.fastjson.JSON;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
@@ -10,6 +9,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMethod
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.util.CollectionUtils;
@@ -56,20 +56,14 @@ public abstract class BaseVersionControlService {
}
@Nullable
protected Object doVCHandler(Long clusterPhyId, String action, VersionItemParam param) throws VCHandlerNotExistException {
String methodName = getMethodName(clusterPhyId, action);
Object ret = versionControlService.doHandler(getVersionItemType(), methodName, param);
protected Tuple<Object, String> doVCHandler(String version, String action, VersionItemParam param) throws VCHandlerNotExistException {
String methodName = getMethodName(version, action);
LOGGER.debug(
"method=doVCHandler||clusterId={}||action={}||methodName={}||type={}param={}||ret={}!",
clusterPhyId, action, methodName, getVersionItemType().getMessage(), JSON.toJSONString(param), JSON.toJSONString(ret)
);
return ret;
return new Tuple<>(versionControlService.doHandler(getVersionItemType(), methodName, param), methodName);
}
protected String getMethodName(Long clusterId, String action) {
VersionControlItem item = versionControlService.getVersionControlItem(clusterId, getVersionItemType().getCode(), action);
protected String getMethodName(String version, String action) {
VersionControlItem item = versionControlService.getVersionControlItem(version, getVersionItemType().getCode(), action);
if (null == item) {
return "";
}
@@ -81,8 +75,8 @@ public abstract class BaseVersionControlService {
return "";
}
protected VersionJmxInfo getJMXInfo(Long clusterId, String action){
VersionControlItem item = versionControlService.getVersionControlItem(clusterId, getVersionItemType().getCode(), action);
protected VersionJmxInfo getJMXInfo(String version, String action){
VersionControlItem item = versionControlService.getVersionControlItem(version, getVersionItemType().getCode(), action);
if (null == item) {
return null;
}

View File

@@ -6,7 +6,6 @@ import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
@@ -45,11 +44,11 @@ public interface VersionControlService {
/**
* List the version compatibility items for the given version
* @param clusterId
* @param version
* @param type
* @return
*/
List<VersionControlItem> listVersionControlItem(Long clusterId, Integer type);
List<VersionControlItem> listVersionControlItem(String version, Integer type);
/**
* 获取对应type所有的的版本兼容项
@@ -68,27 +67,18 @@ public interface VersionControlService {
/**
* Query the version compatibility item for the given item name
* @param clusterId
* @param version
* @param type
* @param itemName
* @return
*/
VersionControlItem getVersionControlItem(Long clusterId, Integer type, String itemName);
VersionControlItem getVersionControlItem(String version, Integer type, String itemName);
/**
* Check whether the item is supported by the given version
* @param clusterId
* @param version
* @param item
* @return
*/
boolean isClusterSupport(Long clusterId, VersionControlItem item);
/**
* Query the version compatibility items for the given item names
* @param clusterId
* @param type
* @param itemNames
* @return
*/
Map<String, VersionControlItem> getVersionControlItems(Long clusterId, Integer type, List<String> itemNames);
boolean isClusterSupport(String version, VersionControlItem item);
}

View File

@@ -7,11 +7,8 @@ import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.VersionUtil;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
@@ -26,18 +23,24 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Function;
@Slf4j
@DependsOn("springTool")
@Service("versionControlService")
public class VersionControlServiceImpl implements VersionControlService {
/**
* key:versionItemType
*/
private final Map<Integer, List<VersionControlItem>> versionItemMap = new ConcurrentHashMap<>();
@Autowired
private ClusterPhyService clusterPhyService;
/**
* key:versionItemType
* key1:metricName
*/
private final Map<Integer, Map<String, List<VersionControlItem>>> versionItemMetricNameMap = new ConcurrentHashMap<>();
private final Map<Integer, List<VersionControlItem>> versionItemMap = new ConcurrentHashMap<>();
private final Map<Integer, Map<String, List<VersionControlItem>>> versionItemMetricNameMap = new ConcurrentHashMap<>();
private final Map<String, Function<VersionItemParam, Object>> functionMap = new ConcurrentHashMap<>();
/**
* key : VersionItemTypeEnum.code@methodName
*/
private final Map<String, Function<VersionItemParam, Object>> functionMap = new ConcurrentHashMap<>();
@PostConstruct
public void init(){
@@ -51,7 +54,7 @@ public class VersionControlServiceImpl implements VersionControlService {
@Override
public void registerHandler(VersionItemTypeEnum typeEnum, String methodName, Function<VersionItemParam, Object> func){
functionMap.put(typeEnum.getCode() + "@" + methodName , func);
functionMap.put(versionFunctionKey(typeEnum.getCode(), methodName), func);
}
@Override
@@ -76,24 +79,23 @@ public class VersionControlServiceImpl implements VersionControlService {
itemMap.put(action, controlItems);
versionItemMetricNameMap.put(typeCode, itemMap);
functionMap.put(typeCode + "@" + methodName , func);
functionMap.put(versionFunctionKey(typeCode, methodName), func);
}
@Nullable
@Override
public Object doHandler(VersionItemTypeEnum typeEnum, String methodName, VersionItemParam param) throws VCHandlerNotExistException {
Function<VersionItemParam, Object> func = functionMap.get(typeEnum.getCode() + "@" + methodName);
Function<VersionItemParam, Object> func = functionMap.get(versionFunctionKey(typeEnum.getCode(), methodName));
if(null == func) {
throw new VCHandlerNotExistException(typeEnum.getCode() + "@" + methodName);
throw new VCHandlerNotExistException(versionFunctionKey(typeEnum.getCode(), methodName));
}
return func.apply(param);
}
@Override
public List<VersionControlItem> listVersionControlItem(Long clusterId, Integer type) {
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterId);
long versionLong = VersionUtil.normailze(versionStr);
public List<VersionControlItem> listVersionControlItem(String version, Integer type) {
long versionLong = VersionUtil.normailze(version);
List<VersionControlItem> items = versionItemMap.get(type);
if(CollectionUtils.isEmpty(items)) {
@@ -122,8 +124,8 @@ public class VersionControlServiceImpl implements VersionControlService {
}
@Override
public VersionControlItem getVersionControlItem(Long clusterId, Integer type, String itemName) {
List<VersionControlItem> items = listVersionControlItem(clusterId, type);
public VersionControlItem getVersionControlItem(String version, Integer type, String itemName) {
List<VersionControlItem> items = listVersionControlItem(version, type);
for(VersionControlItem item : items){
if(itemName.equals(item.getName())){
@@ -135,24 +137,13 @@ public class VersionControlServiceImpl implements VersionControlService {
}
@Override
public boolean isClusterSupport(Long clusterId, VersionControlItem item){
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterId);
long versionLong = VersionUtil.normailze(versionStr);
public boolean isClusterSupport(String version, VersionControlItem item) {
long versionLong = VersionUtil.normailze(version);
return item.getMinVersion() <= versionLong && versionLong < item.getMaxVersion();
}
@Override
public Map<String, VersionControlItem> getVersionControlItems(Long clusterId, Integer type, List<String> itemNames){
Map<String, VersionControlItem> versionControlItemMap = new HashMap<>();
for(String itemName : itemNames){
VersionControlItem item = getVersionControlItem(clusterId, type, itemName);
if(null != item){
versionControlItemMap.put(itemName, item);
}
}
return versionControlItemMap;
/**************************************************** private method ****************************************************/
private String versionFunctionKey(int typeCode, String methodName){
return typeCode + "@" + methodName;
}
}
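A short usage sketch of the refactored handler table (the stored key is versionFunctionKey(typeCode, methodName), i.e. "<typeCode>@<methodName>"). SERVICE_OP_REASSIGNMENT and "executeTask" appear in ReassignServiceImpl above; the handler body and the bare VersionItemParam constructor are illustrative assumptions:

    // Hypothetical registration/dispatch round trip over versionControlService.
    versionControlService.registerHandler(SERVICE_OP_REASSIGNMENT, "executeTask", param -> "done"); // handler body is illustrative
    Object ret = versionControlService.doHandler(SERVICE_OP_REASSIGNMENT, "executeTask", new VersionItemParam()); // real callers pass concrete param subclasses
    // doHandler throws VCHandlerNotExistException("<typeCode>@executeTask") if nothing was registered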

View File

@@ -1,8 +1,10 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMethodInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlMetricService;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.V_0_10_0_0;
@@ -58,4 +60,17 @@ public abstract class BaseMetricVersionMetric implements VersionControlMetricSer
jmxExtendInfo.setMethodName(methodName);
return jmxExtendInfo;
}
protected VersionConnectJmxInfo buildConnectJMXMethodExtend(String methodName) {
VersionConnectJmxInfo connectorJmxInfo = new VersionConnectJmxInfo();
connectorJmxInfo.setMethodName(methodName);
return connectorJmxInfo;
}
protected VersionConnectJmxInfo buildConnectJMXMethodExtend(String methodName, ConnectorTypeEnum type) {
VersionConnectJmxInfo connectorJmxInfo = new VersionConnectJmxInfo();
connectorJmxInfo.setMethodName(methodName);
connectorJmxInfo.setType(type);
return connectorJmxInfo;
}
}

View File

@@ -0,0 +1,110 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.CATEGORY_CLUSTER;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.CATEGORY_PERFORMANCE;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_CLUSTER;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.*;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.JMX_CONNECT_WORKER_METRIC;
import static com.xiaojukeji.know.streaming.km.core.service.connect.cluster.impl.ConnectClusterMetricServiceImpl.*;
@Component
public class ConnectClusterMetricVersionItems extends BaseMetricVersionMetric {
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_COUNT = "ConnectorCount";
public static final String CONNECT_CLUSTER_METRIC_TASK_COUNT = "TaskCount";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_ATTEMPTS_TOTAL = "ConnectorStartupAttemptsTotal";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE = "ConnectorStartupFailurePercentage";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_TOTAL = "ConnectorStartupFailureTotal";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_PERCENTAGE = "ConnectorStartupSuccessPercentage";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_TOTAL = "ConnectorStartupSuccessTotal";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_ATTEMPTS_TOTAL = "TaskStartupAttemptsTotal";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_PERCENTAGE = "TaskStartupFailurePercentage";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_TOTAL = "TaskStartupFailureTotal";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_PERCENTAGE = "TaskStartupSuccessPercentage";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_TOTAL = "TaskStartupSuccessTotal";
public static final String CONNECT_CLUSTER_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
@Override
public int versionItemType() {
return METRIC_CONNECT_CLUSTER.getCode();
}
@Override
public List<VersionMetricControlItem> init() {
List<VersionMetricControlItem> items = new ArrayList<>();
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_COUNT).unit("").desc("连接器数量").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_COUNT))); // connector-count attribute; assumes JmxAttribute defines CONNECTOR_COUNT
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_COUNT).unit("").desc("任务数量").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_ATTEMPTS_TOTAL).unit("").desc("连接器启动次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_ATTEMPTS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE).unit("%").desc("连接器启动失败概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_FAILURE_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_TOTAL).unit("").desc("连接器启动失败次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_FAILURE_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_PERCENTAGE).unit("%").desc("连接器启动成功概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_SUCCESS_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_TOTAL).unit("").desc("连接器启动成功次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_SUCCESS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_ATTEMPTS_TOTAL).unit("").desc("任务启动次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_ATTEMPTS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_PERCENTAGE).unit("%").desc("任务启动失败概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_FAILURE_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_TOTAL).unit("").desc("任务启动失败次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_FAILURE_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_PERCENTAGE).unit("%").desc("任务启动成功概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_SUCCESS_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_TOTAL).unit("").desc("任务启动成功次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_SUCCESS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_COLLECT_COST_TIME).unit("").desc("采集connect集群指标耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(CONNECT_CLUSTER_METHOD_DO_NOTHING));
return items;
}
}

View File

@@ -0,0 +1,310 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.*;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum.SINK;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum.SOURCE;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_CONNECTOR;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.*;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.*;
import static com.xiaojukeji.know.streaming.km.core.service.broker.impl.BrokerMetricServiceImpl.BROKER_METHOD_DO_NOTHING;
import static com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl.ConnectorMetricServiceImpl.*;
@Component
public class ConnectorMetricVersionItems extends BaseMetricVersionMetric {
public static final String CONNECTOR_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
public static final String CONNECTOR_METRIC_HEALTH_STATE = "HealthState";
public static final String CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT = "ConnectorTotalTaskCount";
public static final String CONNECTOR_METRIC_HEALTH_CHECK_PASSED = "HealthCheckPassed";
public static final String CONNECTOR_METRIC_HEALTH_CHECK_TOTAL = "HealthCheckTotal";
public static final String CONNECTOR_METRIC_CONNECTOR_RUNNING_TASK_COUNT = "ConnectorRunningTaskCount";
public static final String CONNECTOR_METRIC_CONNECTOR_PAUSED_TASK_COUNT = "ConnectorPausedTaskCount";
public static final String CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT = "ConnectorFailedTaskCount";
public static final String CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT = "ConnectorUnassignedTaskCount";
public static final String CONNECTOR_METRIC_BATCH_SIZE_AVG = "BatchSizeAvg";
public static final String CONNECTOR_METRIC_BATCH_SIZE_MAX = "BatchSizeMax";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_AVG_TIME_MS = "OffsetCommitAvgTimeMs";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_MAX_TIME_MS = "OffsetCommitMaxTimeMs";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_FAILURE_PERCENTAGE = "OffsetCommitFailurePercentage";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_SUCCESS_PERCENTAGE = "OffsetCommitSuccessPercentage";
public static final String CONNECTOR_METRIC_POLL_BATCH_AVG_TIME_MS = "PollBatchAvgTimeMs";
public static final String CONNECTOR_METRIC_POLL_BATCH_MAX_TIME_MS = "PollBatchMaxTimeMs";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT = "SourceRecordActiveCount";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_AVG = "SourceRecordActiveCountAvg";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_MAX = "SourceRecordActiveCountMax";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_POLL_RATE = "SourceRecordPollRate";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_POLL_TOTAL = "SourceRecordPollTotal";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_WRITE_RATE = "SourceRecordWriteRate";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_WRITE_TOTAL = "SourceRecordWriteTotal";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_RATE = "OffsetCommitCompletionRate";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_TOTAL = "OffsetCommitCompletionTotal";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_RATE = "OffsetCommitSkipRate";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_TOTAL = "OffsetCommitSkipTotal";
public static final String CONNECTOR_METRIC_PARTITION_COUNT = "PartitionCount";
public static final String CONNECTOR_METRIC_PUT_BATCH_AVG_TIME_MS = "PutBatchAvgTimeMs";
public static final String CONNECTOR_METRIC_PUT_BATCH_MAX_TIME_MS = "PutBatchMaxTimeMs";
public static final String CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT = "SinkRecordActiveCount";
public static final String CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_AVG = "SinkRecordActiveCountAvg";
public static final String CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_MAX = "SinkRecordActiveCountMax";
public static final String CONNECTOR_METRIC_SINK_RECORD_LAG_MAX = "SinkRecordLagMax";
public static final String CONNECTOR_METRIC_SINK_RECORD_READ_RATE = "SinkRecordReadRate";
public static final String CONNECTOR_METRIC_SINK_RECORD_READ_TOTAL = "SinkRecordReadTotal";
public static final String CONNECTOR_METRIC_SINK_RECORD_SEND_RATE = "SinkRecordSendRate";
public static final String CONNECTOR_METRIC_SINK_RECORD_SEND_TOTAL = "SinkRecordSendTotal";
public static final String CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_FAILURES = "DeadletterqueueProduceFailures";
public static final String CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_REQUESTS = "DeadletterqueueProduceRequests";
public static final String CONNECTOR_METRIC_LAST_ERROR_TIMESTAMP = "LastErrorTimestamp";
public static final String CONNECTOR_METRIC_TOTAL_ERRORS_LOGGED = "TotalErrorsLogged";
public static final String CONNECTOR_METRIC_TOTAL_RECORD_ERRORS = "TotalRecordErrors";
public static final String CONNECTOR_METRIC_TOTAL_RECORD_FAILURES = "TotalRecordFailures";
public static final String CONNECTOR_METRIC_TOTAL_RECORDS_SKIPPED = "TotalRecordsSkipped";
public static final String CONNECTOR_METRIC_TOTAL_RETRIES = "TotalRetries";
@Override
public int versionItemType() {
return METRIC_CONNECT_CONNECTOR.getCode();
}
@Override
public List<VersionMetricControlItem> init() {
List<VersionMetricControlItem> items = new ArrayList<>();
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_COLLECT_COST_TIME).unit("").desc("采集connector指标的耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(CONNECTOR_METHOD_DO_NOTHING));
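// No JMX attribute here: the collection cost is presumably recorded by the collection task
// itself, which is why this item only sets the CONNECTOR_METHOD_DO_NOTHING extend method.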
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_HEALTH_STATE).unit("0:Good 1:Medium 2:Poor 3:Down").desc("Health state (0:Good 1:Medium 2:Poor 3:Down)").category(CATEGORY_HEALTH)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT).unit("").desc("Total number of tasks").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_TOTAL_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_RUNNING_TASK_COUNT).unit("").desc("Number of running tasks").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_RUNNING_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_PAUSED_TASK_COUNT).unit("").desc("Number of paused tasks").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_PAUSED_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT).unit("").desc("Number of failed tasks").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_FAILED_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT).unit("").desc("Number of unassigned tasks").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_UNASSIGNED_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_BATCH_SIZE_AVG).unit("").desc("Average batch size").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(BATCH_SIZE_AVG)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_BATCH_SIZE_MAX).unit("").desc("Maximum batch size").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(BATCH_SIZE_MAX)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_AVG_TIME_MS).unit("ms").desc("Average offset commit time").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_AVG_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_MAX_TIME_MS).unit("ms").desc("Maximum offset commit time").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_MAX_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_FAILURE_PERCENTAGE).unit("%").desc("Offset commit failure percentage").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_FAILURE_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_SUCCESS_PERCENTAGE).unit("%").desc("Offset commit success percentage").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_SUCCESS_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_POLL_BATCH_AVG_TIME_MS).unit("ms").desc("Average poll batch time").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(POLL_BATCH_AVG_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_POLL_BATCH_MAX_TIME_MS).unit("ms").desc("Maximum poll batch time").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(POLL_BATCH_MAX_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT).unit("").desc("Number of pending (in-flight) records").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_ACTIVE_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_AVG).unit("").desc("Average number of pending records").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_ACTIVE_COUNT_AVG)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_MAX).unit("").desc("Maximum number of pending records").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_ACTIVE_COUNT_MAX)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_POLL_RATE).unit(BYTE_PER_SEC).desc("Record poll rate").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_POLL_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_POLL_TOTAL).unit("").desc("Total records polled").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_POLL_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_WRITE_RATE).unit(BYTE_PER_SEC).desc("Record write rate").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_WRITE_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_WRITE_TOTAL).unit("").desc("Total records written").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_WRITE_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_RATE).unit(BYTE_PER_SEC).desc("Rate of completed offset commits").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_COMPLETION_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_TOTAL).unit("").desc("Total completed offset commits").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_COMPLETION_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_RATE).unit("").desc("Rate of skipped offset commits").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_SKIP_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_TOTAL).unit("").desc("Total skipped offset commits").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_SKIP_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_PARTITION_COUNT).unit("").desc("Number of assigned partitions").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(PARTITION_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_PUT_BATCH_AVG_TIME_MS).unit("ms").desc("Average put batch time").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(PUT_BATCH_AVG_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_PUT_BATCH_MAX_TIME_MS).unit("ms").desc("Maximum put batch time").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(PUT_BATCH_MAX_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT).unit("").desc("Number of pending (in-flight) records").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_ACTIVE_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_AVG).unit("").desc("Average number of pending records").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_ACTIVE_COUNT_AVG)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_MAX).unit("").desc("Maximum number of pending records").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_ACTIVE_COUNT_MAX)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_READ_RATE).unit(BYTE_PER_SEC).desc("Record read rate").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_READ_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_READ_TOTAL).unit("").desc("Total records read").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_READ_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_SEND_RATE).unit(BYTE_PER_SEC).desc("Record send rate").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_SEND_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_SEND_TOTAL).unit("").desc("Total records sent").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_SEND_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_FAILURES).unit("").desc("Number of failed dead letter queue produce requests").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(DEADLETTERQUEUE_PRODUCE_FAILURES)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_REQUESTS).unit("").desc("Number of dead letter queue produce requests").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(DEADLETTERQUEUE_PRODUCE_REQUESTS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_LAST_ERROR_TIMESTAMP).unit("").desc("Timestamp of the last error").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(LAST_ERROR_TIMESTAMP)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_ERRORS_LOGGED).unit("").desc("Number of errors logged").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_ERRORS_LOGGED)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RECORD_ERRORS).unit("").desc("Number of record processing errors (records that ended in error)").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RECORD_ERRORS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RECORD_FAILURES).unit("").desc("Number of record processing failures (each failed retry adds 1)").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RECORD_FAILURES)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RECORDS_SKIPPED).unit("").desc("Number of records skipped (left unprocessed) due to failures").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RECORDS_SKIPPED)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RETRIES).unit("").desc("Number of retries after failures").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RETRIES)));
return items;
}
}
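Taken together, each registered item pairs a display name with a JMX object name, an attribute, and an aggregation method across tasks. For reference, a minimal self-contained sketch of the underlying JMX read, assuming a plain RMI JMX endpoint; the host, port, connector name and attribute below are illustrative values, not taken from this commit:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ConnectorTaskMetricReadSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative endpoint; the real host/port come from the connect worker's JMX settings.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://worker-host:9999/jmxrmi");
        JMXConnector jmxConnector = JMXConnectorFactory.connect(url);
        try {
            MBeanServerConnection conn = jmxConnector.getMBeanServerConnection();
            // Kafka Connect registers per-task MBeans such as
            // kafka.connect:type=connector-task-metrics,connector=<name>,task=<id>;
            // attribute names on these MBeans are kebab-case, e.g. "batch-size-avg".
            ObjectName taskMetrics = new ObjectName(
                    "kafka.connect:type=connector-task-metrics,connector=my-connector,task=0");
            Object value = conn.getAttribute(taskMetrics, "batch-size-avg");
            System.out.println("batch-size-avg = " + value);
        } finally {
            jmxConnector.close();
        }
    }
}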

View File

@@ -0,0 +1,27 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_MIRROR_MAKER;
@Component
public class MirrorMakerMetricVersionItems extends BaseMetricVersionMetric {
@Override
public int versionItemType() {
return METRIC_CONNECT_MIRROR_MAKER.getCode();
}
@Override
public List<VersionMetricControlItem> init() {
List<VersionMetricControlItem> items = new ArrayList<>();
return items;
}
}
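The item list starts out empty here; a later commit would presumably register MirrorMaker metrics through the same builder pattern used in ConnectorMetricVersionItems above. A hypothetical sketch of one such registration inside init(); the metric name, JMX object name and attribute are placeholders (modeled on MirrorMaker 2's documented MBeans) and do not exist in this commit:

// Hypothetical example only: "ReplicationLatencyMsAvg" and the JMX names are placeholders,
// and the category/method constants are borrowed from ConnectorMetricVersionItems.
items.add(buildAllVersionsItem()
        .name("ReplicationLatencyMsAvg").unit("ms").desc("Average replication latency").category(CATEGORY_PERFORMANCE)
        .extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
                .jmxObjectName("kafka.connect.mirror:type=MirrorSourceConnector")
                .jmxAttribute("replication-latency-ms-avg")));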

View File

@@ -55,6 +55,13 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER = "HealthCheckPassed_Cluster";
public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER = "HealthCheckTotal_Cluster";
/**
* Connector health metrics
*/
public static final String CLUSTER_METRIC_HEALTH_STATE_CONNECTOR = "HealthState_Connector";
public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR = "HealthCheckPassed_Connector";
public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR = "HealthCheckTotal_Connector";
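// These appear to roll up the per-connector HealthState metric (see ConnectorMetricVersionItems)
// across all connectors attached to the cluster.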
public static final String CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE = "TotalRequestQueueSize";
public static final String CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE = "TotalResponseQueueSize";
public static final String CLUSTER_METRIC_EVENT_QUEUE_SIZE = "EventQueueSize";