Merge the 3.3.0 branch

This commit is contained in:
zengqiao
2023-02-24 17:13:50 +08:00
616 changed files with 32894 additions and 8421 deletions

View File

@@ -46,12 +46,6 @@
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-aop</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<version>${springboot.version}</version>
<scope>test</scope>
</dependency>
<!-- javax -->
<dependency>
@@ -67,10 +61,6 @@
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
@@ -120,5 +110,9 @@
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.13</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>connect-runtime</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -24,6 +24,17 @@ public class CollectedMetricsLocalCache {
.maximumSize(10000)
.build();
private static final Cache<String, Float> connectClusterMetricsCache = Caffeine.newBuilder()
.expireAfterWrite(90, TimeUnit.SECONDS)
.maximumSize(10000)
.build();
private static final Cache<String, Float> connectorMetricsCache = Caffeine.newBuilder()
.expireAfterWrite(90, TimeUnit.SECONDS)
.maximumSize(10000)
.build();
public static Float getBrokerMetrics(String brokerMetricKey) {
return brokerMetricsCache.getIfPresent(brokerMetricKey);
}
@@ -59,6 +70,28 @@ public class CollectedMetricsLocalCache {
partitionMetricsCache.put(partitionMetricsKey, metricsList);
}
public static void putConnectClusterMetrics(String connectClusterMetricKey, Float value) {
if (value == null) {
return;
}
connectClusterMetricsCache.put(connectClusterMetricKey, value);
}
public static Float getConnectClusterMetrics(String connectClusterMetricKey) {
return connectClusterMetricsCache.getIfPresent(connectClusterMetricKey);
}
public static void putConnectorMetrics(String connectClusterMetricKey, Float value) {
if (value == null) {
return;
}
connectorMetricsCache.put(connectClusterMetricKey, value);
}
public static Float getConnectorMetrics(String connectClusterMetricKey) {
return connectorMetricsCache.getIfPresent(connectClusterMetricKey);
}
public static String genBrokerMetricKey(Long clusterPhyId, Integer brokerId, String metricName) {
return clusterPhyId + "@" + brokerId + "@" + metricName;
}
@@ -71,6 +104,16 @@ public class CollectedMetricsLocalCache {
return clusterPhyId + "@" + brokerId + "@" + topicName + "@" + partitionId + "@" + metricName;
}
public static String genConnectClusterMetricCacheKey(Long connectClusterId, String metricName) {
return connectClusterId + "@" + metricName;
}
public static String genConnectorMetricCacheKey(Long connectClusterId, String connectorName, String metricName) {
return connectClusterId + "@" + connectorName + '@' + metricName;
}
/**************************************************** private method ****************************************************/
private CollectedMetricsLocalCache() {
}
}
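The new connect-cluster and connector caches follow the same pattern as the existing broker and partition caches: a composite key is built via genConnectClusterMetricCacheKey / genConnectorMetricCacheKey, checked first, and only refilled when the lookup misses. A minimal read-through sketch, assuming an illustrative cluster id and metric name (not taken from this change):

// Sketch only: cache-first lookup of a connect-cluster metric (entries expire after 90s).
Long connectClusterId = 1L;                                  // illustrative id
String metricName = "ConnectWorkerCount";                    // illustrative metric name
String cacheKey = CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterId, metricName);
Float value = CollectedMetricsLocalCache.getConnectClusterMetrics(cacheKey);
if (value == null) {
    value = 3.0f;                                            // stand-in for a real JMX collection
    CollectedMetricsLocalCache.putConnectClusterMetrics(cacheKey, value);
}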

View File

@@ -31,7 +31,7 @@ import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionServic
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceJobDao;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceReassignDao;
import org.apache.kafka.common.TopicPartition;

View File

@@ -40,7 +40,7 @@ import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.Cluste
import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
import com.xiaojukeji.know.streaming.km.core.service.job.JobService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems;
import com.xiaojukeji.know.streaming.km.rebalance.executor.ExecutionRebalance;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceParameter;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BrokerBalanceState;

View File

@@ -0,0 +1,177 @@
package com.xiaojukeji.know.streaming.km.core.flusher;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.ha.HaActiveStandbyRelation;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
import com.xiaojukeji.know.streaming.km.persistence.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.ha.HaActiveStandbyRelationService;
import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;
@Service
public class DatabaseDataFlusher {
private static final ILog LOGGER = LogFactory.getLog(DatabaseDataFlusher.class);
@Autowired
private TopicService topicService;
@Autowired
private TopicMetricService topicMetricService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private ClusterMetricService clusterMetricService;
@Autowired
private HealthCheckResultService healthCheckResultService;
@Autowired
private PartitionService partitionService;
@Autowired
private HaActiveStandbyRelationService haActiveStandbyRelationService;
@PostConstruct
public void init() {
this.flushPartitionsCache();
this.flushClusterLatestMetricsCache();
this.flushTopicLatestMetricsCache();
this.flushHealthCheckResultCache();
this.flushHaTopicCache();
}
@Scheduled(cron="0 0/1 * * * ?")
public void flushPartitionsCache() {
for (ClusterPhy clusterPhy: clusterPhyService.listAllClusters()) {
FutureUtil.quickStartupFutureUtil.submitTask(() -> {
try {
// Update the cache
Map<String, List<Partition>> newPartitionMap = new ConcurrentHashMap<>();
List<Partition> partitionList = partitionService.listPartitionByCluster(clusterPhy.getId());
partitionList.forEach(partition -> {
newPartitionMap.putIfAbsent(partition.getTopicName(), new ArrayList<>());
newPartitionMap.get(partition.getTopicName()).add(partition);
});
DataBaseDataLocalCache.putPartitions(clusterPhy.getId(), newPartitionMap);
} catch (Exception e) {
LOGGER.error("method=flushPartitionsCache||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
}
});
}
}
@Scheduled(cron="0 0/1 * * * ?")
public void flushHealthCheckResultCache() {
FutureUtil.quickStartupFutureUtil.submitTask(() -> {
List<HealthCheckResultPO> poList = healthCheckResultService.listAll();
Map<Long, Map<String, List<HealthCheckResultPO>>> newPOMap = new ConcurrentHashMap<>();
// Update the cache
poList.forEach(po -> {
Long cacheKey = DataBaseDataLocalCache.getHealthCheckCacheKey(po.getClusterPhyId(), po.getDimension());
newPOMap.putIfAbsent(cacheKey, new ConcurrentHashMap<>());
newPOMap.get(cacheKey).putIfAbsent(po.getResName(), new ArrayList<>());
newPOMap.get(cacheKey).get(po.getResName()).add(po);
});
for (Map.Entry<Long, Map<String, List<HealthCheckResultPO>>> entry: newPOMap.entrySet()) {
DataBaseDataLocalCache.putHealthCheckResults(entry.getKey(), entry.getValue());
}
});
}
@Scheduled(cron = "0 0/1 * * * ?")
private void flushClusterLatestMetricsCache() {
for (ClusterPhy clusterPhy: clusterPhyService.listAllClusters()) {
FutureUtil.quickStartupFutureUtil.submitTask(() -> {
try {
Result<ClusterMetrics> metricsResult = clusterMetricService.getLatestMetricsFromES(clusterPhy.getId(), Collections.emptyList());
if (metricsResult.hasData()) {
DataBaseDataLocalCache.putClusterLatestMetrics(clusterPhy.getId(), metricsResult.getData());
return;
}
LOGGER.error("method=flushClusterLatestMetricsCache||clusterPhyId={}||result={}||msg=failed", clusterPhy.getId(), metricsResult);
} catch (Exception e) {
LOGGER.error("method=flushClusterLatestMetricsCache||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
}
DataBaseDataLocalCache.putClusterLatestMetrics(clusterPhy.getId(), new ClusterMetrics(clusterPhy.getId()));
});
}
}
@Scheduled(cron = "0 0/1 * * * ?")
private void flushTopicLatestMetricsCache() {
for (ClusterPhy clusterPhy: LoadedClusterPhyCache.listAll().values()) {
FutureUtil.quickStartupFutureUtil.submitTask(() -> {
List<String> topicNameList = topicService.listTopicsFromCacheFirst(clusterPhy.getId()).stream().map(Topic::getTopicName).collect(Collectors.toList());
for (int i = 0; i < 3; ++i) {
try {
List<TopicMetrics> metricsList = topicMetricService.listTopicLatestMetricsFromES(
clusterPhy.getId(),
topicNameList,
Collections.emptyList()
);
if (!topicNameList.isEmpty() && metricsList.isEmpty()) {
// No metrics returned, retry
continue;
}
Map<String, TopicMetrics> metricsMap = metricsList
.stream()
.collect(Collectors.toMap(TopicMetrics::getTopic, Function.identity()));
DataBaseDataLocalCache.putTopicMetrics(clusterPhy.getId(), metricsMap);
break;
} catch (Exception e) {
LOGGER.error("method=flushTopicLatestMetricsCache||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
}
}
});
}
}
@Scheduled(cron="0 0/1 * * * ?")
public void flushHaTopicCache() {
List<HaActiveStandbyRelation> haTopicList = haActiveStandbyRelationService.listAllTopicHa();
for (HaActiveStandbyRelation topic : haTopicList) {
DataBaseDataLocalCache.putHaTopic(topic.getStandbyClusterPhyId(), topic.getResName());
}
}
}
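DatabaseDataFlusher only writes snapshots into DataBaseDataLocalCache once per minute; request-time readers are expected to hit that cache instead of the DB or ES. A hedged read-side sketch using getClusterLatestMetrics, which is the getter this change wires into ClusterMetricServiceImpl further below (the cluster id is illustrative):

// Sketch only: serve the latest cluster metrics from the locally flushed cache.
Long clusterPhyId = 1L;                                      // illustrative id
ClusterMetrics latest = DataBaseDataLocalCache.getClusterLatestMetrics(clusterPhyId);
if (latest == null) {
    latest = new ClusterMetrics(clusterPhyId);               // empty metrics object as a fallback
}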

View File

@@ -0,0 +1,44 @@
package com.xiaojukeji.know.streaming.km.core.flusher;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
/**
* JMX connection check
*/
@Service
public class JmxClientLegalFlusher {
private static final ILog LOGGER = LogFactory.getLog(JmxClientLegalFlusher.class);
@Autowired
private BrokerService brokerService;
@Autowired
private KafkaJMXClient kafkaJMXClient;
@Scheduled(cron="0 0/1 * * * ?")
public void checkJmxClient() {
for (ClusterPhy clusterPhy: LoadedClusterPhyCache.listAll().values()) {
FutureUtil.quickStartupFutureUtil.submitTask(
() -> {
try {
kafkaJMXClient.checkAndRemoveIfIllegal(
clusterPhy.getId(),
brokerService.listAliveBrokersFromDB(clusterPhy.getId())
);
} catch (Exception e) {
LOGGER.error("method=checkJmxClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
}
}
);
}
}
}

View File

@@ -43,14 +43,14 @@ public class ZKWatcherManager extends AbstractClusterLoadedChangedHandler {
try {
FutureUtil.quickStartupFutureUtil.submitTask(
() -> {
log.debug("class={}||method=scheduledTriggerFlush||clusterPhyId={}||msg=flush task start"
log.debug("runClass={}||method=scheduledTriggerFlush||clusterPhyId={}||msg=flush task start"
, abstractZKWatcher.getClass().getSimpleName(), clusterPhy.getId());
long startTime = System.currentTimeMillis();
abstractZKWatcher.flush(clusterPhy);
log.info("class={}||method=scheduledTriggerFlush||clusterPhyId={}||costTime={}ms||msg=flush task finished"
log.info("runClass={}||method=scheduledTriggerFlush||clusterPhyId={}||costTime={}ms||msg=flush task finished"
, abstractZKWatcher.getClass().getSimpleName(), clusterPhy.getId(), System.currentTimeMillis() - startTime);
});
} catch (Exception e) {

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.know.streaming.km.core.flusher.zk.handler;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.po.changerecord.KafkaChangeRecordPO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
@@ -100,7 +101,9 @@ public class ControllerNodeChangeHandler extends AbstractZKHandler implements ZN
if (kafkaController == null) {
kafkaControllerService.setNoKafkaController(clusterPhyId, triggerTime);
} else {
kafkaControllerService.insertAndIgnoreDuplicateException(kafkaController);
Broker broker = kafkaZKDAO.getBrokerMetadata(clusterPhyId, kafkaController.getBrokerId());
kafkaControllerService.insertAndIgnoreDuplicateException(kafkaController, broker != null? broker.getHost(): "", broker != null? broker.getRack(): "");
}
} catch (Exception e) {
log.error("method=updateDBData||clusterPhyId={}||errMsg=exception", clusterPhyId, e);

View File

@@ -17,6 +17,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
@@ -47,7 +48,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class KafkaAclServiceImpl extends BaseVersionControlService implements KafkaAclService {
public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implements KafkaAclService {
private static final ILog log = LogFactory.getLog(KafkaAclServiceImpl.class);
private static final String ACL_GET_FROM_KAFKA = "getAclFromKafka";

View File

@@ -19,6 +19,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
@@ -47,7 +48,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class OpKafkaAclServiceImpl extends BaseVersionControlService implements OpKafkaAclService {
public class OpKafkaAclServiceImpl extends BaseKafkaVersionControlService implements OpKafkaAclService {
private static final ILog log = LogFactory.getLog(OpKafkaAclServiceImpl.class);
private static final String ACL_CREATE = "createKafkaAcl";

View File

@@ -22,7 +22,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerConfigService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerConfigDAO;
@@ -42,7 +42,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class BrokerConfigServiceImpl extends BaseVersionControlService implements BrokerConfigService {
public class BrokerConfigServiceImpl extends BaseKafkaVersionControlService implements BrokerConfigService {
private static final ILog log = LogFactory.getLog(BrokerConfigServiceImpl.class);
private static final String GET_BROKER_CONFIG = "getBrokerConfig";
@@ -70,8 +70,8 @@ public class BrokerConfigServiceImpl extends BaseVersionControlService implement
registerVCHandler(GET_BROKER_CONFIG, V_0_10_1_0, V_0_11_0_0, "getBrokerConfigByZKClient", this::getBrokerConfigByZKClient);
registerVCHandler(GET_BROKER_CONFIG, V_0_11_0_0, V_MAX, "getBrokerConfigByKafkaClient", this::getBrokerConfigByKafkaClient);
registerVCHandler(MODIFY_BROKER_CONFIG, V_0_10_1_0, V_0_11_0_0, "modifyBrokerConfigByZKClient", this::modifyBrokerConfigByZKClient);
registerVCHandler(MODIFY_BROKER_CONFIG, V_0_11_0_0, V_MAX, "modifyBrokerConfigByKafkaClient", this::modifyBrokerConfigByKafkaClient);
registerVCHandler(MODIFY_BROKER_CONFIG, V_0_10_1_0, V_2_3_0, "modifyBrokerConfigByZKClient", this::modifyBrokerConfigByZKClient);
registerVCHandler(MODIFY_BROKER_CONFIG, V_2_3_0, V_MAX, "modifyBrokerConfigByKafkaClient", this::modifyBrokerConfigByKafkaClient);
}
@Override

View File

@@ -18,6 +18,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
@@ -33,8 +34,8 @@ import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.BrokerMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import org.apache.kafka.clients.admin.LogDirDescription;
@@ -51,14 +52,13 @@ import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*;
/**
* @author didi
*/
@Service
public class BrokerMetricServiceImpl extends BaseMetricService implements BrokerMetricService {
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
protected static final ILog LOGGER = LogFactory.getLog(BrokerMetricServiceImpl.class);
public static final String BROKER_METHOD_DO_NOTHING = "doNothing";
public static final String BROKER_METHOD_GET_METRIC_FROM_KAFKA_BY_JMX = "getMetricFromKafkaByJMX";
@@ -66,7 +66,8 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker
public static final String BROKER_METHOD_GET_HEALTH_SCORE = "getMetricHealthScore";
public static final String BROKER_METHOD_GET_PARTITIONS_SKEW = "getPartitionsSkew";
public static final String BROKER_METHOD_GET_LEADERS_SKEW = "getLeadersSkew";
public static final String BROKER_METHOD_GET_LOG_SIZE = "getLogSize";
public static final String BROKER_METHOD_GET_LOG_SIZE_FROM_CLIENT = "getLogSizeFromClient";
public static final String BROKER_METHOD_GET_LOG_SIZE_FROM_JMX = "getLogSizeFromJmx";
public static final String BROKER_METHOD_IS_BROKER_ALIVE = "isBrokerAlive";
@Autowired
@@ -109,8 +110,8 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker
registerVCHandler( BROKER_METHOD_GET_PARTITIONS_SKEW, this::getPartitionsSkew);
registerVCHandler( BROKER_METHOD_GET_LEADERS_SKEW, this::getLeadersSkew);
registerVCHandler( BROKER_METHOD_GET_LOG_SIZE, V_0_10_0_0, V_1_0_0, "getLogSizeFromJmx", this::getLogSizeFromJmx);
registerVCHandler( BROKER_METHOD_GET_LOG_SIZE, V_1_0_0, V_MAX, "getLogSizeFromClient", this::getLogSizeFromClient);
registerVCHandler( BROKER_METHOD_GET_LOG_SIZE_FROM_JMX, this::getLogSizeFromJmx);
registerVCHandler( BROKER_METHOD_GET_LOG_SIZE_FROM_CLIENT, this::getLogSizeFromClient);
registerVCHandler( BROKER_METHOD_IS_BROKER_ALIVE, this::isBrokerAlive);
}
@@ -364,7 +365,7 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker
Long clusterId = param.getClusterId();
Integer brokerId = param.getBrokerId();
List<Partition> partitions = partitionService.listPartitionByBroker(clusterId, brokerId);
List<Partition> partitions = partitionService.listPartitionFromCacheFirst(clusterId, brokerId);
Float logSizeSum = 0f;
for(Partition p : partitions) {
@@ -372,8 +373,8 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker
Result<ReplicationMetrics> metricsResult = replicaMetricService.collectReplicaMetricsFromKafka(
clusterId,
p.getTopicName(),
brokerId,
p.getPartitionId(),
brokerId,
ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_SIZE
);
@@ -386,7 +387,7 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker
logSizeSum += (replicaLogSize == null? 0.0f: replicaLogSize);
} catch (Exception e) {
LOGGER.error(
"class=BrokerMetricServiceImpl||method=getLogSize||clusterPhyId={}||brokerId={}||topicName={}||partitionId={}||metricName={}||errMsg=exception",
"method=getLogSize||clusterPhyId={}||brokerId={}||topicName={}||partitionId={}||metricName={}||errMsg=exception",
clusterId, brokerId, p.getTopicName(), p.getPartitionId(), metric, e.getClass().getName()
);
}
@@ -431,7 +432,9 @@ public class BrokerMetricServiceImpl extends BaseMetricService implements Broker
Float brokerLeaderCount = metricsResult.getData().getMetric( BrokerMetricVersionItems.BROKER_METRIC_LEADERS);
Integer globalLeaderCount = partitionService.getLeaderPartitionSizeByClusterId(clusterId);
Integer globalLeaderCount = (int) partitionService.listPartitionFromCacheFirst(clusterId)
.stream()
.filter(partition -> !partition.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER)).count();
Integer globalBrokerCount = brokerService.listAllBrokersFromDB(clusterId).size();
if (globalLeaderCount <= 0 || globalBrokerCount <= 0) {

View File

@@ -8,8 +8,8 @@ import com.github.benmanes.caffeine.cache.Caffeine;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.broker.BrokerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.broker.BrokerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
@@ -26,12 +26,12 @@ import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO;
import kafka.zk.BrokerIdsZNode;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.Node;
@@ -54,7 +54,7 @@ import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.VERSION;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.JMX_SERVER_APP_INFO;
@Service
public class BrokerServiceImpl extends BaseVersionControlService implements BrokerService {
public class BrokerServiceImpl extends BaseKafkaVersionControlService implements BrokerService {
private static final ILog log = LogFactory.getLog(BrokerServiceImpl.class);
private static final String BROKER_LOG_DIR = "getLogDir";
@@ -168,7 +168,7 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok
allBrokerList = this.listAllBrokersAndUpdateCache(clusterPhyId);
}
return allBrokerList.stream().filter( elem -> elem.alive()).collect(Collectors.toList());
return allBrokerList.stream().filter(elem -> elem.alive()).collect(Collectors.toList());
}
@Override
@@ -234,11 +234,10 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok
@Override
public String getBrokerVersionFromKafka(Long clusterId, Integer brokerId) {
JmxConnectorWrap jmxConnectorWrap = kafkaJMXClient.getClient(clusterId, brokerId);
if (ValidateUtils.isNull(jmxConnectorWrap) || !jmxConnectorWrap.checkJmxConnectionAndInitIfNeed()) {
JmxConnectorWrap jmxConnectorWrap = kafkaJMXClient.getClientWithCheck(clusterId, brokerId);
if (jmxConnectorWrap == null) {
return "";
}
try {
return (String) jmxConnectorWrap.getAttribute(new ObjectName(JMX_SERVER_APP_INFO + ",id=" + brokerId), VERSION);
} catch (Exception e) {
@@ -331,7 +330,7 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok
return Result.buildSuc(brokerList);
} catch (Exception e) {
log.error("class=BrokerServiceImpl||method=getBrokersFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getBrokersFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, e.getMessage());
}
@@ -353,7 +352,7 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok
return Result.buildSuc(newBrokerList);
} catch (Exception e) {
log.error("class=BrokerServiceImpl||method=getBrokersFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getBrokersFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
@@ -361,13 +360,13 @@ public class BrokerServiceImpl extends BaseVersionControlService implements Brok
private Broker getStartTimeAndBuildBroker(Long clusterPhyId, Node newNode, JmxConfig jmxConfig) {
try {
Long startTime = jmxDAO.getServerStartTime(clusterPhyId, newNode.host(), null, jmxConfig);
Long startTime = jmxDAO.getServerStartTime(clusterPhyId, newNode.host(), jmxConfig.getJmxPort(), jmxConfig);
return Broker.buildFrom(clusterPhyId, newNode, startTime, jmxConfig);
return Broker.buildFrom(clusterPhyId, newNode, startTime);
} catch (Exception e) {
log.error("class=BrokerServiceImpl||method=getStartTimeAndBuildBroker||clusterPhyId={}||brokerNode={}||jmxConfig={}||errMsg=exception!", clusterPhyId, newNode, jmxConfig, e);
log.error("method=getStartTimeAndBuildBroker||clusterPhyId={}||brokerNode={}||jmxConfig={}||errMsg=exception!", clusterPhyId, newNode, jmxConfig, e);
}
return Broker.buildFrom(clusterPhyId, newNode, null, jmxConfig);
return Broker.buildFrom(clusterPhyId, newNode, null);
}
}

View File

@@ -15,11 +15,9 @@ import java.util.Map;
@Service
public class BrokerSpecServiceImpl implements BrokerSpecService {
@Autowired
private PlatformClusterConfigService platformClusterConfigService;
@Override
public Map<Integer, BrokerSpec> getBrokerSpecMap(Long clusterPhyId) {
// Fetch the broker spec info
@@ -37,6 +35,4 @@ public class BrokerSpecServiceImpl implements BrokerSpecService {
}
return brokerSpecMap;
}
}

View File

@@ -9,7 +9,6 @@ import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.exception.ParamErrorException;
import java.util.List;
import java.util.Set;
/**
* @author didi

View File

@@ -2,8 +2,6 @@ package com.xiaojukeji.know.streaming.km.core.service.cluster.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
@@ -15,6 +13,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.Kafka
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BrokerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.ClusterMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
@@ -26,6 +25,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ClusterMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
@@ -35,6 +35,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.*;
import com.xiaojukeji.know.streaming.km.persistence.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
@@ -54,27 +55,24 @@ import com.xiaojukeji.know.streaming.km.persistence.es.dao.ClusterMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.resource.ResourceType;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import javax.annotation.PostConstruct;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics.initWithMetrics;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ClusterMetricVersionItems.*;
/**
* @author didi
@@ -167,19 +165,6 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
@Autowired
private ClusterPhyService clusterPhyService;
private final Cache<Long, ClusterMetrics> clusterLatestMetricsCache = Caffeine.newBuilder()
.expireAfterWrite(180, TimeUnit.SECONDS)
.maximumSize(1000)
.build();
@PostConstruct
@Scheduled(cron = "0 0/1 * * * ?")
private void flushClusterLatestMetricsCache() {
for (ClusterPhy clusterPhy: clusterPhyService.listAllClusters()) {
FutureUtil.quickStartupFutureUtil.submitTask(() -> this.updateCacheAndGetMetrics(clusterPhy.getId()));
}
}
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.METRIC_CLUSTER;
@@ -297,7 +282,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
@Override
public ClusterMetrics getLatestMetricsFromCache(Long clusterPhyId) {
ClusterMetrics metrics = clusterLatestMetricsCache.getIfPresent(clusterPhyId);
ClusterMetrics metrics = DataBaseDataLocalCache.getClusterLatestMetrics(clusterPhyId);
if (metrics != null) {
return metrics;
}
@@ -349,24 +334,6 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
/**************************************************** private method ****************************************************/
private ClusterMetrics updateCacheAndGetMetrics(Long clusterPhyId) {
try {
Result<ClusterMetrics> metricsResult = this.getLatestMetricsFromES(clusterPhyId, Arrays.asList());
if (metricsResult.hasData()) {
LOGGER.info("method=updateCacheAndGetMetrics||clusterPhyId={}||msg=success", clusterPhyId);
clusterLatestMetricsCache.put(clusterPhyId, metricsResult.getData());
return metricsResult.getData();
}
} catch (Exception e) {
LOGGER.error("method=updateCacheAndGetMetrics||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
}
ClusterMetrics clusterMetrics = new ClusterMetrics(clusterPhyId);
clusterLatestMetricsCache.put(clusterPhyId, clusterMetrics);
return clusterMetrics;
}
/**
* doNothing
*/
@@ -396,9 +363,28 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
/**
* Get the cluster's messageSize
*/
private Result<ClusterMetrics> getMessageSize(VersionItemParam metricParam){
private Result<ClusterMetrics> getMessageSize(VersionItemParam metricParam) {
ClusterMetricParam param = (ClusterMetricParam)metricParam;
return getMetricFromKafkaByTotalTopics(param.getClusterId(), param.getMetric(), TOPIC_METRIC_MESSAGES);
Result<Map<TopicPartition, Long>> beginOffsetMapResult = partitionService.getAllPartitionOffsetFromKafka(param.getClusterId(), KSOffsetSpec.earliest());
Result<Map<TopicPartition, Long>> endOffsetMapResult = partitionService.getAllPartitionOffsetFromKafka(param.getClusterId(), KSOffsetSpec.latest());
if (endOffsetMapResult.failed() || beginOffsetMapResult.failed()) {
// If either request failed, return a failure directly
return Result.buildFromIgnoreData(endOffsetMapResult);
}
long msgCount = 0;
for (Map.Entry<TopicPartition, Long> entry: endOffsetMapResult.getData().entrySet()) {
Long beginOffset = beginOffsetMapResult.getData().get(entry.getKey());
if (beginOffset == null) {
continue;
}
msgCount += Math.max(0, entry.getValue() - beginOffset);
}
return Result.buildSuc(initWithMetrics(param.getClusterId(), param.getMetric(), (float)msgCount));
}
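The messageSize metric is now derived from partition offsets instead of summing topic metrics: each partition contributes max(0, endOffset - beginOffset). A tiny worked example with illustrative offsets:

// Illustrative offsets for two partitions:
// p0: beginOffset = 5, endOffset = 105 -> 100 messages
// p1: beginOffset = 0, endOffset = 50  ->  50 messages
long msgCount = Math.max(0, 105 - 5) + Math.max(0, 50 - 0);  // = 150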
/**
@@ -420,9 +406,9 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
private Result<ClusterMetrics> getPartitionSize(VersionItemParam metricParam){
ClusterMetricParam param = (ClusterMetricParam)metricParam;
String metric = param.getMetric();
Long clusterId = param.getClusterId();
Integer partitionNu = partitionService.getPartitionSizeByClusterId(clusterId);
String metric = param.getMetric();
Long clusterId = param.getClusterId();
Integer partitionNu = partitionService.listPartitionFromCacheFirst(clusterId).size();
return Result.buildSuc(initWithMetrics(clusterId, metric, partitionNu.floatValue()));
}
@@ -435,7 +421,10 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
String metric = param.getMetric();
Long clusterId = param.getClusterId();
Integer noLeaders = partitionService.getNoLeaderPartitionSizeByClusterId(clusterId);
Integer noLeaders = (int) partitionService.listPartitionFromCacheFirst(clusterId)
.stream()
.filter(partition -> partition.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER))
.count();
return Result.buildSuc(initWithMetrics(clusterId, metric, noLeaders.floatValue()));
}
@@ -781,7 +770,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
/**
* Aggregate the metrics of all topics to obtain the cluster-level metric
*/
private Result<ClusterMetrics> getMetricFromKafkaByTotalTopics(Long clusterId, String metric, String topicMetric){
private Result<ClusterMetrics> getMetricFromKafkaByTotalTopics(Long clusterId, String metric, String topicMetric) {
List<Topic> topics = topicService.listTopicsFromCacheFirst(clusterId);
float sumMetricValue = 0f;

View File

@@ -16,7 +16,6 @@ import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.impl.KafkaZKDAOImpl;
import kafka.server.KafkaConfig;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -35,7 +34,6 @@ import java.util.*;
* @author zengqiao
* @date 22/02/28
*/
@Slf4j
@Service
public class ClusterValidateServiceImpl implements ClusterValidateService {
private static final ILog logger = LogFactory.getLog(KafkaZKDAOImpl.class);

View File

@@ -4,7 +4,6 @@ import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSor
import com.xiaojukeji.know.streaming.km.common.bean.po.ControllerChangeLogPO;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ControllerChangeLogService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.ControllerChangeLogDAO;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -12,7 +11,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Slf4j
@Service
public class ControllerChangeLogServiceImpl implements ControllerChangeLogService {

View File

@@ -17,4 +17,5 @@ public interface PlatformClusterConfigService {
Map<String, PlatformClusterConfigPO> getByClusterAndGroupWithoutDefault(Long clusterPhyId, String group);
Map<Long, Map<String, PlatformClusterConfigPO>> listByGroup(String groupName);
}

View File

@@ -12,6 +12,7 @@ import com.xiaojukeji.know.streaming.km.persistence.mysql.config.PlatformCluster
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
@@ -68,4 +69,20 @@ public class PlatformClusterConfigServiceImpl implements PlatformClusterConfigSe
return configPOMap;
}
@Override
public Map<Long, Map<String, PlatformClusterConfigPO>> listByGroup(String groupName) {
LambdaQueryWrapper<PlatformClusterConfigPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(PlatformClusterConfigPO::getValueGroup, groupName);
List<PlatformClusterConfigPO> poList = platformClusterConfigDAO.selectList(lambdaQueryWrapper);
Map<Long, Map<String, PlatformClusterConfigPO>> poMap = new HashMap<>();
poList.forEach(elem -> {
poMap.putIfAbsent(elem.getClusterId(), new HashMap<>());
poMap.get(elem.getClusterId()).put(elem.getValueName(), elem);
});
return poMap;
}
}
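A hedged usage sketch of the new listByGroup query, assuming the caller wants the entries of a single cluster within a group (the group name and cluster id are illustrative):

// Sketch only: fetch all configs of a group, then pick one cluster's entries.
Map<Long, Map<String, PlatformClusterConfigPO>> byCluster = platformClusterConfigService.listByGroup("JMX");
Map<String, PlatformClusterConfigPO> clusterConfigs = byCluster.getOrDefault(1L, new HashMap<>());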

View File

@@ -0,0 +1,27 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectClustersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import java.util.List;
/**
* @author didi
*/
public interface ConnectClusterMetricService {
/**
* Collect metrics from Kafka
*/
Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String metricName);
Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String metricName);
/**
* Get aggregated metric lines over a time range from ES
*/
Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectClustersDTO dto);
boolean isMetricName(String str);
}
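A hedged caller-side sketch of the cache-first collection path declared above (the connect cluster id and metric name are illustrative; Result is the wrapper type used throughout this change):

// Sketch only: collect one connect-cluster metric, preferring the local cache.
Result<ConnectClusterMetrics> ret =
        connectClusterMetricService.collectConnectClusterMetricsFromKafkaWithCacheFirst(1L, "ConnectWorkerCount");
if (ret != null && ret.hasData()) {
    Float value = ret.getData().getMetrics().get("ConnectWorkerCount");   // may be null if not reported
}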

View File

@@ -0,0 +1,34 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
/**
* Connect-Cluster
*/
public interface ConnectClusterService {
Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata);
List<ConnectCluster> listByKafkaCluster(Long kafkaClusterPhyId);
List<ConnectCluster> listAllClusters();
ConnectCluster getById(Long connectClusterId);
ConnectCluster getByName(Long clusterPhyId, String connectClusterName);
String getClusterVersion(Long connectClusterId);
String getClusterName(Long connectClusterId);
Result<Void> deleteInDB(Long connectClusterId, String operator);
Result<Void> batchModifyInDB(List<ConnectClusterDTO> dtoList, String operator);
Boolean existConnectClusterDown(Long kafkaClusterPhyId);
}

View File

@@ -0,0 +1,270 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectClustersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectWorkerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.connect.ConnectClusterMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.ConnectClusterMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
/**
* @author didi
*/
@Service
public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService implements ConnectClusterMetricService {
protected static final ILog LOGGER = LogFactory.getLog(ConnectClusterMetricServiceImpl.class);
public static final String CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG = "getWorkerMetricAvg";
public static final String CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM = "getWorkerMetricSum";
public static final String CONNECT_CLUSTER_METHOD_DO_NOTHING = "doNothing";
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private ConnectClusterMetricESDAO connectClusterMetricESDAO;
@Autowired
private ConnectJMXClient connectJMXClient;
@Autowired
private WorkerService workerService;
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.METRIC_CONNECT_CLUSTER;
}
@Override
protected List<String> listMetricPOFields() {
return BeanUtil.listBeanFields(BrokerMetricPO.class);
}
@Override
protected void initRegisterVCHandler() {
registerVCHandler(CONNECT_CLUSTER_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG, this::getConnectWorkerMetricAvg);
registerVCHandler(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum);
}
@Override
public Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String metric) {
String connectClusterMetricKey = CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterPhyId, metric);
Float keyValue = CollectedMetricsLocalCache.getConnectClusterMetrics(connectClusterMetricKey);
if (keyValue != null) {
ConnectClusterMetrics connectClusterMetrics = ConnectClusterMetrics.initWithMetric(connectClusterPhyId,metric,keyValue);
return Result.buildSuc(connectClusterMetrics);
}
Result<ConnectClusterMetrics> ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, metric);
if (ret == null || !ret.hasData()) {
return ret;
}
Map<String, Float> metricsMap = ret.getData().getMetrics();
for (Map.Entry<String, Float> entry : metricsMap.entrySet()) {
CollectedMetricsLocalCache.putConnectClusterMetrics(CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterPhyId, entry.getKey()), entry.getValue());
}
return ret;
}
@Override
public Result<ConnectClusterMetrics> collectConnectClusterMetricsFromKafka( Long connectClusterPhyId, String metric) {
try {
ConnectClusterMetricParam metricParam = new ConnectClusterMetricParam(connectClusterPhyId, metric);
return (Result<ConnectClusterMetrics>) doVCHandler(connectClusterPhyId, metric, metricParam);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectClustersDTO dto) {
Long startTime = dto.getStartTime();
Long endTime = dto.getEndTime();
Integer topN = dto.getTopNu();
String aggType = dto.getAggType();
List<Long> connectClusterIdList = dto.getConnectClusterIdList();
List<String> metricNameList = dto.getMetricsNames();
Table<String, Long, List<MetricPointVO>> retTable;
if (ValidateUtils.isEmptyList(connectClusterIdList)) {
// Fetch by TopN
List<Long> defaultConnectClusterIdList = this.listTopNConnectClusterIdList(clusterPhyId, topN);
retTable = connectClusterMetricESDAO.listMetricsByTop(clusterPhyId, defaultConnectClusterIdList, metricNameList, aggType, topN, startTime, endTime);
} else {
// Fetch by the specified connect cluster IDs
retTable = connectClusterMetricESDAO.listMetricsByConnectClusterIdList(clusterPhyId, metricNameList, aggType, connectClusterIdList, startTime, endTime);
}
return Result.buildSuc(this.metricMap2VO(clusterPhyId, retTable.rowMap()));
}
@Override
public boolean isMetricName(String str) {
return super.isMetricName(str);
}
/**************************************************** private method ****************************************************/
private Result<ConnectClusterMetrics> doNothing(VersionItemParam metricParam) {
ConnectClusterMetricParam param = (ConnectClusterMetricParam) metricParam;
return Result.buildSuc(new ConnectClusterMetrics(null, param.getConnectClusterId()));
}
private Result<ConnectClusterMetrics> getConnectWorkerMetricAvg(VersionItemParam metricParam) {
ConnectClusterMetricParam param = (ConnectClusterMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String metric = param.getMetric();
Result<List<ConnectWorkerMetrics>> ret = this.getConnectWorkerMetricsByJMX(connectClusterId, metric);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
// Compute the average
Float value = ret.getData().stream().map(elem -> elem.getMetric(metric) == null ? 0 : elem.getMetric(metric)).reduce(Float::sum).get();
ConnectClusterMetrics connectClusterMetrics = new ConnectClusterMetrics(null, connectClusterId);
connectClusterMetrics.putMetric(metric, value / ret.getData().size());
return Result.buildSuc(connectClusterMetrics);
}
private Result<ConnectClusterMetrics> getConnectWorkerMetricSum(VersionItemParam metricParam) {
ConnectClusterMetricParam param = (ConnectClusterMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String metric = param.getMetric();
Result<List<ConnectWorkerMetrics>> ret = this.getConnectWorkerMetricsByJMX(connectClusterId, metric);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
// Compute the sum
Float value = ret.getData().stream().map(elem -> elem.getMetric(metric) == null ? 0 : elem.getMetric(metric)).reduce(Float::sum).get();
ConnectClusterMetrics connectClusterMetrics = new ConnectClusterMetrics(null, connectClusterId);
connectClusterMetrics.putMetric(metric, value);
return Result.buildSuc(connectClusterMetrics);
}
// Get the list of worker metrics
private Result<List<ConnectWorkerMetrics>> getConnectWorkerMetricsByJMX(Long connectClusterId, String metric) {
List<String> workerIdList = workerService.listFromDB(connectClusterId).stream().map(elem -> elem.getWorkerId()).collect(Collectors.toList());
List<ConnectWorkerMetrics> workerMetricsList = new ArrayList<>();
for (String workerId : workerIdList) {
Result<ConnectWorkerMetrics> ret = this.getConnectWorkerMetricByJMX(connectClusterId, workerId, metric);
if (ret == null || !ret.hasData() || ret.getData().getMetric(metric) == null) {
continue;
}
workerMetricsList.add(ret.getData());
}
return Result.buildSuc(workerMetricsList);
}
private Result<ConnectWorkerMetrics> getConnectWorkerMetricByJMX(Long connectClusterId, String workerId, String metric) {
VersionJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
// 2. Fetch the JMX metric
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxInfo.getJmxObjectName()), jmxInfo.getJmxAttribute()).toString();
ConnectWorkerMetrics connectWorkerMetrics = ConnectWorkerMetrics.initWithMetric(connectClusterId, workerId, metric, Float.valueOf(value));
return Result.buildSuc(connectWorkerMetrics);
} catch (Exception e) {
LOGGER.error("method=getConnectWorkerMetricsByJMX||connectClusterId={}||workerId={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, metric, jmxInfo.getJmxObjectName(), e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
private List<Long> listTopNConnectClusterIdList(Long clusterPhyId, Integer topN) {
List<ConnectCluster> connectClusters = connectClusterService.listByKafkaCluster(clusterPhyId);
if (CollectionUtils.isEmpty(connectClusters)) {
return new ArrayList<>();
}
return connectClusters.subList(0, Math.min(topN, connectClusters.size()))
.stream()
.map(b -> b.getId().longValue())
.collect(Collectors.toList());
}
protected List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Long, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
// Return directly if empty
return multiLinesVOS;
}
for(String metric : map.keySet()){
try {
MetricMultiLinesVO multiLinesVO = new MetricMultiLinesVO();
multiLinesVO.setMetricName(metric);
List<MetricLineVO> metricLines = new ArrayList<>();
Map<Long, List<MetricPointVO>> metricPointMap = map.get(metric);
if(null == metricPointMap || metricPointMap.isEmpty()){continue;}
for(Map.Entry<Long, List<MetricPointVO>> entry : metricPointMap.entrySet()){
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(entry.getKey().toString());
metricLineVO.setMetricName(metric);
metricLineVO.setMetricPoints(entry.getValue());
metricLines.add(metricLineVO);
}
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e);
}
}
return multiLinesVOS;
}
}
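The two worker-level aggregation handlers above differ only in the final reduction over the per-worker JMX values (workers whose attribute is missing are already filtered out by getConnectWorkerMetricsByJMX). A tiny worked example with illustrative values:

// Two workers report 2.0f and 4.0f for the same metric:
// getConnectWorkerMetricSum -> 2.0 + 4.0 = 6.0
// getConnectWorkerMetricAvg -> 6.0 / 2   = 3.0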

View File

@@ -0,0 +1,243 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.cluster.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectClusterPO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectClusterDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import java.util.List;
@Service
public class ConnectClusterServiceImpl implements ConnectClusterService {
private static final ILog LOGGER = LogFactory.getLog(ConnectClusterServiceImpl.class);
@Autowired
private ConnectClusterDAO connectClusterDAO;
@Autowired
private OpLogWrapService opLogWrapService;
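/**
 * Insert the connect cluster if it is not yet in the DB, otherwise refresh its mutable fields, and return the DB id.
 */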
@Override
public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) {
// Strip the trailing slash from the URL
String clusterUrl = metadata.getMemberLeaderUrl();
if (!ValidateUtils.isBlank(clusterUrl) && clusterUrl.charAt(clusterUrl.length() - 1) == '/') {
clusterUrl = clusterUrl.substring(0, clusterUrl.length() - 1);
}
ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());
if (oldPO == null) {
oldPO = new ConnectClusterPO();
oldPO.setKafkaClusterPhyId(metadata.getKafkaClusterPhyId());
oldPO.setGroupName(metadata.getGroupName());
oldPO.setName(metadata.getGroupName());
oldPO.setState(metadata.getState().getCode());
oldPO.setMemberLeaderUrl(metadata.getMemberLeaderUrl());
oldPO.setClusterUrl(clusterUrl);
oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION);
connectClusterDAO.insert(oldPO);
oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());
return oldPO == null ? null : oldPO.getId();
}
oldPO.setKafkaClusterPhyId(metadata.getKafkaClusterPhyId());
oldPO.setGroupName(metadata.getGroupName());
oldPO.setState(metadata.getState().getCode());
oldPO.setMemberLeaderUrl(metadata.getMemberLeaderUrl());
if (ValidateUtils.isBlank(oldPO.getVersion())) {
oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION);
}
if (!ValidateUtils.isBlank(clusterUrl)) {
oldPO.setClusterUrl(clusterUrl);
}
connectClusterDAO.updateById(oldPO);
return oldPO.getId();
}
@Override
public List<ConnectCluster> listByKafkaCluster(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return ConvertUtil.list2List(connectClusterDAO.selectList(lambdaQueryWrapper), ConnectCluster.class);
}
@Override
public List<ConnectCluster> listAllClusters() {
List<ConnectClusterPO> connectClusterPOList = connectClusterDAO.selectList(null);
return ConvertUtil.list2List(connectClusterPOList, ConnectCluster.class);
}
@Override
public ConnectCluster getById(Long connectClusterId) {
return ConvertUtil.obj2Obj(connectClusterDAO.selectById(connectClusterId), ConnectCluster.class);
}
@Override
public ConnectCluster getByName(Long clusterPhyId, String connectClusterName) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
lambdaQueryWrapper.eq(ConnectClusterPO::getName, connectClusterName);
return ConvertUtil.obj2Obj(connectClusterDAO.selectOne(lambdaQueryWrapper), ConnectCluster.class);
}
@Override
public String getClusterVersion(Long connectClusterId) {
ConnectClusterPO connectClusterPO = connectClusterDAO.selectById(connectClusterId);
return null != connectClusterPO ? connectClusterPO.getVersion() : "";
}
@Override
public String getClusterName(Long connectClusterId) {
ConnectClusterPO connectClusterPO = connectClusterDAO.selectById(connectClusterId);
return null != connectClusterPO ? connectClusterPO.getName() : "";
}
@Override
public Result<Void> deleteInDB(Long connectClusterId, String operator) {
ConnectCluster connectCluster = this.getById(connectClusterId);
if (connectCluster == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
if (!GroupStateEnum.DEAD.getCode().equals(connectCluster.getState())) {
return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FORBIDDEN, "只有集群处于Dead状态才允许删除");
}
connectClusterDAO.deleteById(connectClusterId);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectClusterBizStr(connectCluster.getId(), connectCluster.getName()),
ConvertUtil.obj2Json(connectCluster)
));
return Result.buildSuc();
}
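/**
 * Validate every clusterUrl first, then update the clusters one by one inside a transaction;
 * any failure marks the transaction rollback-only and aborts the whole batch.
 */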
@Override
@Transactional
public Result<Void> batchModifyInDB(List<ConnectClusterDTO> dtoList, String operator) {
LOGGER.info("method=batchModifyInDB||data={}||operator={}", dtoList, operator);
for (ConnectClusterDTO dto: dtoList) {
if (!dto.getClusterUrl().startsWith("http://") && !dto.getClusterUrl().startsWith("https://")) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "clusterUrl必须以http或者https开头");
}
}
for (ConnectClusterDTO dto: dtoList) {
try {
ConnectClusterPO po = this.getRowById(dto.getId());
if (po == null) {
// Roll back the transaction
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(dto.getId()));
}
if (!ValidateUtils.isNull(dto.getName())) {
po.setName(dto.getName());
}
if (!ValidateUtils.isNull(dto.getClusterUrl())) {
String clusterUrl = dto.getClusterUrl();
if (clusterUrl.charAt(clusterUrl.length() - 1) == '/') {
clusterUrl = clusterUrl.substring(0, clusterUrl.length() - 1);
}
po.setClusterUrl(clusterUrl);
}
if (!ValidateUtils.isNull(dto.getVersion())) {
po.setVersion(dto.getVersion());
}
if (!ValidateUtils.isNull(dto.getJmxProperties())) {
po.setJmxProperties(dto.getJmxProperties());
}
connectClusterDAO.updateById(po);
// Record the operation log
opLogWrapService.saveOplogAndIgnoreException(
new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CLUSTER.getDesc(),
MsgConstant.getConnectClusterBizStr(dto.getId(), dto.getName()),
ConvertUtil.obj2Json(po)
)
);
} catch (DuplicateKeyException dke) {
LOGGER.error(
"method=batchModifyInDB||data={}||operator={}||errMsg=connectCluster name duplicate",
dtoList, operator
);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "connect集群name重复");
} catch (Exception e) {
LOGGER.error(
"method=batchModifyInDB||data={}||operator={}||errMsg=exception",
dtoList, operator, e
);
// Roll back the transaction
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
return Result.buildSuc();
}
@Override
public Boolean existConnectClusterDown(Long kafkaClusterPhyId) {
List<ConnectCluster> connectClusters = this.listByKafkaCluster(kafkaClusterPhyId);
for (ConnectCluster connectCluster : connectClusters) {
if (GroupStateEnum.getByState(String.valueOf(connectCluster.getState())) == GroupStateEnum.DEAD) {
return true;
}
}
return false;
}
/**************************************************** private method ****************************************************/
private ConnectClusterPO getPOFromDB(Long kafkaClusterPhyId, String groupName) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getGroupName, groupName);
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return connectClusterDAO.selectOne(lambdaQueryWrapper);
}
public ConnectClusterPO getRowById(Long connectClusterId) {
return connectClusterDAO.selectById(connectClusterId);
}
}

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectorsDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import java.util.List;
/**
* @author didi
*/
public interface ConnectorMetricService {
/**
* Collect the metric from the Kafka Connect cluster
*/
Result<ConnectorMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String connectorName, String metricName);
Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName);
Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName, ConnectorTypeEnum connectorType);
/**
* Fetch the aggregated metric lines over a time range from ES
*/
Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectorsDTO dto);
Result<List<ConnectorMetrics>> getLatestMetricsFromES(Long clusterPhyId, List<ClusterConnectorDTO> connectorNameList, List<String> metricNameList);
Result<ConnectorMetrics> getLatestMetricsFromES(Long connectClusterId, String connectorName, List<String> metricsNames);
boolean isMetricName(String str);
}

View File

@@ -0,0 +1,62 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import java.util.List;
import java.util.Properties;
import java.util.Set;
/**
* Query and operate Kafka Connect connectors
*/
public interface ConnectorService {
Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator);
/**
* List all connector names in the connect cluster
*/
Result<List<String>> listConnectorsFromCluster(Long connectClusterId);
/**
* Get the info of a single connector
*/
Result<KSConnectorInfo> getConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName);
Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName);
Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);
void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet);
void addNewToDB(KSConnector connector);
List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
List<ConnectorPO> listByConnectClusterIdFromDB(Long connectClusterId);
int countByConnectClusterIdFromDB(Long connectClusterId);
ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName);
ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName);
void completeMirrorMakerInfo(ConnectCluster connectCluster, List<KSConnector> connectorList);
}

View File

@@ -0,0 +1,443 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.connect.MetricsConnectorsDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorTaskMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.connect.ConnectorMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.connect.ConnectorMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.connector.ConnectorMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
/**
* @author didi
*/
@Service
public class ConnectorMetricServiceImpl extends BaseConnectorMetricService implements ConnectorMetricService {
protected static final ILog LOGGER = LogFactory.getLog(ConnectorMetricServiceImpl.class);
public static final String CONNECTOR_METHOD_DO_NOTHING = "doNothing";
public static final String CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM = "getConnectWorkerMetricSum";
public static final String CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG = "getConnectorTaskMetricsAvg";
public static final String CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX = "getConnectorTaskMetricsMax";
public static final String CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM = "getConnectorTaskMetricsSum";
public static final String CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE = "getMetricHealthScore";
@Autowired
private ConnectorMetricESDAO connectorMetricESDAO;
@Autowired
private ConnectJMXClient connectJMXClient;
@Autowired
private WorkerService workerService;
@Autowired
private ConnectorService connectorService;
@Autowired
private WorkerConnectorService workerConnectorService;
@Autowired
private HealthStateService healthStateService;
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.METRIC_CONNECT_CONNECTOR;
}
@Override
protected List<String> listMetricPOFields() {
return BeanUtil.listBeanFields(ConnectorMetricPO.class);
}
@Override
protected void initRegisterVCHandler() {
registerVCHandler(CONNECTOR_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, this::getConnectorTaskMetricsAvg);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, this::getConnectorTaskMetricsMax);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, this::getConnectorTaskMetricsSum);
registerVCHandler(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE, this::getMetricHealthScore);
}
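/**
 * Cache-first collection: return the value from the local cache when present, otherwise collect it via JMX and back-fill the cache.
 */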
@Override
public Result<ConnectorMetrics> collectConnectClusterMetricsFromKafkaWithCacheFirst(Long connectClusterPhyId, String connectorName, String metric) {
String connectorMetricKey = CollectedMetricsLocalCache.genConnectorMetricCacheKey(connectClusterPhyId, connectorName, metric);
Float keyValue = CollectedMetricsLocalCache.getConnectorMetrics(connectorMetricKey);
if (null != keyValue) {
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterPhyId, connectorName, metric, keyValue);
return Result.buildSuc(connectorMetrics);
}
Result<ConnectorMetrics> ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, connectorName, metric);
if (ret == null || !ret.hasData()) {
return ret;
}
Map<String, Float> metricMap = ret.getData().getMetrics();
for (Map.Entry<String, Float> entry : metricMap.entrySet()) {
CollectedMetricsLocalCache.putConnectorMetrics(entry.getKey(), entry.getValue());
}
return ret;
}
@Override
public Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName) {
try {
ConnectorMetricParam metricParam = new ConnectorMetricParam(connectClusterPhyId, connectorName, metricName, null);
return (Result<ConnectorMetrics>) doVCHandler(connectClusterPhyId, metricName, metricParam);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<ConnectorMetrics> collectConnectClusterMetricsFromKafka(Long connectClusterPhyId, String connectorName, String metricName, ConnectorTypeEnum connectorType) {
try {
ConnectorMetricParam metricParam = new ConnectorMetricParam(connectClusterPhyId, connectorName, metricName, connectorType);
return (Result<ConnectorMetrics>) doVCHandler(connectClusterPhyId, metricName, metricParam);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<List<MetricMultiLinesVO>> listConnectClusterMetricsFromES(Long clusterPhyId, MetricsConnectorsDTO dto) {
Long startTime = dto.getStartTime();
Long endTime = dto.getEndTime();
Integer topN = dto.getTopNu();
String aggType = dto.getAggType();
List<String> metricNameList = dto.getMetricsNames();
List<Tuple<Long, String>> connectorList = new ArrayList<>();
if(!CollectionUtils.isEmpty(dto.getConnectorNameList())){
connectorList = dto.getConnectorNameList().stream()
.map(c -> new Tuple<>(c.getConnectClusterId(), c.getConnectorName()))
.collect(Collectors.toList());
}
Table<String/*metric*/, Tuple<Long, String>, List<MetricPointVO>> retTable;
if(ValidateUtils.isEmptyList(connectorList)) {
// No connectors specified: fetch by TopN
List<Tuple<Long, String>> defaultConnectorList = this.listTopNConnectorList(clusterPhyId, topN);
retTable = connectorMetricESDAO.listMetricsByTopN(clusterPhyId, defaultConnectorList, metricNameList, aggType, topN, startTime, endTime);
} else {
// Fetch metrics for the specified connectors
retTable = connectorMetricESDAO.listMetricsByConnectors(clusterPhyId, metricNameList, aggType, connectorList, startTime, endTime);
}
return Result.buildSuc(this.metricMap2VO(clusterPhyId, retTable.rowMap()));
}
@Override
public Result<List<ConnectorMetrics>> getLatestMetricsFromES(Long clusterPhyId, List<ClusterConnectorDTO> connectorNameList, List<String> metricsNames) {
List<Tuple<Long, String>> connectClusterIdAndConnectorNameList = connectorNameList
.stream()
.map(elem -> new Tuple<>(elem.getConnectClusterId(), elem.getConnectorName()))
.collect(Collectors.toList());
List<ConnectorMetricPO> poList =
connectorMetricESDAO.getConnectorLatestMetric(clusterPhyId, connectClusterIdAndConnectorNameList, metricsNames);
return Result.buildSuc(ConvertUtil.list2List(poList, ConnectorMetrics.class));
}
@Override
public Result<ConnectorMetrics> getLatestMetricsFromES(Long connectClusterId, String connectorName, List<String> metricsNames) {
ConnectorMetricPO connectorMetricPO = connectorMetricESDAO.getConnectorLatestMetric(
null, connectClusterId, connectorName, metricsNames);
return Result.buildSuc(ConvertUtil.obj2Obj(connectorMetricPO, ConnectorMetrics.class));
}
@Override
public boolean isMetricName(String str) {
return super.isMetricName(str);
}
/**************************************************** private method ****************************************************/
private Result<ConnectorMetrics> doNothing(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
return Result.buildSuc(new ConnectorMetrics(param.getConnectClusterId(), param.getConnectorName()));
}
private Result<ConnectorMetrics> getMetricHealthScore(VersionItemParam metricParam) {
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
ConnectorMetrics metrics = healthStateService.calConnectorHealthMetrics(connectClusterId, connectorName);
return Result.buildSuc(metrics);
}
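/**
 * Sum the connector-level JMX metric across all workers of the connect cluster.
 */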
private Result<ConnectorMetrics> getConnectWorkerMetricSum(VersionItemParam metricParam) {
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
float sum = 0;
boolean isCollected = false;
// Get the worker id list of this connect cluster
List<String> workerIdList = workerService.listFromDB(connectClusterId).stream().map(elem -> elem.getWorkerId()).collect(Collectors.toList());
for (String workerId : workerIdList) {
Result<ConnectorMetrics> ret = this.getConnectorMetric(connectClusterId, workerId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().getMetric(metric) == null) {
continue;
}
isCollected = true;
sum += ret.getData().getMetric(metric);
}
if (!isCollected) {
return Result.buildFailure(NOT_EXIST);
}
return Result.buildSuc(ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum));
}
// Metric from kafka.connect:type=connect-worker-metrics,connector="{connector}"
private Result<ConnectorMetrics> getConnectorMetric(Long connectClusterId, String workerId, String connectorName, String metric, ConnectorTypeEnum connectorType) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
if (jmxInfo.getType() != null) {
if (connectorType == null) {
connectorType = connectorService.getConnectorType(connectClusterId, connectorName);
}
if (connectorType != jmxInfo.getType()) {
return Result.buildFailure(VC_JMX_INSTANCE_NOT_FOUND);
}
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
// Read the metric value via JMX
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, Float.valueOf(value));
return Result.buildSuc(connectorMetrics);
} catch (InstanceNotFoundException e) {
// Ignore this error: it simply means the metric's MBean does not exist in JMX
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName));
} catch (Exception e) {
LOGGER.error("method=getConnectorMetric||connectClusterId={}||workerId={}||connectorName={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, connectorName, metric, jmxObjectName, e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
private Result<ConnectorMetrics> getConnectorTaskMetricsAvg(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
Result<List<ConnectorTaskMetrics>> ret = this.getConnectorTaskMetricList(connectClusterId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum / ret.getData().size());
return Result.buildSuc(connectorMetrics);
}
private Result<ConnectorMetrics> getConnectorTaskMetricsMax(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
Result<List<ConnectorTaskMetrics>> ret = this.getConnectorTaskMetricList(connectClusterId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float max = ret.getData().stream().max((a, b) -> a.getMetric(metric).compareTo(b.getMetric(metric))).get().getMetric(metric);
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, max);
return Result.buildSuc(connectorMetrics);
}
private Result<ConnectorMetrics> getConnectorTaskMetricsSum(VersionItemParam metricParam){
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metric = param.getMetricName();
ConnectorTypeEnum connectorType = param.getConnectorType();
Result<List<ConnectorTaskMetrics>> ret = this.getConnectorTaskMetricList(connectClusterId, connectorName, metric, connectorType);
if (ret == null || !ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum);
return Result.buildSuc(connectorMetrics);
}
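/**
 * Collect the task-level JMX metric for every task of the connector; tasks whose metric is missing are skipped.
 */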
private Result<List<ConnectorTaskMetrics>> getConnectorTaskMetricList(Long connectClusterId, String connectorName, String metricName, ConnectorTypeEnum connectorType) {
List<ConnectorTaskMetrics> connectorTaskMetricsList = new ArrayList<>();
List<WorkerConnector> workerConnectorList = workerConnectorService.listFromDB(connectClusterId).stream().filter(elem -> elem.getConnectorName().equals(connectorName)).collect(Collectors.toList());
if (workerConnectorList.isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
for (WorkerConnector workerConnector : workerConnectorList) {
Result<ConnectorTaskMetrics> ret = getConnectorTaskMetric(connectClusterId, workerConnector.getWorkerId(), connectorName, workerConnector.getTaskId(), metricName, connectorType);
if (ret == null || !ret.hasData() || ret.getData().getMetric(metricName) == null) {
continue;
}
connectorTaskMetricsList.add(ret.getData());
}
return Result.buildSuc(connectorTaskMetricsList);
}
private Result<ConnectorTaskMetrics> getConnectorTaskMetric(Long connectClusterId, String workerId, String connectorName, Integer taskId, String metric, ConnectorTypeEnum connectorType) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
if (jmxInfo.getType() != null) {
if (connectorType == null) {
connectorType = connectorService.getConnectorType(connectClusterId, connectorName);
}
if (connectorType != jmxInfo.getType()) {
return Result.buildFailure(VC_JMX_INSTANCE_NOT_FOUND);
}
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName, taskId);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
// Read the metric value via JMX
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
ConnectorTaskMetrics connectorTaskMetrics = ConnectorTaskMetrics.initWithMetric(connectClusterId, connectorName, taskId, metric, Float.valueOf(value));
return Result.buildSuc(connectorTaskMetrics);
} catch (Exception e) {
LOGGER.error("method=getConnectorTaskMetric||connectClusterId={}||workerId={}||connectorName={}||taskId={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, connectorName, taskId, metric, jmxObjectName, e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
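/**
 * Pick the first topN connectors of the Kafka cluster (a simple truncation of the DB list, not sorted by any metric).
 */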
private List<Tuple<Long, String>> listTopNConnectorList(Long clusterPhyId, Integer topN) {
List<ConnectorPO> poList = connectorService.listByKafkaClusterIdFromDB(clusterPhyId);
if (CollectionUtils.isEmpty(poList)) {
return new ArrayList<>();
}
return poList.subList(0, Math.min(topN, poList.size()))
.stream()
.map( c -> new Tuple<>(c.getId(), c.getConnectorName()) )
.collect(Collectors.toList());
}
protected List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Tuple<Long, String>, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
// Return directly if the map is empty
return multiLinesVOS;
}
for(String metric : map.keySet()){
try {
MetricMultiLinesVO multiLinesVO = new MetricMultiLinesVO();
multiLinesVO.setMetricName(metric);
List<MetricLineVO> metricLines = new ArrayList<>();
Map<Tuple<Long, String>, List<MetricPointVO>> metricPointMap = map.get(metric);
if(null == metricPointMap || metricPointMap.isEmpty()){continue;}
for(Map.Entry<Tuple<Long, String>, List<MetricPointVO>> entry : metricPointMap.entrySet()){
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(entry.getKey().getV1() + "#" + entry.getKey().getV2());
metricLineVO.setMetricName(metric);
metricLineVO.setMetricPoints(entry.getValue());
metricLines.add(metricLineVO);
}
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e);
}
}
return multiLinesVOS;
}
}

View File

@@ -0,0 +1,683 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.converter.ConnectConverter;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR;
@Service
public class ConnectorServiceImpl extends BaseVersionControlService implements ConnectorService {
private static final ILog LOGGER = LogFactory.getLog(ConnectorServiceImpl.class);
@Autowired
private RestTool restTool;
@Autowired
private ConnectorDAO connectorDAO;
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private OpLogWrapService opLogWrapService;
private static final String LIST_CONNECTORS_URI = "/connectors";
private static final String GET_CONNECTOR_INFO_PREFIX_URI = "/connectors";
private static final String GET_CONNECTOR_TOPICS_URI = "/connectors/%s/topics";
private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status";
private static final String CREATE_CONNECTOR_URI = "/connectors";
private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume";
private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart";
private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause";
private static final String DELETE_CONNECTOR_URI = "/connectors/%s";
private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config";
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_CONNECTOR;
}
@Override
public Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// 构造参数
Properties props = new Properties();
props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName);
props.put("config", configs);
ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
connectCluster.getClusterUrl() + CREATE_CONNECTOR_URI,
props,
ConnectorInfo.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ADD.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectClusterId);
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception",
connectClusterId, connectorName, configs, operator, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<List<String>> listConnectorsFromCluster(Long connectClusterId) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
List<String> nameList = restTool.getArrayObjectWithJsonContent(
connectCluster.getClusterUrl() + LIST_CONNECTORS_URI,
new HashMap<>(),
String.class
);
return Result.buildSuc(nameList);
} catch (Exception e) {
LOGGER.error(
"method=listConnectorsFromCluster||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<KSConnectorInfo> getConnectorInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorInfoFromCluster(connectCluster, connectorName);
}
@Override
public Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorTopicsFromCluster(connectCluster, connectorName);
}
@Override
public Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
}
@Override
public Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
Result<KSConnectorInfo> connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName);
if (connectorResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
return Result.buildFromIgnoreData(connectorResult);
}
Result<List<String>> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName);
if (topicNameListResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, topicNameListResult
);
}
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, stateInfoResult
);
}
return Result.buildSuc(ConnectConverter.convert2KSConnector(
connectCluster.getKafkaClusterPhyId(),
connectCluster.getId(),
connectorResult.getData(),
stateInfoResult.getData(),
topicNameListResult.getData()
));
}
@Override
public Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ENABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=ConnectorServiceImpl||method=resumeConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.postObjectWithJsonContent(
connectCluster.getClusterUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.RESTART.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=restartConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DISABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=stopConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.deleteWithParamsAndHeader(
connectCluster.getClusterUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
new HashMap<>(),
new HashMap<>(),
String.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
this.deleteConnectorInDB(connectClusterId, connectorName);
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=deleteConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
ConnectorInfo connectorInfo = restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
configs,
ConnectorInfo.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=updateConnectorConfig||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
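/**
 * Diff the connectors fetched from the cluster against the DB records: insert new ones, update existing ones,
 * and delete DB rows whose connector no longer exists in the cluster.
 */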
@Override
public void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet) {
List<ConnectorPO> poList = this.listByConnectClusterIdFromDB(connectClusterId);
Map<String, ConnectorPO> oldPOMap = new HashMap<>();
poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem));
for (KSConnector connector: connectorList) {
try {
ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName());
if (oldPO == null) {
oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
connectorDAO.insert(oldPO);
} else {
ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
newPO.setId(oldPO.getId());
connectorDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldPOMap.values().forEach(elem -> {
if (allConnectorNameSet.contains(elem.getConnectorName())) {
// The connector still exists in the cluster, keep it
return;
}
// The connector no longer exists in the cluster, remove it from the DB
connectorDAO.deleteById(elem.getId());
});
} catch (Exception e) {
// ignore
}
}
@Override
public void addNewToDB(KSConnector connector) {
try {
connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class));
} catch (DuplicateKeyException dke) {
// ignore
}
}
@Override
public List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return connectorDAO.selectList(lambdaQueryWrapper);
}
@Override
public List<ConnectorPO> listByConnectClusterIdFromDB(Long connectClusterId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
return connectorDAO.selectList(lambdaQueryWrapper);
}
@Override
public int countByConnectClusterIdFromDB(Long connectClusterId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
return connectorDAO.selectCount(lambdaQueryWrapper);
}
@Override
public ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
return connectorDAO.selectOne(lambdaQueryWrapper);
}
@Override
public ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName) {
ConnectorTypeEnum connectorType = ConnectorTypeEnum.UNKNOWN;
ConnectorPO connector = this.getConnectorFromDB(connectClusterId, connectorName);
if (connector != null) {
connectorType = ConnectorTypeEnum.getByName(connector.getConnectorType());
}
return connectorType;
}
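/**
 * For every MirrorSourceConnector, find the heartbeat and checkpoint connectors that share the same
 * source/target bootstrap servers and record their names on the source connector.
 */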
@Override
public void completeMirrorMakerInfo(ConnectCluster connectCluster, List<KSConnector> connectorList) {
List<KSConnector> sourceConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
if (sourceConnectorList.isEmpty()) {
return;
}
List<KSConnector> heartBeatConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE)).collect(Collectors.toList());
List<KSConnector> checkpointConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE)).collect(Collectors.toList());
Map<String, String> heartbeatMap = this.buildMirrorMakerMap(connectCluster, heartBeatConnectorList);
Map<String, String> checkpointMap = this.buildMirrorMakerMap(connectCluster, checkpointConnectorList);
for (KSConnector sourceConnector : sourceConnectorList) {
Result<KSConnectorInfo> ret = this.getConnectorInfoFromCluster(connectCluster, sourceConnector.getConnectorName());
if (!ret.hasData()) {
LOGGER.error(
"method=completeMirrorMakerInfo||connectClusterId={}||connectorName={}||get connectorInfo fail!",
connectCluster.getId(), sourceConnector.getConnectorName()
);
continue;
}
KSConnectorInfo ksConnectorInfo = ret.getData();
String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetServers, sourceServers)) {
continue;
}
String[] targetBrokerList = getBrokerList(targetServers);
String[] sourceBrokerList = getBrokerList(sourceServers);
sourceConnector.setHeartbeatConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, heartbeatMap));
sourceConnector.setCheckpointConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, checkpointMap));
}
}
/**************************************************** private method ****************************************************/
private int deleteConnectorInDB(Long connectClusterId, String connectorName) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
return connectorDAO.delete(lambdaQueryWrapper);
}
private Result<KSConnectorInfo> getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
ConnectorInfo connectorInfo = restTool.getForObject(
connectCluster.getClusterUrl() + GET_CONNECTOR_INFO_PREFIX_URI + "/" + connectorName,
new HashMap<>(),
ConnectorInfo.class
);
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectCluster.getId());
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=getConnectorInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
private Result<List<String>> getConnectorTopicsFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
Properties properties = restTool.getForObject(
connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_TOPICS_URI, connectorName),
new HashMap<>(),
Properties.class
);
ActiveTopicsInfo activeTopicsInfo = ConvertUtil.toObj(ConvertUtil.obj2Json(properties.get(connectorName)), ActiveTopicsInfo.class);
return Result.buildSuc(new ArrayList<>(activeTopicsInfo.topics()));
} catch (Exception e) {
LOGGER.error(
"method=getConnectorTopicsFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
private Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
KSConnectorStateInfo connectorStateInfo = restTool.getForObject(
connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName),
new HashMap<>(),
KSConnectorStateInfo.class
);
return Result.buildSuc(connectorStateInfo);
} catch (Exception e) {
LOGGER.error(
"method=getConnectorStateInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
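/**
 * Back off briefly, then pull the latest connector state from the Connect REST API and persist it to the DB.
 */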
private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) {
try {
// Back off 2 seconds so the cluster has time to apply the change before we query the latest state
BackoffUtils.backoff(2000);
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
return;
}
ConnectorPO po = new ConnectorPO();
po.setConnectClusterId(connectClusterId);
po.setConnectorName(connectorName);
po.setState(stateInfoResult.getData().getConnector().getState());
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
connectorDAO.update(po, lambdaQueryWrapper);
} catch (Exception e) {
LOGGER.error(
"method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception",
connectClusterId, connectorName, e
);
}
}
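/**
 * Build a "targetBroker@sourceBroker" -> connectorName map so heartbeat/checkpoint connectors can later be matched to their source connector.
 */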
private Map<String, String> buildMirrorMakerMap(ConnectCluster connectCluster, List<KSConnector> ksConnectorList) {
Map<String, String> bindMap = new HashMap<>();
for (KSConnector ksConnector : ksConnectorList) {
Result<KSConnectorInfo> ret = this.getConnectorInfoFromCluster(connectCluster, ksConnector.getConnectorName());
if (!ret.hasData()) {
LOGGER.error(
"method=buildMirrorMakerMap||connectClusterId={}||connectorName={}||get connectorInfo fail!",
connectCluster.getId(), ksConnector.getConnectorName()
);
continue;
}
KSConnectorInfo ksConnectorInfo = ret.getData();
String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetServers, sourceServers)) {
continue;
}
String[] targetBrokerList = getBrokerList(targetServers);
String[] sourceBrokerList = getBrokerList(sourceServers);
for (String targetBroker : targetBrokerList) {
for (String sourceBroker : sourceBrokerList) {
bindMap.put(targetBroker + "@" + sourceBroker, ksConnector.getConnectorName());
}
}
}
return bindMap;
}
private String findBindConnector(String[] targetBrokerList, String[] sourceBrokerList, Map<String, String> connectorBindMap) {
for (String targetBroker : targetBrokerList) {
for (String sourceBroker : sourceBrokerList) {
String connectorName = connectorBindMap.get(targetBroker + "@" + sourceBroker);
if (connectorName != null) {
return connectorName;
}
}
}
return "";
}
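/**
 * Split a bootstrap.servers string on ';' or ',' into individual broker addresses.
 */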
private String[] getBrokerList(String str) {
if (ValidateUtils.isBlank(str)) {
return new String[0];
}
if (str.contains(";")) {
return str.split(";");
}
if (str.contains(",")) {
return str.split(",");
}
return new String[]{str};
}
}
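
A side note on the MirrorMaker binding above: buildMirrorMakerMap keys its map with every targetBroker + "@" + sourceBroker pair taken from the connector config, so findBindConnector only needs one overlapping pair to resolve the connector. A minimal, self-contained sketch of that key scheme (broker addresses and connector name are hypothetical):

import java.util.HashMap;
import java.util.Map;

public class MirrorMakerBindingSketch {
    public static void main(String[] args) {
        // Hypothetical MM2 connector whose config lists these bootstrap servers
        String[] targetBrokers = {"t1:9092", "t2:9092"};  // brokers from the target cluster config
        String[] sourceBrokers = {"s1:9092"};             // brokers from the source cluster config

        Map<String, String> bindMap = new HashMap<>();
        for (String target : targetBrokers) {
            for (String source : sourceBrokers) {
                bindMap.put(target + "@" + source, "mm2-source-connector");
            }
        }

        // One matching (target, source) pair is enough for the lookup to succeed
        System.out.println(bindMap.get("t2:9092@s1:9092")); // -> mm2-source-connector
    }
}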

View File

@@ -0,0 +1,28 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.mm2;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.mm2.MetricsMirrorMakersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import java.util.List;
/**
* @author wyb
* @date 2022/12/15
*/
public interface MirrorMakerMetricService {
Result<MirrorMakerMetrics> collectMirrorMakerMetricsFromKafka(Long connectClusterPhyId, String mirrorMakerName, List<MirrorMakerTopic> mirrorMakerTopicList, String metricName);
/**
 * Fetch the aggregated metric lines for a time range from ES
*/
Result<List<MetricMultiLinesVO>> listMirrorMakerClusterMetricsFromES(Long clusterPhyId, MetricsMirrorMakersDTO dto);
Result<List<MirrorMakerMetrics>> getLatestMetricsFromES(Long clusterPhyId, List<Tuple<Long, String>> mirrorMakerList, List<String> metricNameList);
Result<MirrorMakerMetrics> getLatestMetricsFromES(Long connectClusterId, String connectorName, List<String> metricsNames);
}

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.mm2;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import java.util.List;
import java.util.Map;
/**
* @author wyb
* @date 2022/12/14
*/
public interface MirrorMakerService {
Result<Map<String, MirrorMakerTopic>> getMirrorMakerTopicMap(Long connectClusterId);
List<MirrorMakerTopic> getMirrorMakerTopicList(ConnectorPO mirrorMaker, Map<String, MirrorMakerTopic> mirrorMakerTopicMap);
}

View File

@@ -0,0 +1,324 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.mm2.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.mm2.MetricsMirrorMakersDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerTopicPartitionMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.connect.mm2.MirrorMakerMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.connect.ConnectorMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.mm2.MirrorMakerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.mm2.MirrorMakerMetricESDAO;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_MIRROR_MAKER;
/**
* @author wyb
* @date 2022/12/15
*/
@Service
public class MirrorMakerMetricServiceImpl extends BaseConnectorMetricService implements MirrorMakerMetricService {
protected static final ILog LOGGER = LogFactory.getLog(MirrorMakerMetricServiceImpl.class);
public static final String MIRROR_MAKER_METHOD_DO_NOTHING = "doNothing";
public static final String MIRROR_MAKER_METHOD_GET_HEALTH_SCORE = "getMetricHealthScore";
public static final String MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM = "getTopicPartitionMetricListSum";
public static final String MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_AVG = "getTopicPartitionMetricListAvg";
public static final String MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MIN = "getTopicPartitionMetricListMin";
public static final String MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MAX = "getTopicPartitionMetricListMax";
@Autowired
private ConnectJMXClient connectJMXClient;
@Autowired
private MirrorMakerMetricESDAO mirrorMakerMetricESDAO;
@Autowired
private ConnectorService connectorService;
@Autowired
private HealthStateService healthStateService;
@Override
protected List<String> listMetricPOFields() {
return BeanUtil.listBeanFields(MirrorMakerMetricPO.class);
}
@Override
protected void initRegisterVCHandler() {
registerVCHandler(MIRROR_MAKER_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler(MIRROR_MAKER_METHOD_GET_HEALTH_SCORE, this::getMetricHealthScore);
registerVCHandler(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM, this::getTopicPartitionMetricListSum);
registerVCHandler(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_AVG, this::getTopicPartitionMetricListAvg);
registerVCHandler(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MAX, this::getTopicPartitionMetricListMax);
registerVCHandler(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MIN, this::getTopicPartitionMetricListMin);
}
@Override
protected VersionItemTypeEnum getVersionItemType() {
return METRIC_CONNECT_MIRROR_MAKER;
}
@Override
public Result<MirrorMakerMetrics> collectMirrorMakerMetricsFromKafka(Long connectClusterPhyId, String mirrorMakerName, List<MirrorMakerTopic> mirrorMakerTopicList, String metricName) {
try {
MirrorMakerMetricParam metricParam = new MirrorMakerMetricParam(connectClusterPhyId, mirrorMakerName, mirrorMakerTopicList, metricName);
return (Result<MirrorMakerMetrics>) doVCHandler(connectClusterPhyId, metricName, metricParam);
} catch (Exception e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<List<MetricMultiLinesVO>> listMirrorMakerClusterMetricsFromES(Long clusterPhyId, MetricsMirrorMakersDTO dto) {
Long startTime = dto.getStartTime();
Long endTime = dto.getEndTime();
Integer topN = dto.getTopNu();
String aggType = dto.getAggType();
List<String> metricNameList = dto.getMetricsNames();
List<Tuple<Long, String>> connectorList = new ArrayList<>();
if(!CollectionUtils.isEmpty(dto.getConnectorNameList())){
connectorList = dto.getConnectorNameList().stream()
.map(c -> new Tuple<>(c.getConnectClusterId(), c.getConnectorName()))
.collect(Collectors.toList());
}
Table<String/*metric*/, Tuple<Long, String>, List<MetricPointVO>> retTable;
if(ValidateUtils.isEmptyList(connectorList)) {
            // No connectors specified, fall back to the TopN connectors
List<Tuple<Long, String>> defaultConnectorList = this.listTopNMirrorMakerList(clusterPhyId, topN);
retTable = mirrorMakerMetricESDAO.listMetricsByTopN(clusterPhyId, defaultConnectorList, metricNameList, aggType, topN, startTime, endTime);
} else {
            // Fetch by the specified connector list
retTable = mirrorMakerMetricESDAO.listMetricsByConnectors(clusterPhyId, metricNameList, aggType, connectorList, startTime, endTime);
}
return Result.buildSuc(this.metricMap2VO(clusterPhyId, retTable.rowMap()));
}
@Override
public Result<List<MirrorMakerMetrics>> getLatestMetricsFromES(Long clusterPhyId, List<Tuple<Long, String>> mirrorMakerList, List<String> metricNameList) {
List<ConnectorMetricPO> connectorLatestMetricList = mirrorMakerMetricESDAO.getConnectorLatestMetric(clusterPhyId, mirrorMakerList, metricNameList);
return Result.buildSuc(ConvertUtil.list2List(connectorLatestMetricList, MirrorMakerMetrics.class));
}
@Override
public Result<MirrorMakerMetrics> getLatestMetricsFromES(Long connectClusterId, String connectorName, List<String> metricsNames) {
ConnectorMetricPO connectorLatestMetric = mirrorMakerMetricESDAO.getConnectorLatestMetric(null, connectClusterId, connectorName, metricsNames);
MirrorMakerMetrics mirrorMakerMetrics = ConvertUtil.obj2Obj(connectorLatestMetric, MirrorMakerMetrics.class);
return Result.buildSuc(mirrorMakerMetrics);
}
private List<Tuple<Long, String>> listTopNMirrorMakerList(Long clusterPhyId, Integer topN) {
List<ConnectorPO> poList = connectorService.listByKafkaClusterIdFromDB(clusterPhyId);
if (CollectionUtils.isEmpty(poList)) {
return new ArrayList<>();
}
return poList.subList(0, Math.min(topN, poList.size()))
.stream()
.map( c -> new Tuple<>(c.getId(), c.getConnectorName()) )
.collect(Collectors.toList());
}
protected List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Tuple<Long, String>, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
            // Return immediately if empty
return multiLinesVOS;
}
for(String metric : map.keySet()){
try {
MetricMultiLinesVO multiLinesVO = new MetricMultiLinesVO();
multiLinesVO.setMetricName(metric);
List<MetricLineVO> metricLines = new ArrayList<>();
Map<Tuple<Long, String>, List<MetricPointVO>> metricPointMap = map.get(metric);
if(null == metricPointMap || metricPointMap.isEmpty()){continue;}
for(Map.Entry<Tuple<Long, String>, List<MetricPointVO>> entry : metricPointMap.entrySet()){
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(entry.getKey().getV1() + "#" + entry.getKey().getV2());
metricLineVO.setMetricName(metric);
metricLineVO.setMetricPoints(entry.getValue());
metricLines.add(metricLineVO);
}
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e);
}
}
return multiLinesVOS;
}
private Result<MirrorMakerMetrics> doNothing(VersionItemParam metricParam) {
MirrorMakerMetricParam param = (MirrorMakerMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
return Result.buildSuc(new MirrorMakerMetrics(connectClusterId,mirrorMakerName));
}
private Result<MirrorMakerMetrics> getMetricHealthScore(VersionItemParam metricParam) {
MirrorMakerMetricParam param = (MirrorMakerMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
MirrorMakerMetrics metrics = healthStateService.calMirrorMakerHealthMetrics(connectClusterId, mirrorMakerName);
return Result.buildSuc(metrics);
}
private Result<MirrorMakerMetrics> getTopicPartitionMetricListSum(VersionItemParam metricParam) {
MirrorMakerMetricParam param = (MirrorMakerMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
List<MirrorMakerTopic> mirrorMakerTopicList = param.getMirrorMakerTopicList();
String metric = param.getMetric();
Result<List<MirrorMakerTopicPartitionMetrics>> ret = this.getTopicPartitionMetricList(connectClusterId, mirrorMakerName, mirrorMakerTopicList, metric);
if (!ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
return Result.buildSuc(MirrorMakerMetrics.initWithMetric(connectClusterId, mirrorMakerName, metric, sum));
}
private Result<MirrorMakerMetrics> getTopicPartitionMetricListAvg(VersionItemParam metricParam) {
MirrorMakerMetricParam param = (MirrorMakerMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
List<MirrorMakerTopic> mirrorMakerTopicList = param.getMirrorMakerTopicList();
String metric = param.getMetric();
Result<List<MirrorMakerTopicPartitionMetrics>> ret = this.getTopicPartitionMetricList(connectClusterId, mirrorMakerName, mirrorMakerTopicList, metric);
if (!ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
return Result.buildSuc(MirrorMakerMetrics.initWithMetric(connectClusterId, mirrorMakerName, metric, sum / ret.getData().size()));
}
private Result<MirrorMakerMetrics> getTopicPartitionMetricListMax(VersionItemParam metricParam) {
MirrorMakerMetricParam param = (MirrorMakerMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
List<MirrorMakerTopic> mirrorMakerTopicList = param.getMirrorMakerTopicList();
String metric = param.getMetric();
Result<List<MirrorMakerTopicPartitionMetrics>> ret = this.getTopicPartitionMetricList(connectClusterId, mirrorMakerName, mirrorMakerTopicList, metric);
if (!ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
Float max = ret.getData().stream().max((a, b) -> a.getMetric(metric).compareTo(b.getMetric(metric))).get().getMetric(metric);
return Result.buildSuc(MirrorMakerMetrics.initWithMetric(connectClusterId, mirrorMakerName, metric, max));
}
private Result<MirrorMakerMetrics> getTopicPartitionMetricListMin(VersionItemParam metricParam) {
MirrorMakerMetricParam param = (MirrorMakerMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
List<MirrorMakerTopic> mirrorMakerTopicList = param.getMirrorMakerTopicList();
String metric = param.getMetric();
Result<List<MirrorMakerTopicPartitionMetrics>> ret = this.getTopicPartitionMetricList(connectClusterId, mirrorMakerName, mirrorMakerTopicList, metric);
if (!ret.hasData() || ret.getData().isEmpty()) {
return Result.buildFailure(NOT_EXIST);
}
        Float min = ret.getData().stream().min((a, b) -> a.getMetric(metric).compareTo(b.getMetric(metric))).get().getMetric(metric);
return Result.buildSuc(MirrorMakerMetrics.initWithMetric(connectClusterId, mirrorMakerName, metric, min));
}
private Result<List<MirrorMakerTopicPartitionMetrics>> getTopicPartitionMetricList(Long connectClusterId, String mirrorMakerName, List<MirrorMakerTopic> mirrorMakerTopicList, String metric) {
List<MirrorMakerTopicPartitionMetrics> topicPartitionMetricsList = new ArrayList<>();
for (MirrorMakerTopic mirrorMakerTopic : mirrorMakerTopicList) {
for (Map.Entry<Integer, String> entry : mirrorMakerTopic.getPartitionMap().entrySet()) {
Result<MirrorMakerTopicPartitionMetrics> ret = this.getMirrorMakerTopicPartitionMetric(connectClusterId, mirrorMakerName, mirrorMakerTopic.getClusterAlias(), mirrorMakerTopic.getTopicName(), entry.getKey(), entry.getValue(), metric);
if (!ret.hasData() || ret.getData().getMetric(metric) == null) {
continue;
}
topicPartitionMetricsList.add(ret.getData());
}
}
return Result.buildSuc(topicPartitionMetricsList);
}
private Result<MirrorMakerTopicPartitionMetrics> getMirrorMakerTopicPartitionMetric(Long connectClusterId, String mirrorMakerName, String clusterAlias, String topicName, Integer partitionId, String workerId, String metric) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), clusterAlias, topicName, partitionId);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
if (ValidateUtils.isNull(jmxConnectorWrap)) {
return Result.buildFailure(VC_JMX_INIT_ERROR);
}
try {
            // Fetch the JMX attribute value
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
MirrorMakerTopicPartitionMetrics metrics = MirrorMakerTopicPartitionMetrics.initWithMetric(connectClusterId, mirrorMakerName, clusterAlias, topicName, partitionId, workerId, metric, Float.valueOf(value));
return Result.buildSuc(metrics);
} catch (InstanceNotFoundException e) {
            // Ignore this error: it occurs when the metric does not exist in JMX
return Result.buildSuc(new MirrorMakerTopicPartitionMetrics(connectClusterId, mirrorMakerName, clusterAlias, topicName, partitionId, workerId));
} catch (Exception e) {
LOGGER.error("method=getMirrorMakerTopicPartitionMetric||connectClusterId={}||mirrorMakerName={}||clusterAlias={}||topicName={}||partitionId={}||workerId={}||metrics={}||jmx={}||msg={}",
connectClusterId, mirrorMakerName, clusterAlias, topicName, partitionId, workerId, metric, jmxObjectName, e.getClass().getName());
return Result.buildFailure(VC_JMX_CONNECT_ERROR);
}
}
}
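
For orientation, getMirrorMakerTopicPartitionMetric above reads a single JMX attribute per topic partition through JmxConnectorWrap. The sketch below shows the same read with the plain javax.management API, assuming a reachable worker JMX endpoint; the MBean name and attribute are illustrative MM2-style values, not taken from this codebase:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class Mm2JmxReadSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical worker JMX endpoint; KnowStreaming resolves host/port per worker
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://worker-host:9999/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
            MBeanServerConnection conn = connector.getMBeanServerConnection();
            // Illustrative MM2-style per-partition MBean and attribute (assumed, not from this repo)
            ObjectName name = new ObjectName(
                    "kafka.connect.mirror:type=MirrorSourceConnector,target=backup,topic=orders,partition=0");
            Object value = conn.getAttribute(name, "replication-latency-ms-avg");
            System.out.println(value);
        } finally {
            connector.close();
        }
    }
}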

View File

@@ -0,0 +1,95 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.mm2.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.management.ObjectName;
import java.util.*;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TOPIC_PARTITION_PATTERN;
/**
* @author wyb
* @date 2022/12/14
*/
@Service
public class MirrorMakerServiceImpl implements MirrorMakerService {
private static final ILog LOGGER = LogFactory.getLog(MirrorMakerServiceImpl.class);
@Autowired
private WorkerService workerService;
@Autowired
private ConnectJMXClient connectJMXClient;
@Override
public Result<Map<String, MirrorMakerTopic>> getMirrorMakerTopicMap(Long connectClusterId) {
List<ConnectWorker> connectWorkerList = workerService.listFromDB(connectClusterId);
//Map<TopicName,MirrorMakerTopic>
Map<String, MirrorMakerTopic> topicMap = new HashMap<>();
for (ConnectWorker connectWorker : connectWorkerList) {
            JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, connectWorker.getWorkerId());
            if (jmxConnectorWrap == null) {
                continue;
            }
Set<ObjectName> objectNameSet = new HashSet<>();
try {
objectNameSet = jmxConnectorWrap.queryNames(new ObjectName(MIRROR_MAKER_TOPIC_PARTITION_PATTERN), null);
} catch (Exception e) {
LOGGER.error("method=getMirrorMakerTopic||connectClusterId={}||workerId={}||queryNames failed!",
connectClusterId, connectWorker.getWorkerId());
continue;
}
            // Parse the MBean names into topic/partition info
for (ObjectName objectName : objectNameSet) {
try {
String[] paramList = objectName.getCanonicalName().split(",");
String clusterAlias = paramList[1].split("=")[1];
String topicName = paramList[2].split("=")[1];
Integer partition = Integer.valueOf(paramList[0].split("=")[1]);
MirrorMakerTopic mirrorMakerTopic = topicMap.get(topicName);
if (mirrorMakerTopic == null) {
mirrorMakerTopic = new MirrorMakerTopic(clusterAlias, topicName, new HashMap<>());
topicMap.put(topicName, mirrorMakerTopic);
}
mirrorMakerTopic.getPartitionMap().put(partition, connectWorker.getWorkerId());
} catch (Exception e) {
LOGGER.error("method=getMirrorMakerTopic||connectClusterId={}||workerId={}||canonicalName={}||canonicalName explain error!",
connectClusterId, connectWorker.getWorkerId(), objectName.getCanonicalName());
}
}
}
return Result.buildSuc(topicMap);
}
@Override
public List<MirrorMakerTopic> getMirrorMakerTopicList(ConnectorPO mirrorMaker, Map<String, MirrorMakerTopic> mirrorMakerTopicMap) {
List<MirrorMakerTopic> mirrorMakerTopicList = new ArrayList<>();
List<String> topicList = CommonUtils.string2StrList(mirrorMaker.getTopics());
for (String topicName : topicList) {
MirrorMakerTopic mirrorMakerTopic = mirrorMakerTopicMap.get(topicName);
if (mirrorMakerTopic != null) {
mirrorMakerTopicList.add(mirrorMakerTopic);
}
}
return mirrorMakerTopicList;
}
}
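
One design note on getMirrorMakerTopicMap above: it splits ObjectName.getCanonicalName() on commas and relies on the canonical (alphabetical) ordering of the keys. ObjectName.getKeyProperty yields the same values without depending on key order; the key names used below ("target", "topic", "partition") are an assumption inferred from the indices in the code:

import javax.management.ObjectName;

public class ObjectNameParseSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical MBean name of the shape the MIRROR_MAKER_TOPIC_PARTITION_PATTERN query returns
        ObjectName name = new ObjectName(
                "kafka.connect.mirror:type=MirrorSourceConnector,target=backup,topic=orders,partition=3");
        String clusterAlias = name.getKeyProperty("target");                    // "backup"
        String topicName = name.getKeyProperty("topic");                        // "orders"
        Integer partition = Integer.valueOf(name.getKeyProperty("partition"));  // 3
        System.out.println(clusterAlias + "/" + topicName + "-" + partition);
    }
}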

View File

@@ -0,0 +1,20 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.plugin;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.config.ConnectConfigInfos;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.plugin.ConnectPluginBasic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
import java.util.Properties;
/**
 * Connector plugin related operations
*/
public interface PluginService {
Result<ConnectConfigInfos> getConfig(Long connectClusterId, String pluginName);
Result<ConnectConfigInfos> validateConfig(Long connectClusterId, Properties props);
Result<List<ConnectPluginBasic>> listPluginsFromCluster(Long connectClusterId);
}

View File

@@ -0,0 +1,112 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.plugin.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.config.ConnectConfigInfos;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.plugin.ConnectPluginBasic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_PLUGIN;
@Service
public class PluginServiceImpl extends BaseVersionControlService implements PluginService {
private static final ILog LOGGER = LogFactory.getLog(PluginServiceImpl.class);
@Autowired
private RestTool restTool;
@Autowired
private ConnectClusterService connectClusterService;
private static final String GET_PLUGIN_CONFIG_DESC_URI = "/connector-plugins/%s/config/validate";
private static final String GET_ALL_PLUGINS_URI = "/connector-plugins";
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_PLUGIN;
}
@Override
public Result<ConnectConfigInfos> getConfig(Long connectClusterId, String pluginName) {
Properties props = new Properties();
props.put(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME, pluginName);
props.put(KafkaConnectConstant.CONNECTOR_TOPICS_FILED_NAME, KafkaConnectConstant.CONNECTOR_TOPICS_FILED_ERROR_VALUE);
return this.validateConfig(connectClusterId, props);
}
@Override
public Result<ConnectConfigInfos> validateConfig(Long connectClusterId, Properties props) {
try {
if (ValidateUtils.isBlank(props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "参数错误, connector.class字段数据不允许不存在或者为空");
}
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
            // Fetch the plugin config via the config-validate endpoint
ConfigInfos configInfos = restTool.putJsonForObject(
connectCluster.getClusterUrl() + String.format(GET_PLUGIN_CONFIG_DESC_URI, props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME)),
props,
ConfigInfos.class
);
return Result.buildSuc(new ConnectConfigInfos(configInfos));
} catch (Exception e) {
LOGGER.error(
"method=validateConfig||connectClusterId={}||pluginName={}||errMsg=exception",
connectClusterId,
props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME),
e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<List<ConnectPluginBasic>> listPluginsFromCluster(Long connectClusterId) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
            // Fetch the plugin list from the connect cluster
List<ConnectPluginBasic> pluginList = restTool.getArrayObjectWithJsonContent(
connectCluster.getClusterUrl() + GET_ALL_PLUGINS_URI,
new HashMap<>(),
ConnectPluginBasic.class
);
return Result.buildSuc(pluginList);
} catch (Exception e) {
LOGGER.error(
"method=listPluginsFromCluster||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
}
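
validateConfig above wraps the Kafka Connect REST endpoint PUT /connector-plugins/{connectorClass}/config/validate and maps the returned ConfigInfos. A minimal sketch of the same call with the JDK 11 HTTP client; the worker URL, plugin class, and placeholder topics value are assumptions for illustration:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PluginValidateSketch {
    public static void main(String[] args) throws Exception {
        String clusterUrl = "http://connect-worker:8083";  // hypothetical worker URL
        String pluginClass = "org.apache.kafka.connect.mirror.MirrorSourceConnector";
        String body = "{\"connector.class\":\"" + pluginClass + "\",\"topics\":\"placeholder\"}";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(clusterUrl + "/connector-plugins/" + pluginClass + "/config/validate"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // The body is a ConfigInfos-style JSON document (error_count, groups, configs ...)
        System.out.println(response.statusCode() + " " + response.body());
    }
}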

View File

@@ -0,0 +1,23 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.task.TaskActionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.List;
/**
* Worker
*/
public interface WorkerConnectorService {
void batchReplaceInDB(Long connectClusterId, List<WorkerConnector> workerList);
List<WorkerConnector> listFromDB(Long connectClusterId);
List<WorkerConnector> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
Result<Void> actionTask(TaskActionDTO dto);
List<WorkerConnector> getWorkerConnectorListFromCluster(ConnectCluster connectCluster, String connectorName);
}

View File

@@ -0,0 +1,38 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.connector.ClusterWorkerOverviewVO;
import java.util.List;
/**
* Worker
* @author didi
*/
public interface WorkerService {
/**
 * Batch replace workers in the DB
* @param connectClusterId
* @param workerList
*/
void batchReplaceInDB(Long connectClusterId, List<ConnectWorker> workerList);
/**
 * List workers from the DB
* @param connectClusterId
* @return
*/
List<ConnectWorker> listFromDB(Long connectClusterId);
/**
 * Paginated query of workers by Kafka cluster
* @param kafkaClusterPhyId
* @param dto
* @return
*/
PaginationResult<ClusterWorkerOverviewVO> pageWorkByKafkaClusterPhy(Long kafkaClusterPhyId, PaginationBaseDTO dto);
List<ConnectWorker> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
}

View File

@@ -0,0 +1,143 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.task.TaskActionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.WorkerConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSTaskState;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.WorkerConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.WorkerConnectorDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectActionEnum.RESTART;
@Service
public class WorkerConnectorServiceImpl implements WorkerConnectorService {
protected static final ILog LOGGER = LogFactory.getLog(WorkerConnectorServiceImpl.class);
@Autowired
private WorkerConnectorDAO workerConnectorDAO;
@Autowired
private RestTool restTool;
@Autowired
private ConnectorService connectorService;
@Autowired
private WorkerService workerService;
private static final String RESTART_TASK_URI = "%s/connectors/%s/tasks/%d/restart";
@Override
public void batchReplaceInDB(Long connectClusterId, List<WorkerConnector> workerList) {
Map<String, WorkerConnectorPO> oldMap = new HashMap<>();
for (WorkerConnectorPO oldPO : this.listPOSFromDB(connectClusterId)) {
oldMap.put(oldPO.getConnectorName() + oldPO.getWorkerId() + oldPO.getTaskId() + oldPO.getState(), oldPO);
}
for (WorkerConnector workerConnector : workerList) {
try {
String key = workerConnector.getConnectorName() + workerConnector.getWorkerId() + workerConnector.getTaskId() + workerConnector.getState();
WorkerConnectorPO oldPO = oldMap.remove(key);
if (oldPO == null) {
workerConnectorDAO.insert(ConvertUtil.obj2Obj(workerConnector, WorkerConnectorPO.class));
} else {
                    // Record already exists, nothing to do
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldMap.values().forEach(elem -> workerConnectorDAO.deleteById(elem.getId()));
} catch (Exception e) {
// ignore
}
}
@Override
public List<WorkerConnector> listFromDB(Long connectClusterId) {
return ConvertUtil.list2List(this.listPOSFromDB(connectClusterId), WorkerConnector.class);
}
@Override
public List<WorkerConnector> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<WorkerConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(WorkerConnectorPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return ConvertUtil.list2List(workerConnectorDAO.selectList(lambdaQueryWrapper), WorkerConnector.class);
}
@Override
public Result<Void> actionTask(TaskActionDTO dto) {
if (!dto.getAction().equals(RESTART.getValue())) {
return Result.buildFailure(ResultStatus.OPERATION_FORBIDDEN);
}
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(dto.getConnectClusterId());
if (connectCluster == null) {
return Result.buildFailure(ResultStatus.NOT_EXIST);
}
String url = String.format(RESTART_TASK_URI, connectCluster.getClusterUrl(), dto.getConnectorName(), dto.getTaskId());
try {
restTool.postObjectWithJsonContent(url, null, String.class);
} catch (Exception e) {
LOGGER.error("method=actionTask||connectClusterId={}||connectorName={}||taskId={}||restart failed||msg=exception",
dto.getConnectClusterId(), dto.getConnectorName(), dto.getTaskId(), e);
}
return Result.buildSuc();
}
@Override
public List<WorkerConnector> getWorkerConnectorListFromCluster(ConnectCluster connectCluster, String connectorName) {
Map<String, ConnectWorker> workerMap = workerService.listFromDB(connectCluster.getId()).stream().collect(Collectors.toMap(elem -> elem.getWorkerId(), Function.identity()));
List<WorkerConnector> workerConnectorList = new ArrayList<>();
Result<KSConnectorStateInfo> ret = connectorService.getConnectorStateInfoFromCluster(connectCluster.getId(), connectorName);
if (!ret.hasData()) {
return workerConnectorList;
}
KSConnectorStateInfo ksConnectorStateInfo = ret.getData();
for (KSTaskState task : ksConnectorStateInfo.getTasks()) {
WorkerConnector workerConnector = new WorkerConnector(connectCluster.getKafkaClusterPhyId(), connectCluster.getId(), ksConnectorStateInfo.getName(), workerMap.get(task.getWorkerId()).getMemberId(), task.getId(), task.getState(), task.getWorkerId(), task.getTrace());
workerConnectorList.add(workerConnector);
}
return workerConnectorList;
}
private List<WorkerConnectorPO> listPOSFromDB(Long connectClusterId) {
LambdaQueryWrapper<WorkerConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(WorkerConnectorPO::getConnectClusterId, connectClusterId);
return workerConnectorDAO.selectList(lambdaQueryWrapper);
}
}
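
actionTask above forwards the restart to the Connect REST API (POST {workerUrl}/connectors/{connector}/tasks/{taskId}/restart) and treats any exception as non-fatal. A minimal sketch of that call with the JDK HTTP client; host, connector name, and task id are placeholders:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RestartTaskSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical worker URL, connector name and task id
        String url = "http://connect-worker:8083/connectors/my-connector/tasks/0/restart";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(url))
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Connect typically answers 204 No Content when the restart is accepted
        System.out.println(response.statusCode());
    }
}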

View File

@@ -0,0 +1,114 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.worker.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectWorkerPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.connector.ClusterWorkerOverviewVO;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectWorkerDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Service
public class WorkerServiceImpl implements WorkerService {
@Autowired
private ConnectWorkerDAO connectWorkerDAO;
@Autowired
private ConnectorService connectorService;
@Autowired
private ConnectClusterService connectClusterService;
@Override
public void batchReplaceInDB(Long connectClusterId, List<ConnectWorker> workerList) {
Map<String, ConnectWorkerPO> oldMap = new HashMap<>();
for (ConnectWorkerPO oldPO: this.listPOSFromDB(connectClusterId)) {
oldMap.put(oldPO.getMemberId(), oldPO);
}
for (ConnectWorker worker: workerList) {
try {
ConnectWorkerPO newPO = ConvertUtil.obj2Obj(worker, ConnectWorkerPO.class);
ConnectWorkerPO oldPO = oldMap.remove(newPO.getMemberId());
if (oldPO == null) {
connectWorkerDAO.insert(newPO);
} else {
newPO.setId(oldPO.getId());
connectWorkerDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldMap.values().forEach(elem -> connectWorkerDAO.deleteById(elem.getId()));
} catch (Exception e) {
// ignore
}
}
@Override
public List<ConnectWorker> listFromDB(Long connectClusterId) {
return ConvertUtil.list2List(this.listPOSFromDB(connectClusterId), ConnectWorker.class);
}
@Override
public PaginationResult<ClusterWorkerOverviewVO> pageWorkByKafkaClusterPhy(Long kafkaClusterPhyId, PaginationBaseDTO dto) {
IPage<ConnectWorkerPO> pageInfo = new Page<>(dto.getPageNo(), dto.getPageSize());
LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectWorkerPO::getKafkaClusterPhyId, kafkaClusterPhyId);
lambdaQueryWrapper.like(!ValidateUtils.isBlank(dto.getSearchKeywords()), ConnectWorkerPO::getHost, dto.getSearchKeywords());
connectWorkerDAO.selectPage(pageInfo, lambdaQueryWrapper);
List<ConnectWorkerPO> connectWorkerPOS = pageInfo.getRecords();
List<ClusterWorkerOverviewVO> clusterWorkerOverviewVOS = new ArrayList<>();
for(ConnectWorkerPO connectWorkerPO : connectWorkerPOS){
Long connectClusterId = connectWorkerPO.getConnectClusterId();
ClusterWorkerOverviewVO clusterWorkerOverviewVO = new ClusterWorkerOverviewVO();
clusterWorkerOverviewVO.setConnectClusterId(connectClusterId);
clusterWorkerOverviewVO.setWorkerHost(connectWorkerPO.getHost());
clusterWorkerOverviewVO.setConnectorCount(connectorService.countByConnectClusterIdFromDB(connectClusterId));
clusterWorkerOverviewVO.setConnectClusterName(connectClusterService.getClusterName(connectClusterId));
clusterWorkerOverviewVO.setTaskCount(1);
clusterWorkerOverviewVOS.add(clusterWorkerOverviewVO);
}
return PaginationResult.buildSuc(clusterWorkerOverviewVOS, pageInfo);
}
@Override
public List<ConnectWorker> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectWorkerPO::getKafkaClusterPhyId, kafkaClusterPhyId);
return ConvertUtil.list2List(connectWorkerDAO.selectList(lambdaQueryWrapper), ConnectWorker.class);
}
/**************************************************** private method ****************************************************/
private List<ConnectWorkerPO> listPOSFromDB(Long connectClusterId) {
LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectWorkerPO::getConnectClusterId, connectClusterId);
return connectWorkerDAO.selectList(lambdaQueryWrapper);
}
}

View File

@@ -1,14 +1,15 @@
package com.xiaojukeji.know.streaming.km.core.service.group;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.Group;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSGroupDescription;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.common.TopicPartition;
import java.util.Date;
@@ -19,16 +20,16 @@ public interface GroupService {
/**
 * Fetch the consumer group name list from Kafka
*/
List<String> listGroupsFromKafka(Long clusterPhyId) throws NotExistException, AdminOperateException;
List<String> listGroupsFromKafka(ClusterPhy clusterPhy) throws AdminOperateException;
/**
 * Fetch consumer group details from Kafka
*/
Group getGroupFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException;
Group getGroupFromKafka(ClusterPhy clusterPhy, String groupName) throws NotExistException, AdminOperateException;
Map<TopicPartition, Long> getGroupOffsetFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException;
ConsumerGroupDescription getGroupDescriptionFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException;
KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, String groupName) throws AdminOperateException;
Result<Void> resetGroupOffsets(Long clusterPhyId, String groupName, Map<TopicPartition, Long> offsetMap, String operator) throws NotExistException, AdminOperateException;
@@ -57,6 +58,7 @@ public interface GroupService {
/**
 * DB GroupTopic related APIs
*/
List<GroupMemberPO> listGroupByCluster(Long clusterPhyId);
List<GroupMemberPO> listGroupByTopic(Long clusterPhyId, String topicName);
PaginationResult<GroupMemberPO> pagingGroupMembers(Long clusterPhyId,

View File

@@ -6,6 +6,7 @@ import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricGroupPartitionDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.GroupMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
@@ -24,16 +25,14 @@ import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateSer
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseMetricService;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.GroupMetricESDAO;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.GroupMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems.*;
/**
* @author didi
@@ -192,36 +191,34 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe
metricsList.add(metrics);
}
for (String topicName: groupOffsetMap.keySet().stream().map(elem -> elem.topic()).collect(Collectors.toSet())) {
Result<Map<TopicPartition, Long>> offsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterId, topicName, OffsetSpec.latest(), null);
if (!offsetMapResult.hasData()) {
                    // Failed to fetch offsets for this partition
Result<Map<TopicPartition, Long>> offsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterId, new ArrayList<>(groupOffsetMap.keySet()), KSOffsetSpec.latest());
if (!offsetMapResult.hasData()) {
                // Fetch failed, return the metrics collected so far
return Result.buildSuc(metricsList);
}
for (Map.Entry<TopicPartition, Long> entry: offsetMapResult.getData().entrySet()) {
                    // Build the GROUP_METRIC_LOG_END_OFFSET metric
GroupMetrics metrics = new GroupMetrics(clusterId, entry.getKey().partition(), entry.getKey().topic(), groupName, false);
metrics.putMetric(GROUP_METRIC_LOG_END_OFFSET, entry.getValue().floatValue());
metricsList.add(metrics);
Long groupOffset = groupOffsetMap.get(entry.getKey());
if (groupOffset == null) {
                    // No committed offset for this partition, skip it
continue;
}
for (Map.Entry<TopicPartition, Long> entry: offsetMapResult.getData().entrySet()) {
                // Build the GROUP_METRIC_LOG_END_OFFSET metric
GroupMetrics metrics = new GroupMetrics(clusterId, entry.getKey().partition(), entry.getKey().topic(), groupName, false);
metrics.putMetric(GROUP_METRIC_LOG_END_OFFSET, entry.getValue().floatValue());
metricsList.add(metrics);
                // Build the GROUP_METRIC_LAG metric
GroupMetrics groupMetrics = new GroupMetrics(clusterId, entry.getKey().partition(), entry.getKey().topic(), groupName, false);
groupMetrics.putMetric(GROUP_METRIC_LAG, Math.max(0L, entry.getValue() - groupOffset) * 1.0f);
Long groupOffset = groupOffsetMap.get(entry.getKey());
if (groupOffset == null) {
                // No committed offset for this partition, skip it
continue;
}
                // Build the GROUP_METRIC_LAG metric
GroupMetrics groupMetrics = new GroupMetrics(clusterId, entry.getKey().partition(), entry.getKey().topic(), groupName, false);
groupMetrics.putMetric(GROUP_METRIC_LAG, Math.max(0L, entry.getValue() - groupOffset) * 1.0f);
metricsList.add(groupMetrics);
}
metricsList.add(groupMetrics);
}
return Result.buildSuc(metricsList);
} catch (Exception e) {
LOGGER.error("class=GroupMetricServiceImpl||method=getLagFromAdminClient||clusterPhyId={}||groupName={}||metrics={}||msg=exception", clusterId, groupName, metric, e);
LOGGER.error("method=getLagFromAdminClient||clusterPhyId={}||groupName={}||metrics={}||msg=exception", clusterId, groupName, metric, e);
return Result.buildFailure(VC_KAFKA_CLIENT_ERROR);
}
}
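
The hunk above switches the log-end-offset lookup to a single batched KSOffsetSpec.latest() call and then derives lag per partition. The lag arithmetic itself is just the floored difference, for example (numbers hypothetical):

public class GroupLagSketch {
    public static void main(String[] args) {
        long logEndOffset = 1250L;  // latest offset of the partition (GROUP_METRIC_LOG_END_OFFSET)
        long groupOffset = 1100L;   // offset committed by the consumer group
        float lag = Math.max(0L, logEndOffset - groupOffset) * 1.0f;  // GROUP_METRIC_LAG
        System.out.println(lag);    // 150.0
    }
}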

View File

@@ -7,8 +7,10 @@ import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.Group;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopicMember;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.*;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
@@ -17,6 +19,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupPO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.converter.GroupConverter;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
@@ -24,9 +27,10 @@ import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.kafka.KSPartialKafkaAdminClient;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupMemberDAO;
@@ -36,6 +40,7 @@ import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.time.Duration;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -43,7 +48,7 @@ import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_SEARCH_GROUP;
@Service
public class GroupServiceImpl extends BaseVersionControlService implements GroupService {
public class GroupServiceImpl extends BaseKafkaVersionControlService implements GroupService {
private static final ILog log = LogFactory.getLog(GroupServiceImpl.class);
@Autowired
@@ -64,11 +69,18 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
}
@Override
public List<String> listGroupsFromKafka(Long clusterPhyId) throws NotExistException, AdminOperateException {
AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId);
public List<String> listGroupsFromKafka(ClusterPhy clusterPhy) throws AdminOperateException {
KSPartialKafkaAdminClient adminClient = null;
try {
ListConsumerGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups(
Properties props = ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class);
if (props == null) {
props = new Properties();
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
adminClient = KSPartialKafkaAdminClient.create(props);
KSListGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups(
new ListConsumerGroupsOptions()
.timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
);
@@ -80,33 +92,46 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
return groupNameList;
} catch (Exception e) {
log.error("method=getGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
log.error("method=listGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
} finally {
if (adminClient != null) {
try {
adminClient.close(Duration.ofSeconds(10));
} catch (Exception e) {
// ignore
}
}
}
}
@Override
public Group getGroupFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException {
public Group getGroupFromKafka(ClusterPhy clusterPhy, String groupName) throws NotExistException, AdminOperateException {
        // Fetch the consumer group details
ConsumerGroupDescription groupDescription = this.getGroupDescriptionFromKafka(clusterPhyId, groupName);
KSGroupDescription groupDescription = this.getGroupDescriptionFromKafka(clusterPhy, groupName);
if (groupDescription == null) {
return null;
}
Group group = new Group(clusterPhyId, groupName, groupDescription);
Group group = new Group(clusterPhy.getId(), groupName, groupDescription);
        // Fetch which topics this group has consumed
Map<String, GroupTopicMember> memberMap = new HashMap<>();
for (TopicPartition tp : this.getGroupOffsetFromKafka(clusterPhyId, groupName).keySet()) {
for (TopicPartition tp : this.getGroupOffsetFromKafka(clusterPhy.getId(), groupName).keySet()) {
memberMap.putIfAbsent(tp.topic(), new GroupTopicMember(tp.topic(), 0));
}
        // Record member info
for (MemberDescription memberDescription : groupDescription.members()) {
for (KSMemberDescription memberDescription : groupDescription.members()) {
if (group.getType() == GroupTypeEnum.CONNECT_CLUSTER) {
continue;
}
Set<TopicPartition> partitionList = new HashSet<>();
if (!ValidateUtils.isNull(memberDescription.assignment().topicPartitions())) {
partitionList = memberDescription.assignment().topicPartitions();
KSMemberConsumerAssignment assignment = (KSMemberConsumerAssignment) memberDescription.assignment();
if (!ValidateUtils.isNull(assignment.topicPartitions())) {
partitionList = assignment.topicPartitions();
}
Set<String> topicNameSet = partitionList.stream().map(elem -> elem.topic()).collect(Collectors.toSet());
@@ -143,20 +168,36 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
}
@Override
public ConsumerGroupDescription getGroupDescriptionFromKafka(Long clusterPhyId, String groupName) throws NotExistException, AdminOperateException {
AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId);
public KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, String groupName) throws AdminOperateException {
KSPartialKafkaAdminClient adminClient = null;
try {
DescribeConsumerGroupsResult describeConsumerGroupsResult = adminClient.describeConsumerGroups(
Properties props = ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class);
if (props == null) {
props = new Properties();
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
adminClient = KSPartialKafkaAdminClient.create(props);
KSDescribeGroupsResult describeGroupsResult = adminClient.describeConsumerGroups(
Arrays.asList(groupName),
new DescribeConsumerGroupsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS).includeAuthorizedOperations(false)
);
return describeConsumerGroupsResult.all().get().get(groupName);
return describeGroupsResult.all().get().get(groupName);
} catch(Exception e){
log.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e);
log.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhy.getId(), groupName, e);
throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
} finally {
if (adminClient != null) {
try {
adminClient.close(Duration.ofSeconds(10));
} catch (Exception e) {
// ignore
}
}
}
}
@@ -183,6 +224,14 @@ public class GroupServiceImpl extends BaseVersionControlService implements Group
return GroupStateEnum.getByState(poList.get(0).getState());
}
@Override
public List<GroupMemberPO> listGroupByCluster(Long clusterPhyId) {
LambdaQueryWrapper<GroupMemberPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId);
return groupMemberDAO.selectList(lambdaQueryWrapper);
}
@Override
public List<GroupMemberPO> listGroupByTopic(Long clusterPhyId, String topicName) {
LambdaQueryWrapper<GroupMemberPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();

View File

@@ -0,0 +1,25 @@
package com.xiaojukeji.know.streaming.km.core.service.ha;
import com.xiaojukeji.know.streaming.km.common.bean.entity.ha.HaActiveStandbyRelation;
import com.xiaojukeji.know.streaming.km.common.enums.ha.HaResTypeEnum;
import java.util.List;
public interface HaActiveStandbyRelationService {
/**
 * Add or update, idempotent
*/
void batchReplaceTopicHA(Long activeClusterPhyId, Long standbyClusterPhyId, List<String> topicNameList);
/**
 * Delete
*/
void batchDeleteTopicHA(Long activeClusterPhyId, Long standbyClusterPhyId, List<String> topicNameList);
/**
 * Query by cluster ID
*/
List<HaActiveStandbyRelation> listByClusterAndType(Long firstClusterId, HaResTypeEnum haResTypeEnum);
List<HaActiveStandbyRelation> listAllTopicHa();
}

View File

@@ -0,0 +1,106 @@
package com.xiaojukeji.know.streaming.km.core.service.ha.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.xiaojukeji.know.streaming.km.common.bean.entity.ha.HaActiveStandbyRelation;
import com.xiaojukeji.know.streaming.km.common.bean.po.ha.HaActiveStandbyRelationPO;
import com.xiaojukeji.know.streaming.km.common.enums.ha.HaResTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.ha.HaActiveStandbyRelationService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.ha.HaActiveStandbyRelationDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.ha.HaResTypeEnum.MIRROR_TOPIC;
@Service
public class HaActiveStandbyRelationServiceImpl implements HaActiveStandbyRelationService {
@Autowired
private HaActiveStandbyRelationDAO haActiveStandbyRelationDAO;
@Override
public void batchReplaceTopicHA(Long activeClusterPhyId, Long standbyClusterPhyId, List<String> topicNameList) {
Map<String, HaActiveStandbyRelationPO> poMap = this.listPOs(activeClusterPhyId, standbyClusterPhyId, MIRROR_TOPIC)
.stream()
.collect(Collectors.toMap(HaActiveStandbyRelationPO::getResName, Function.identity()));
for (String topicName: topicNameList) {
HaActiveStandbyRelationPO oldPO = poMap.get(topicName);
if (oldPO != null) {
continue;
}
try {
haActiveStandbyRelationDAO.insert(new HaActiveStandbyRelationPO(activeClusterPhyId, standbyClusterPhyId, topicName, MIRROR_TOPIC.getCode()));
} catch (DuplicateKeyException dke) {
// ignore
}
}
}
@Override
public void batchDeleteTopicHA(Long activeClusterPhyId, Long standbyClusterPhyId, List<String> topicNameList) {
Map<String, HaActiveStandbyRelationPO> poMap = this.listPOs(activeClusterPhyId, standbyClusterPhyId, MIRROR_TOPIC)
.stream()
.collect(Collectors.toMap(HaActiveStandbyRelationPO::getResName, Function.identity()));
for (String topicName: topicNameList) {
HaActiveStandbyRelationPO oldPO = poMap.get(topicName);
if (oldPO == null) {
continue;
}
haActiveStandbyRelationDAO.deleteById(oldPO.getId());
}
}
@Override
public List<HaActiveStandbyRelation> listByClusterAndType(Long firstClusterId, HaResTypeEnum haResTypeEnum) {
// Query the HA relation list
List<HaActiveStandbyRelationPO> poList = this.listPOs(firstClusterId, haResTypeEnum);
if (ValidateUtils.isNull(poList)) {
return new ArrayList<>();
}
return ConvertUtil.list2List(poList, HaActiveStandbyRelation.class);
}
@Override
public List<HaActiveStandbyRelation> listAllTopicHa() {
LambdaQueryWrapper<HaActiveStandbyRelationPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HaActiveStandbyRelationPO::getResType, MIRROR_TOPIC.getCode());
List<HaActiveStandbyRelationPO> poList = haActiveStandbyRelationDAO.selectList(lambdaQueryWrapper);
if (ValidateUtils.isNull(poList)) {
return new ArrayList<>();
}
return ConvertUtil.list2List(poList, HaActiveStandbyRelation.class);
}
private List<HaActiveStandbyRelationPO> listPOs(Long firstClusterId, HaResTypeEnum haResTypeEnum) {
LambdaQueryWrapper<HaActiveStandbyRelationPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HaActiveStandbyRelationPO::getResType, haResTypeEnum.getCode());
lambdaQueryWrapper.and(lambda ->
lambda.eq(HaActiveStandbyRelationPO::getActiveClusterPhyId, firstClusterId).or().eq(HaActiveStandbyRelationPO::getStandbyClusterPhyId, firstClusterId)
);
// Query the HA relation list
return haActiveStandbyRelationDAO.selectList(lambdaQueryWrapper);
}
private List<HaActiveStandbyRelationPO> listPOs(Long activeClusterPhyId, Long standbyClusterPhyId, HaResTypeEnum haResTypeEnum) {
LambdaQueryWrapper<HaActiveStandbyRelationPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HaActiveStandbyRelationPO::getResType, haResTypeEnum.getCode());
lambdaQueryWrapper.eq(HaActiveStandbyRelationPO::getActiveClusterPhyId, activeClusterPhyId);
lambdaQueryWrapper.eq(HaActiveStandbyRelationPO::getStandbyClusterPhyId, standbyClusterPhyId);
// Query the HA relation list
return haActiveStandbyRelationDAO.selectList(lambdaQueryWrapper);
}
}
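Because existing rows are skipped on insert (with DuplicateKeyException swallowed) and missing rows are skipped on delete, repeated calls are safe. A rough sketch of the expected behavior, assuming `service` is an injected HaActiveStandbyRelationService and the IDs and topic name are made up:

void demoIdempotency(HaActiveStandbyRelationService service) {
    service.batchReplaceTopicHA(1L, 2L, Arrays.asList("orders"));
    service.batchReplaceTopicHA(1L, 2L, Arrays.asList("orders"));  // no-op: the relation already exists
    service.batchDeleteTopicHA(1L, 2L, Arrays.asList("orders"));
    service.batchDeleteTopicHA(1L, 2L, Arrays.asList("orders"));   // no-op: the relation is already gone
}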

View File

@@ -4,7 +4,7 @@ import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
@@ -21,15 +21,17 @@ public abstract class AbstractHealthCheckService {
protected static final Map<
String,
Function<Tuple<ClusterPhyParam, BaseClusterHealthConfig>, HealthCheckResult>
Function<Tuple<ClusterParam, BaseClusterHealthConfig>, HealthCheckResult>
> functionMap = new ConcurrentHashMap<>();
public abstract List<ClusterPhyParam> getResList(Long clusterPhyId);
public abstract List<ClusterParam> getResList(Long clusterId);
public abstract HealthCheckDimensionEnum getHealthCheckDimensionEnum();
public HealthCheckResult checkAndGetResult(ClusterPhyParam clusterPhyParam, BaseClusterHealthConfig clusterHealthConfig) {
if (ValidateUtils.anyNull(clusterPhyParam.getClusterPhyId(), clusterPhyParam, clusterHealthConfig)) {
public abstract Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId);
public HealthCheckResult checkAndGetResult(ClusterParam clusterParam, BaseClusterHealthConfig clusterHealthConfig) {
if (ValidateUtils.anyNull(clusterParam, clusterHealthConfig)) {
return null;
}
@@ -39,16 +41,18 @@ public abstract class AbstractHealthCheckService {
return null;
}
Function<Tuple<ClusterPhyParam, BaseClusterHealthConfig>, HealthCheckResult> function = functionMap.get(clusterHealthConfig.getCheckNameEnum().getConfigName());
Function<Tuple<ClusterParam, BaseClusterHealthConfig>, HealthCheckResult> function = functionMap.get(clusterHealthConfig.getCheckNameEnum().getConfigName());
if (function == null) {
return null;
}
try {
return function.apply(new Tuple<>(clusterPhyParam, clusterHealthConfig));
return function.apply(new Tuple<>(clusterParam, clusterHealthConfig));
} catch (Exception e) {
log.error("method=checkAndGetResult||clusterPhyParam={}||clusterHealthConfig={}||errMsg=exception!",
clusterPhyParam, clusterHealthConfig, e);
log.error(
"method=checkAndGetResult||clusterParam={}||clusterHealthConfig={}||errMsg=exception!",
clusterParam, clusterHealthConfig, e
);
}
return null;
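The map above keys each check function by its config name, and checkAndGetResult resolves and invokes the matching function for every resource returned by getResList. A minimal subclass sketch of that pattern, with a placeholder check name and no real metric lookup (the concrete checkers in the files below do the actual work):

import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import java.util.Collections;
import java.util.List;

@Service
public class HealthCheckExampleService extends AbstractHealthCheckService {
    @PostConstruct
    private void init() {
        // Register the check under its config name so checkAndGetResult can dispatch to it;
        // "exampleCheckConfigName" is a placeholder for a real HealthCheckNameEnum config name.
        functionMap.putIfAbsent("exampleCheckConfigName", this::checkSomething);
    }

    @Override
    public List<ClusterParam> getResList(Long clusterId) {
        // One param per resource to check; here the whole cluster is the only resource.
        return Collections.singletonList(new ClusterPhyParam(clusterId));
    }

    @Override
    public HealthCheckDimensionEnum getHealthCheckDimensionEnum() {
        return HealthCheckDimensionEnum.CLUSTER;
    }

    @Override
    public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
        return this.getHealthCheckDimensionEnum().getDimension();
    }

    private HealthCheckResult checkSomething(Tuple<ClusterParam, BaseClusterHealthConfig> tuple) {
        // A real check reads a metric here and compares it against the configured threshold.
        return null;
    }
}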

View File

@@ -8,7 +8,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.He
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BrokerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.broker.BrokerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
@@ -17,8 +17,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems;
import lombok.Data;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -27,7 +26,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@Data
@Service
public class HealthCheckBrokerService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckBrokerService.class);
@@ -45,11 +43,12 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
}
@Override
public List<ClusterPhyParam> getResList(Long clusterPhyId) {
List<ClusterPhyParam> paramList = new ArrayList<>();
for (Broker broker: brokerService.listAliveBrokersFromDB(clusterPhyId)) {
public List<ClusterParam> getResList(Long clusterPhyId) {
List<ClusterParam> paramList = new ArrayList<>();
for (Broker broker: brokerService.listAliveBrokersFromCacheFirst(clusterPhyId)) {
paramList.add(new BrokerParam(clusterPhyId, broker.getBrokerId()));
}
return paramList;
}
@@ -58,10 +57,15 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
return HealthCheckDimensionEnum.BROKER;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
return this.getHealthCheckDimensionEnum().getDimension();
}
/**
* Broker network processor average idle too low
*/
private HealthCheckResult checkBrokerNetworkProcessorAvgIdleTooLow(Tuple<ClusterPhyParam, BaseClusterHealthConfig> paramTuple) {
private HealthCheckResult checkBrokerNetworkProcessorAvgIdleTooLow(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
BrokerParam param = (BrokerParam) paramTuple.getV1();
HealthCompareValueConfig singleConfig = (HealthCompareValueConfig) paramTuple.getV2();
@@ -72,8 +76,11 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
String.valueOf(param.getBrokerId())
);
Result<BrokerMetrics> metricsResult = brokerMetricService.getLatestMetricsFromES(
param.getClusterPhyId(), param.getBrokerId());
Result<BrokerMetrics> metricsResult = brokerMetricService.collectBrokerMetricsFromKafka(
param.getClusterPhyId(),
param.getBrokerId(),
BrokerMetricVersionItems.BROKER_METRIC_NETWORK_RPO_AVG_IDLE
);
if (metricsResult.failed()) {
log.error("method=checkBrokerNetworkProcessorAvgIdleTooLow||param={}||config={}||result={}||errMsg=get metrics failed",
@@ -81,14 +88,14 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
return null;
}
Float avgIdle = metricsResult.getData().getMetrics().get( BrokerMetricVersionItems.BROKER_METRIC_NETWORK_RPO_AVG_IDLE);
Float avgIdle = metricsResult.getData().getMetrics().get(BrokerMetricVersionItems.BROKER_METRIC_NETWORK_RPO_AVG_IDLE);
if (avgIdle == null) {
log.error("method=checkBrokerNetworkProcessorAvgIdleTooLow||param={}||config={}||result={}||errMsg=get metrics failed",
param, singleConfig, metricsResult);
return null;
}
checkResult.setPassed(avgIdle >= singleConfig.getValue()? 1: 0);
checkResult.setPassed(avgIdle >= singleConfig.getValue()? Constant.YES: Constant.NO);
return checkResult;
}
@@ -96,7 +103,7 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
/**
* Broker request queue full
*/
private HealthCheckResult checkBrokerRequestQueueFull(Tuple<ClusterPhyParam, BaseClusterHealthConfig> paramTuple) {
private HealthCheckResult checkBrokerRequestQueueFull(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
BrokerParam param = (BrokerParam) paramTuple.getV1();
HealthCompareValueConfig singleConfig = (HealthCompareValueConfig) paramTuple.getV2();
@@ -110,7 +117,7 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
Result<BrokerMetrics> metricsResult = brokerMetricService.collectBrokerMetricsFromKafka(
param.getClusterPhyId(),
param.getBrokerId(),
Arrays.asList( BrokerMetricVersionItems.BROKER_METRIC_TOTAL_REQ_QUEUE)
Arrays.asList(BrokerMetricVersionItems.BROKER_METRIC_TOTAL_REQ_QUEUE)
);
if (metricsResult.failed()) {
@@ -119,7 +126,7 @@ public class HealthCheckBrokerService extends AbstractHealthCheckService {
return null;
}
Float queueSize = metricsResult.getData().getMetrics().get( BrokerMetricVersionItems.BROKER_METRIC_TOTAL_REQ_QUEUE);
Float queueSize = metricsResult.getData().getMetrics().get(BrokerMetricVersionItems.BROKER_METRIC_TOTAL_REQ_QUEUE);
if (queueSize == null) {
log.error("method=checkBrokerRequestQueueFull||param={}||config={}||result={}||errMsg=get metrics failed",
param, singleConfig, metricsResult);

View File

@@ -6,6 +6,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.Ba
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
@@ -13,7 +14,7 @@ import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimension
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ClusterMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ClusterMetricVersionItems;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -34,7 +35,7 @@ public class HealthCheckClusterService extends AbstractHealthCheckService {
}
@Override
public List<ClusterPhyParam> getResList(Long clusterPhyId) {
public List<ClusterParam> getResList(Long clusterPhyId) {
return Arrays.asList(new ClusterPhyParam(clusterPhyId));
}
@@ -43,16 +44,21 @@ public class HealthCheckClusterService extends AbstractHealthCheckService {
return HealthCheckDimensionEnum.CLUSTER;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
return this.getHealthCheckDimensionEnum().getDimension();
}
/**
* Check for NoController
*/
private HealthCheckResult checkClusterNoController(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ClusterPhyParam param = singleConfigSimpleTuple.getV1();
private HealthCheckResult checkClusterNoController(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ClusterPhyParam param = (ClusterPhyParam) singleConfigSimpleTuple.getV1();
HealthCompareValueConfig valueConfig = (HealthCompareValueConfig) singleConfigSimpleTuple.getV2();
Result<ClusterMetrics> clusterMetricsResult = clusterMetricService.getLatestMetricsFromES(param.getClusterPhyId(), Arrays.asList(ClusterMetricVersionItems.CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT));
if (clusterMetricsResult.failed() || !clusterMetricsResult.hasData()) {
log.error("method=checkClusterNoController||param={}||config={}||result={}||errMsg=get metrics failed",
log.error("method=checkClusterNoController||param={}||config={}||result={}||errMsg=get metrics from es failed",
param, valueConfig, clusterMetricsResult);
return null;
}
@@ -65,7 +71,11 @@ public class HealthCheckClusterService extends AbstractHealthCheckService {
);
Float activeController = clusterMetricsResult.getData().getMetric(ClusterMetricVersionItems.CLUSTER_METRIC_ACTIVE_CONTROLLER_COUNT);
if (activeController == null) {
log.error("method=checkClusterNoController||param={}||config={}||errMsg=get metrics from es failed, activeControllerCount is null",
param, valueConfig);
return null;
}
checkResult.setPassed(activeController.intValue() != valueConfig.getValue().intValue() ? 0: 1);

View File

@@ -0,0 +1,109 @@
package com.xiaojukeji.know.streaming.km.core.service.health.checker.connect;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ConnectClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectClusterMetricVersionItems.CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE;
/**
* @author wyb
* @date 2022/11/9
*/
@Service
public class HealthCheckConnectClusterService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckConnectClusterService.class);
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private ConnectClusterMetricService connectClusterMetricService;
@PostConstruct
private void init() {
functionMap.putIfAbsent(HealthCheckNameEnum.CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE.getConfigName(), this::checkStartupFailurePercentage);
}
@Override
public List<ClusterParam> getResList(Long connectClusterId) {
List<ClusterParam> paramList = new ArrayList<>();
if (LoadedConnectClusterCache.containsByPhyId(connectClusterId)) {
paramList.add(new ConnectClusterParam(connectClusterId));
}
return paramList;
}
@Override
public HealthCheckDimensionEnum getHealthCheckDimensionEnum() {
return HealthCheckDimensionEnum.CONNECT_CLUSTER;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
List<ConnectCluster> clusterList = connectClusterService.listByKafkaCluster(kafkaClusterPhyId);
if (ValidateUtils.isEmptyList(clusterList)) {
return null;
}
return this.getHealthCheckDimensionEnum().getDimension();
}
private HealthCheckResult checkStartupFailurePercentage(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
ConnectClusterParam param = (ConnectClusterParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String metricName = CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE;
Result<ConnectClusterMetrics> ret = connectClusterMetricService.collectConnectClusterMetricsFromKafka(connectClusterId, metricName);
if (!ret.hasData()) {
log.error("method=checkStartupFailurePercentage||connectClusterId={}||metricName={}||errMsg=get metrics failed",
param.getConnectClusterId(), metricName);
return null;
}
Float value = ret.getData().getMetric(metricName);
if (value == null) {
log.error("method=checkStartupFailurePercentage||connectClusterId={}||metricName={}||errMsg=get metrics failed",
param.getConnectClusterId(), metricName);
return null;
}
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectClusterId);
HealthCheckResult checkResult = new HealthCheckResult(
HealthCheckDimensionEnum.CONNECT_CLUSTER.getDimension(),
HealthCheckNameEnum.CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE.getConfigName(),
connectCluster.getKafkaClusterPhyId(),
String.valueOf(connectClusterId)
);
checkResult.setPassed(value <= compareConfig.getValue() ? Constant.YES : Constant.NO);
return checkResult;
}
}

View File

@@ -0,0 +1,133 @@
package com.xiaojukeji.know.streaming.km.core.service.health.checker.connect;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.connect.ConnectorParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT;
/**
* @author wyb
* @date 2022/11/8
*/
@Service
public class HealthCheckConnectorService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckConnectorService.class);
@Autowired
private ConnectorService connectorService;
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private ConnectorMetricService connectorMetricService;
@PostConstruct
private void init() {
functionMap.putIfAbsent(HealthCheckNameEnum.CONNECTOR_FAILED_TASK_COUNT.getConfigName(), this::checkFailedTaskCount);
functionMap.putIfAbsent(HealthCheckNameEnum.CONNECTOR_UNASSIGNED_TASK_COUNT.getConfigName(), this::checkUnassignedTaskCount);
}
@Override
public List<ClusterParam> getResList(Long connectClusterId) {
List<ClusterParam> paramList = new ArrayList<>();
List<ConnectorPO> connectorPOList = connectorService.listByConnectClusterIdFromDB(connectClusterId);
for (ConnectorPO connectorPO : connectorPOList) {
paramList.add(new ConnectorParam(connectClusterId, connectorPO.getConnectorName(), connectorPO.getConnectorType()));
}
return paramList;
}
@Override
public HealthCheckDimensionEnum getHealthCheckDimensionEnum() {
return HealthCheckDimensionEnum.CONNECTOR;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
List<ConnectCluster> clusterList = connectClusterService.listByKafkaCluster(kafkaClusterPhyId);
if (ValidateUtils.isEmptyList(clusterList)) {
return null;
}
return this.getHealthCheckDimensionEnum().getDimension();
}
private HealthCheckResult checkFailedTaskCount(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
ConnectorParam param = (ConnectorParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String connectorType = param.getConnectorType();
Double compareValue = compareConfig.getValue();
return this.getHealthCompareResult(connectClusterId, connectorName, connectorType, HealthCheckDimensionEnum.CONNECTOR.getDimension(), CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT, HealthCheckNameEnum.CONNECTOR_FAILED_TASK_COUNT, compareValue);
}
private HealthCheckResult checkUnassignedTaskCount(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
ConnectorParam param = (ConnectorParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String connectorType = param.getConnectorType();
Double compareValue = compareConfig.getValue();
return this.getHealthCompareResult(connectClusterId, connectorName, connectorType, HealthCheckDimensionEnum.CONNECTOR.getDimension(), CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT, HealthCheckNameEnum.CONNECTOR_UNASSIGNED_TASK_COUNT, compareValue);
}
public HealthCheckResult getHealthCompareResult(Long connectClusterId, String connectorName, String connectorType, Integer dimension, String metricName, HealthCheckNameEnum healthCheckNameEnum, Double compareValue) {
Result<ConnectorMetrics> ret = connectorMetricService.collectConnectClusterMetricsFromKafka(connectClusterId, connectorName, metricName , ConnectorTypeEnum.getByName(connectorType));
if (!ret.hasData() || ret.getData().getMetric(metricName) == null) {
log.error("method=getHealthCompareResult||connectClusterId={}||connectorName={}||metricName={}||errMsg=get metrics failed",
connectClusterId, connectorName, metricName);
return null;
}
Float value = ret.getData().getMetric(metricName);
HealthCheckResult checkResult = new HealthCheckResult(
dimension,
healthCheckNameEnum.getConfigName(),
connectClusterId,
connectorName
);
checkResult.setPassed(compareValue >= value ? Constant.YES : Constant.NO);
return checkResult;
}
}
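getHealthCompareResult treats the configured value as an upper bound: a check passes only when the collected metric does not exceed it. A tiny worked example with assumed numbers:

// Assumed values, for illustration only: at most 0 failed tasks allowed, 2 observed.
double compareValue = 0.0;  // threshold from HealthCompareValueConfig
float metricValue = 2.0f;   // collected CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT
boolean passed = compareValue >= metricValue;  // false -> the check is recorded as not passed (Constant.NO)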

View File

@@ -0,0 +1,205 @@
package com.xiaojukeji.know.streaming.km.core.service.health.checker.connect.mm2;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.mm2.MirrorMakerTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.connect.mm2.MirrorMakerParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.connect.HealthCheckConnectorService;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.connector.ConnectorMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum.SOURCE;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.MirrorMakerMetricVersionItems.MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MAX;
/**
* @author wyb
* @date 2022/12/21
*/
@Service
public class HealthCheckMirrorMakerService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckMirrorMakerService.class);
@Autowired
private ConnectorService connectorService;
@Autowired
private MirrorMakerService mirrorMakerService;
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private MirrorMakerMetricService mirrorMakerMetricService;
@Autowired
private ConnectorMetricESDAO connectorMetricESDAO;
@Autowired
private HealthCheckConnectorService healthCheckConnectorService;
private static final Long TEN_MIN = 10 * 60 * 1000L;
@PostConstruct
private void init() {
functionMap.put(HealthCheckNameEnum.MIRROR_MAKER_UNASSIGNED_TASK_COUNT.getConfigName(), this::checkUnassignedTaskCount);
functionMap.put(HealthCheckNameEnum.MIRROR_MAKER_FAILED_TASK_COUNT.getConfigName(), this::checkFailedTaskCount);
functionMap.put(HealthCheckNameEnum.MIRROR_MAKER_REPLICATION_LATENCY_MS_MAX.getConfigName(), this::checkReplicationLatencyMsMax);
functionMap.put(HealthCheckNameEnum.MIRROR_MAKER_TOTAL_RECORD_ERRORS.getConfigName(), this::checkTotalRecordErrors);
}
@Override
public List<ClusterParam> getResList(Long connectClusterId) {
List<ClusterParam> paramList = new ArrayList<>();
List<ConnectorPO> mirrorMakerList = connectorService.listByConnectClusterIdFromDB(connectClusterId).stream().filter(elem -> elem.getConnectorType().equals(SOURCE.name()) && elem.getConnectorClassName().equals(MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
if (mirrorMakerList.isEmpty()) {
return paramList;
}
Result<Map<String, MirrorMakerTopic>> ret = mirrorMakerService.getMirrorMakerTopicMap(connectClusterId);
if (!ret.hasData()) {
log.error("method=getResList||connectClusterId={}||get MirrorMakerTopicMap failed!", connectClusterId);
return paramList;
}
Map<String, MirrorMakerTopic> mirrorMakerTopicMap = ret.getData();
for (ConnectorPO mirrorMaker : mirrorMakerList) {
List<MirrorMakerTopic> mirrorMakerTopicList = mirrorMakerService.getMirrorMakerTopicList(mirrorMaker, mirrorMakerTopicMap);
paramList.add(new MirrorMakerParam(connectClusterId, mirrorMaker.getConnectorType(), mirrorMaker.getConnectorName(), mirrorMakerTopicList));
}
return paramList;
}
@Override
public HealthCheckDimensionEnum getHealthCheckDimensionEnum() {
return HealthCheckDimensionEnum.MIRROR_MAKER;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
List<ConnectCluster> clusterList = connectClusterService.listByKafkaCluster(kafkaClusterPhyId);
if (ValidateUtils.isEmptyList(clusterList)) {
return null;
}
return this.getHealthCheckDimensionEnum().getDimension();
}
private HealthCheckResult checkFailedTaskCount(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
MirrorMakerParam param = (MirrorMakerParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
String connectorType = param.getConnectorType();
Double compareValue = compareConfig.getValue();
return healthCheckConnectorService.getHealthCompareResult(connectClusterId, mirrorMakerName, connectorType, HealthCheckDimensionEnum.MIRROR_MAKER.getDimension(), CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT, HealthCheckNameEnum.MIRROR_MAKER_FAILED_TASK_COUNT, compareValue);
}
private HealthCheckResult checkUnassignedTaskCount(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
MirrorMakerParam param = (MirrorMakerParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
String connectorType = param.getConnectorType();
Double compareValue = compareConfig.getValue();
return healthCheckConnectorService.getHealthCompareResult(connectClusterId, mirrorMakerName, connectorType, HealthCheckDimensionEnum.MIRROR_MAKER.getDimension(), CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT, HealthCheckNameEnum.MIRROR_MAKER_UNASSIGNED_TASK_COUNT, compareValue);
}
private HealthCheckResult checkReplicationLatencyMsMax(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
MirrorMakerParam param = (MirrorMakerParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
List<MirrorMakerTopic> mirrorMakerTopicList = param.getMirrorMakerTopicList();
String metricName = MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MAX;
Result<MirrorMakerMetrics> ret = mirrorMakerMetricService.collectMirrorMakerMetricsFromKafka(connectClusterId, mirrorMakerName, mirrorMakerTopicList, metricName);
if (!ret.hasData() || ret.getData().getMetric(metricName) == null) {
log.error("method=checkReplicationLatencyMsMax||connectClusterId={}||metricName={}||errMsg=get metrics failed",
param.getConnectClusterId(), metricName);
return null;
}
Float value = ret.getData().getMetric(metricName);
HealthCheckResult checkResult = new HealthCheckResult(
HealthCheckDimensionEnum.MIRROR_MAKER.getDimension(),
HealthCheckNameEnum.MIRROR_MAKER_REPLICATION_LATENCY_MS_MAX.getConfigName(),
connectClusterId,
mirrorMakerName
);
checkResult.setPassed(value <= compareConfig.getValue() ? Constant.YES : Constant.NO);
return checkResult;
}
private HealthCheckResult checkTotalRecordErrors(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple){
MirrorMakerParam param = (MirrorMakerParam) paramTuple.getV1();
HealthCompareValueConfig compareConfig = (HealthCompareValueConfig) paramTuple.getV2();
Long connectClusterId = param.getConnectClusterId();
String mirrorMakerName = param.getMirrorMakerName();
List<MirrorMakerTopic> mirrorMakerTopicList = param.getMirrorMakerTopicList();
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectClusterId);
Long endTime = System.currentTimeMillis();
Long startTime = endTime - TEN_MIN;
Tuple<Long, String> connectClusterIdAndName = new Tuple<>(connectClusterId, mirrorMakerName);
String metricName = CONNECTOR_METRIC_TOTAL_RECORD_ERRORS;
Table<String, Tuple<Long, String>, List<MetricPointVO>> table = connectorMetricESDAO.listMetricsByConnectors(connectCluster.getKafkaClusterPhyId(), Arrays.asList(metricName), "avg", Arrays.asList(connectClusterIdAndName), startTime, endTime);
List<MetricPointVO> pointVOList = table.get(metricName, connectClusterIdAndName);
if (ValidateUtils.isEmptyList(pointVOList)) {
// guard: the ES query may return no points for the window, and sorting a null list would throw an NPE
return null;
}
Collections.sort(pointVOList, (p1, p2) -> p2.getTimeStamp().compareTo(p1.getTimeStamp()));
HealthCheckResult checkResult = new HealthCheckResult(
HealthCheckDimensionEnum.MIRROR_MAKER.getDimension(),
HealthCheckNameEnum.MIRROR_MAKER_TOTAL_RECORD_ERRORS.getConfigName(),
connectClusterId,
mirrorMakerName
);
double diff = 0;
if (pointVOList.size() > 1) {
diff = Double.valueOf(pointVOList.get(0).getValue()) - Double.valueOf(pointVOList.get(1).getValue());
}
checkResult.setPassed(diff <= compareConfig.getValue() ? Constant.YES : Constant.NO);
return checkResult;
}
}
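Because total-record-errors is a cumulative counter, checkTotalRecordErrors compares the two most recent points pulled from ES over the ten-minute window and evaluates the delta rather than the absolute value. A small worked example with assumed samples:

// Assumed samples, newest first after the sort: cumulative error counts of 105 and 100.
double latest = 105d;
double previous = 100d;
double diff = latest - previous;  // 5 new record errors inside the window
boolean passed = diff <= 10d;     // true when the configured threshold is, say, 10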

View File

@@ -5,10 +5,11 @@ import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthDetectedInLatestMinutesConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.GroupParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
@@ -16,7 +17,6 @@ import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupMetricService;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import lombok.Data;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -24,9 +24,8 @@ import javax.annotation.PostConstruct;
import java.util.List;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.GroupMetricVersionItems.GROUP_METRIC_STATE;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems.GROUP_METRIC_STATE;
@Data
@Service
public class HealthCheckGroupService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckGroupService.class);
@@ -43,7 +42,7 @@ public class HealthCheckGroupService extends AbstractHealthCheckService {
}
@Override
public List<ClusterPhyParam> getResList(Long clusterPhyId) {
public List<ClusterParam> getResList(Long clusterPhyId) {
return groupService.getGroupsFromDB(clusterPhyId).stream().map(elem -> new GroupParam(clusterPhyId, elem)).collect(Collectors.toList());
}
@@ -52,10 +51,15 @@ public class HealthCheckGroupService extends AbstractHealthCheckService {
return HealthCheckDimensionEnum.GROUP;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
return this.getHealthCheckDimensionEnum().getDimension();
}
/**
* Check whether group re-balances happen too frequently
*/
private HealthCheckResult checkReBalanceTooFrequently(Tuple<ClusterPhyParam, BaseClusterHealthConfig> paramTuple) {
private HealthCheckResult checkReBalanceTooFrequently(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
GroupParam param = (GroupParam) paramTuple.getV1();
HealthDetectedInLatestMinutesConfig singleConfig = (HealthDetectedInLatestMinutesConfig) paramTuple.getV2();
@@ -80,7 +84,7 @@ public class HealthCheckGroupService extends AbstractHealthCheckService {
return null;
}
checkResult.setPassed(countResult.getData() >= singleConfig.getDetectedTimes()? 0: 1);
checkResult.setPassed(countResult.getData() >= singleConfig.getDetectedTimes() ? Constant.NO : Constant.YES);
return checkResult;
}

View File

@@ -6,7 +6,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.Ba
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthDetectedInLatestMinutesConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
@@ -27,11 +27,11 @@ import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems.TOPIC_METRIC_UNDER_REPLICA_PARTITIONS;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems.TOPIC_METRIC_UNDER_REPLICA_PARTITIONS;
@Service
public class HealthCheckTopicService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckTopicService.class);
private static final ILog LOGGER = LogFactory.getLog(HealthCheckTopicService.class);
@Autowired
private TopicService topicService;
@@ -49,9 +49,9 @@ public class HealthCheckTopicService extends AbstractHealthCheckService {
}
@Override
public List<ClusterPhyParam> getResList(Long clusterPhyId) {
List<ClusterPhyParam> paramList = new ArrayList<>();
for (Topic topic: topicService.listTopicsFromDB(clusterPhyId)) {
public List<ClusterParam> getResList(Long clusterPhyId) {
List<ClusterParam> paramList = new ArrayList<>();
for (Topic topic: topicService.listTopicsFromCacheFirst(clusterPhyId)) {
paramList.add(new TopicParam(clusterPhyId, topic.getTopicName()));
}
return paramList;
@@ -62,10 +62,15 @@ public class HealthCheckTopicService extends AbstractHealthCheckService {
return HealthCheckDimensionEnum.TOPIC;
}
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
return this.getHealthCheckDimensionEnum().getDimension();
}
/**
* Check whether the topic has been under-replicated for a long time
*/
private HealthCheckResult checkTopicUnderReplicatedPartition(Tuple<ClusterPhyParam, BaseClusterHealthConfig> paramTuple) {
private HealthCheckResult checkTopicUnderReplicatedPartition(Tuple<ClusterParam, BaseClusterHealthConfig> paramTuple) {
TopicParam param = (TopicParam) paramTuple.getV1();
HealthDetectedInLatestMinutesConfig singleConfig = (HealthDetectedInLatestMinutesConfig) paramTuple.getV2();
@@ -85,19 +90,19 @@ public class HealthCheckTopicService extends AbstractHealthCheckService {
);
if (countResult.failed() || !countResult.hasData()) {
log.error("method=checkTopicUnderReplicatedPartition||param={}||config={}||result={}||errMsg=get metrics failed",
LOGGER.error("method=checkTopicUnderReplicatedPartition||param={}||config={}||result={}||errMsg=search metrics from es failed",
param, singleConfig, countResult);
return null;
}
checkResult.setPassed(countResult.getData() >= singleConfig.getDetectedTimes()? 0: 1);
checkResult.setPassed(countResult.getData() >= singleConfig.getDetectedTimes()? Constant.NO: Constant.YES);
return checkResult;
}
/**
* Check for NoLeader
*/
private HealthCheckResult checkTopicNoLeader(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
private HealthCheckResult checkTopicNoLeader(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
TopicParam param = (TopicParam) singleConfigSimpleTuple.getV1();
List<Partition> partitionList = partitionService.listPartitionFromCacheFirst(param.getClusterPhyId(), param.getTopicName());
@@ -109,7 +114,7 @@ public class HealthCheckTopicService extends AbstractHealthCheckService {
param.getTopicName()
);
checkResult.setPassed(partitionList.stream().filter(elem -> elem.getLeaderBrokerId().equals(Constant.INVALID_CODE)).count() >= valueConfig.getValue()? 0: 1);
checkResult.setPassed(partitionList.stream().filter(elem -> elem.getLeaderBrokerId().equals(Constant.INVALID_CODE)).count() >= valueConfig.getValue() ? Constant.NO : Constant.YES);
return checkResult;
}

View File

@@ -6,10 +6,9 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthAmountRatioConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.HealthCompareValueConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ZookeeperMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.ZookeeperMetricParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.zookeeper.ZookeeperParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
@@ -20,27 +19,25 @@ import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.enums.zookeeper.ZKRoleEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.zookeeper.ZookeeperUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ZookeeperMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ZookeeperMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperMetricService;
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@Service
public class HealthCheckZookeeperService extends AbstractHealthCheckService {
private static final ILog log = LogFactory.getLog(HealthCheckZookeeperService.class);
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private ZookeeperService zookeeperService;
@@ -58,23 +55,25 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
}
@Override
public List<ClusterPhyParam> getResList(Long clusterPhyId) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null) {
public List<ClusterParam> getResList(Long clusterPhyId) {
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(clusterPhyId);
if (clusterPhy == null || ValidateUtils.isBlank(clusterPhy.getZookeeper())) {
return new ArrayList<>();
}
try {
return Arrays.asList(new ZookeeperParam(
clusterPhyId,
ZookeeperUtils.connectStringParser(clusterPhy.getZookeeper()),
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class)
));
return Collections.singletonList(
new ZookeeperParam(
clusterPhyId,
ZookeeperUtils.connectStringParser(clusterPhy.getZookeeper()),
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class)
)
);
} catch (Exception e) {
log.error("class=HealthCheckZookeeperService||method=getResList||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
log.error("method=getResList||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
}
return new ArrayList<>();
return Collections.emptyList();
}
@Override
@@ -82,9 +81,17 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
return HealthCheckDimensionEnum.ZOOKEEPER;
}
private HealthCheckResult checkBrainSplit(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
@Override
public Integer getDimensionCodeIfSupport(Long kafkaClusterPhyId) {
if (ValidateUtils.isEmptyList(this.getResList(kafkaClusterPhyId))) {
return null;
}
return this.getHealthCheckDimensionEnum().getDimension();
}
private HealthCheckResult checkBrainSplit(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ZookeeperParam param = (ZookeeperParam) singleConfigSimpleTuple.getV1();
HealthCompareValueConfig valueConfig = (HealthCompareValueConfig) singleConfigSimpleTuple.getV2();
List<ZookeeperInfo> infoList = zookeeperService.listFromDBByCluster(param.getClusterPhyId());
HealthCheckResult checkResult = new HealthCheckResult(
@@ -96,17 +103,18 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
long value = infoList.stream().filter(elem -> ZKRoleEnum.LEADER.getRole().equals(elem.getRole())).count();
checkResult.setPassed(value == valueConfig.getValue().longValue() ? Constant.YES : Constant.NO);
checkResult.setPassed(value == 1 ? Constant.YES : Constant.NO);
return checkResult;
}
private HealthCheckResult checkOutstandingRequests(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
private HealthCheckResult checkOutstandingRequests(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ZookeeperParam param = (ZookeeperParam) singleConfigSimpleTuple.getV1();
Long clusterPhyId = param.getClusterPhyId();
HealthAmountRatioConfig valueConfig = (HealthAmountRatioConfig) singleConfigSimpleTuple.getV2();
Result<ZookeeperMetrics> metricsResult = zookeeperMetricService.collectMetricsFromZookeeper(
new ZookeeperMetricParam(
param.getClusterPhyId(),
clusterPhyId,
param.getZkAddressList(),
param.getZkConfig(),
ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS
@@ -114,8 +122,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
);
if (metricsResult.failed() || !metricsResult.hasData()) {
log.error(
"class=HealthCheckZookeeperService||method=checkOutstandingRequests||param={}||config={}||result={}||errMsg=get metrics failed",
param, valueConfig, metricsResult
"method=checkOutstandingRequests||clusterPhyId={}||param={}||config={}||result={}||errMsg=get metrics failed, may be collect failed or zk srvr command not in whitelist.",clusterPhyId ,param, valueConfig, metricsResult
);
return null;
}
@@ -128,14 +135,26 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
);
Float value = metricsResult.getData().getMetric(ZookeeperMetricVersionItems.ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS);
if(null == value){
log.error("method=checkOutstandingRequests||clusterPhyId={}|| errMsg=get OutstandingRequests metric failed, may be collect failed or zk srvr command not in whitelist.", clusterPhyId);
return null;
}
Integer amount = valueConfig.getAmount();
Double ratio = valueConfig.getRatio();
if (null == amount || null == ratio) {
log.error("method=checkOutstandingRequests||clusterPhyId={}||result={}||errMsg=get valueConfig amount/ratio config failed", clusterPhyId,valueConfig);
return null;
}
double configValue = amount.doubleValue() * ratio;
checkResult.setPassed(value.intValue() <= valueConfig.getAmount().doubleValue() * valueConfig.getRatio().doubleValue() ? Constant.YES : Constant.NO);
checkResult.setPassed(value.doubleValue() <= configValue ? Constant.YES : Constant.NO);
return checkResult;
}
private HealthCheckResult checkWatchCount(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
private HealthCheckResult checkWatchCount(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ZookeeperParam param = (ZookeeperParam) singleConfigSimpleTuple.getV1();
HealthAmountRatioConfig valueConfig = (HealthAmountRatioConfig) singleConfigSimpleTuple.getV2();
@@ -150,7 +169,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
if (metricsResult.failed() || !metricsResult.hasData()) {
log.error(
"class=HealthCheckZookeeperService||method=checkWatchCount||param={}||config={}||result={}||errMsg=get metrics failed",
"method=checkWatchCount||param={}||config={}||result={}||errMsg=get metrics failed, may be collect failed or zk mntr command not in whitelist.",
param, valueConfig, metricsResult
);
return null;
@@ -171,7 +190,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
return checkResult;
}
private HealthCheckResult checkAliveConnections(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
private HealthCheckResult checkAliveConnections(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ZookeeperParam param = (ZookeeperParam) singleConfigSimpleTuple.getV1();
HealthAmountRatioConfig valueConfig = (HealthAmountRatioConfig) singleConfigSimpleTuple.getV2();
@@ -186,7 +205,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
if (metricsResult.failed() || !metricsResult.hasData()) {
log.error(
"class=HealthCheckZookeeperService||method=checkAliveConnections||param={}||config={}||result={}||errMsg=get metrics failed",
"method=checkAliveConnections||param={}||config={}||result={}||errMsg=get metrics failed, may be collect failed or zk srvr command not in whitelist.",
param, valueConfig, metricsResult
);
return null;
@@ -207,7 +226,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
return checkResult;
}
private HealthCheckResult checkApproximateDataSize(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
private HealthCheckResult checkApproximateDataSize(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ZookeeperParam param = (ZookeeperParam) singleConfigSimpleTuple.getV1();
HealthAmountRatioConfig valueConfig = (HealthAmountRatioConfig) singleConfigSimpleTuple.getV2();
@@ -222,7 +241,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
if (metricsResult.failed() || !metricsResult.hasData()) {
log.error(
"class=HealthCheckZookeeperService||method=checkApproximateDataSize||param={}||config={}||result={}||errMsg=get metrics failed",
"method=checkApproximateDataSize||param={}||config={}||result={}||errMsg=get metrics failed, may be collect failed or zk srvr command not in whitelist.",
param, valueConfig, metricsResult
);
return null;
@@ -243,7 +262,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
return checkResult;
}
private HealthCheckResult checkSentRate(Tuple<ClusterPhyParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
private HealthCheckResult checkSentRate(Tuple<ClusterParam, BaseClusterHealthConfig> singleConfigSimpleTuple) {
ZookeeperParam param = (ZookeeperParam) singleConfigSimpleTuple.getV1();
HealthAmountRatioConfig valueConfig = (HealthAmountRatioConfig) singleConfigSimpleTuple.getV2();
@@ -258,7 +277,7 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
if (metricsResult.failed() || !metricsResult.hasData()) {
log.error(
"class=HealthCheckZookeeperService||method=checkSentRate||param={}||config={}||result={}||errMsg=get metrics failed",
"method=checkSentRate||param={}||config={}||result={}||errMsg=get metrics failed, may be collect failed or zk srvr command not in whitelist.",
param, valueConfig, metricsResult
);
return null;
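The HealthAmountRatioConfig checks above derive their threshold as amount × ratio; for checkOutstandingRequests the metric must stay at or below that product. A worked example with assumed config values:

// Assumed config: amount = 1000 outstanding requests, ratio = 0.8.
double configValue = 1000d * 0.8;          // effective threshold of 800
double observed = 750d;                    // collected ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS
boolean passed = observed <= configValue;  // true: 750 is under the 800 threshold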

View File

@@ -1,25 +1,31 @@
package com.xiaojukeji.know.streaming.km.core.service.health.checkresult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckAggResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import java.util.Date;
import java.util.List;
import java.util.Map;
public interface HealthCheckResultService {
int replace(HealthCheckResult healthCheckResult);
List<HealthCheckAggResult> getHealthCheckAggResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum, String resNme);
List<HealthCheckAggResult> getHealthCheckAggResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum);
int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime);
List<HealthCheckResultPO> listAll();
List<HealthCheckResultPO> listCheckResult(Long clusterPhyId);
List<HealthCheckResultPO> listCheckResult(Long clusterPhyId, Integer resDimension);
List<HealthCheckResultPO> listCheckResult(Long clusterPhyId, Integer resDimension, String resNme);
List<HealthCheckResultPO> getClusterHealthCheckResult(Long clusterPhyId);
List<HealthCheckResultPO> getClusterResourcesHealthCheckResult(Long clusterPhyId, Integer resDimension);
List<HealthCheckResultPO> getResHealthCheckResult(Long clusterPhyId, Integer dimension, String resNme);
List<HealthCheckResultPO> listCheckResultFromCache(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum);
List<HealthCheckResultPO> listCheckResultFromCache(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum, String resNme);
Map<String, BaseClusterHealthConfig> getClusterHealthConfig(Long clusterPhyId);
void batchReplace(Long clusterPhyId, List<HealthCheckResult> healthCheckResults);
void batchReplace(Long clusterPhyId, Integer dimension, List<HealthCheckResult> healthCheckResults);
List<HealthCheckResultPO> getConnectorHealthCheckResult(Long clusterPhyId);
List<HealthCheckResultPO> getMirrorMakerHealthCheckResult(Long clusterPhyId);
}
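A minimal caller-side sketch (not part of this commit) of how the aggregation API above is typically consumed; it assumes a Spring-injected HealthCheckResultService and only uses getPassed(), which HealthCheckAggResult exposes elsewhere in this diff.
@Autowired
private HealthCheckResultService healthCheckResultService;
// Hypothetical helper: count the passed checks for the BROKER dimension of cluster 1.
public long countPassedBrokerChecks() {
    List<HealthCheckAggResult> aggList =
            healthCheckResultService.getHealthCheckAggResult(1L, HealthCheckDimensionEnum.BROKER);
    return aggList.stream().filter(HealthCheckAggResult::getPassed).count();
}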

View File

@@ -3,70 +3,115 @@ package com.xiaojukeji.know.streaming.km.core.service.health.checkresult.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Lists;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckAggResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.po.config.PlatformClusterConfigPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectClusterPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.config.ConfigGroupEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.persistence.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.config.PlatformClusterConfigService;
import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectClusterDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.health.HealthCheckResultDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum.CONNECTOR;
import static com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum.MIRROR_MAKER;
@Service
public class HealthCheckResultServiceImpl implements HealthCheckResultService {
private static final ILog log = LogFactory.getLog(HealthCheckResultServiceImpl.class);
private static final ILog LOGGER = LogFactory.getLog(HealthCheckResultServiceImpl.class);
@Autowired
private HealthCheckResultDAO healthCheckResultDAO;
@Autowired
private ConnectClusterDAO connectClusterDAO;
@Autowired
private ConnectorDAO connectorDAO;
@Autowired
private PlatformClusterConfigService platformClusterConfigService;
@Override
public int replace(HealthCheckResult healthCheckResult) {
return healthCheckResultDAO.replace(ConvertUtil.obj2Obj(healthCheckResult, HealthCheckResultPO.class));
public List<HealthCheckAggResult> getHealthCheckAggResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum, String resNme) {
List<HealthCheckResultPO> poList = this.listCheckResultFromCache(clusterPhyId, dimensionEnum, resNme);
return this.convert2HealthCheckAggResultList(poList, dimensionEnum.getDimension());
}
@Override
public int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime) {
LambdaQueryWrapper<HealthCheckResultPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HealthCheckResultPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.le(HealthCheckResultPO::getUpdateTime, beforeTime);
return healthCheckResultDAO.delete(lambdaQueryWrapper);
public List<HealthCheckAggResult> getHealthCheckAggResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum) {
List<HealthCheckResultPO> poList = this.listCheckResultFromCache(clusterPhyId, dimensionEnum);
return this.convert2HealthCheckAggResultList(poList, dimensionEnum.getDimension());
}
@Override
public List<HealthCheckResultPO> getClusterHealthCheckResult(Long clusterPhyId) {
public List<HealthCheckResultPO> listAll() {
return healthCheckResultDAO.selectList(null);
}
@Override
public List<HealthCheckResultPO> listCheckResult(Long clusterPhyId) {
LambdaQueryWrapper<HealthCheckResultPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HealthCheckResultPO::getClusterPhyId, clusterPhyId);
return healthCheckResultDAO.selectList(lambdaQueryWrapper);
}
@Override
public List<HealthCheckResultPO> getClusterResourcesHealthCheckResult(Long clusterPhyId, Integer resDimension) {
public List<HealthCheckResultPO> listCheckResult(Long clusterPhyId, Integer resDimension) {
LambdaQueryWrapper<HealthCheckResultPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HealthCheckResultPO::getDimension, resDimension);
lambdaQueryWrapper.eq(HealthCheckResultPO::getClusterPhyId, clusterPhyId);
return healthCheckResultDAO.selectList(lambdaQueryWrapper);
}
@Override
public List<HealthCheckResultPO> getResHealthCheckResult(Long clusterPhyId, Integer resDimension, String resNme) {
public List<HealthCheckResultPO> listCheckResult(Long clusterPhyId, Integer resDimension, String resNme) {
LambdaQueryWrapper<HealthCheckResultPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(HealthCheckResultPO::getDimension, resDimension);
lambdaQueryWrapper.eq(HealthCheckResultPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.eq(HealthCheckResultPO::getResName, resNme);
return healthCheckResultDAO.selectList(lambdaQueryWrapper);
}
@Override
public List<HealthCheckResultPO> listCheckResultFromCache(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum) {
Map<String, List<HealthCheckResultPO>> poMap = DataBaseDataLocalCache.getHealthCheckResults(clusterPhyId, dimensionEnum);
if (poMap != null) {
return poMap.values().stream().collect(ArrayList::new, ArrayList::addAll, ArrayList::addAll);
}
return new ArrayList<>();
}
@Override
public List<HealthCheckResultPO> listCheckResultFromCache(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum, String resNme) {
Map<String, List<HealthCheckResultPO>> poMap = DataBaseDataLocalCache.getHealthCheckResults(clusterPhyId, dimensionEnum);
if (poMap != null) {
return poMap.getOrDefault(resNme, new ArrayList<>());
}
return new ArrayList<>();
}
@Override
public Map<String, BaseClusterHealthConfig> getClusterHealthConfig(Long clusterPhyId) {
Map<String, PlatformClusterConfigPO> configPOMap = platformClusterConfigService.getByClusterAndGroupWithoutDefault(clusterPhyId, ConfigGroupEnum.HEALTH.name());
@@ -76,7 +121,7 @@ public class HealthCheckResultServiceImpl implements HealthCheckResultService {
try {
HealthCheckNameEnum nameEnum = HealthCheckNameEnum.getByName(po.getValueName());
if (HealthCheckNameEnum.UNKNOWN.equals(nameEnum)) {
log.warn("method=getClusterHealthConfig||config={}||errMsg=config name illegal", po);
LOGGER.warn("method=getClusterHealthConfig||config={}||errMsg=config name illegal", po);
continue;
}
@@ -85,22 +130,97 @@ public class HealthCheckResultServiceImpl implements HealthCheckResultService {
healthConfig.setClusterPhyId(clusterPhyId);
configMap.put(po.getValueName(), healthConfig);
} catch (Exception e) {
log.error("method=getClusterHealthConfig||config={}||errMsg=exception!", po, e);
LOGGER.error("method=getClusterHealthConfig||config={}||errMsg=exception!", po, e);
}
}
return configMap;
}
@Override
public void batchReplace(Long clusterPhyId, List<HealthCheckResult> healthCheckResults) {
List<List<HealthCheckResult>> healthCheckResultPartitions = Lists.partition(healthCheckResults, Constant.PER_BATCH_MAX_VALUE);
for (List<HealthCheckResult> checkResultPartition : healthCheckResultPartitions) {
List<HealthCheckResultPO> healthCheckResultPos = ConvertUtil.list2List(checkResultPartition, HealthCheckResultPO.class);
public List<HealthCheckResultPO> getConnectorHealthCheckResult(Long clusterPhyId) {
List<HealthCheckResultPO> resultPOList = new ArrayList<>();
// Look up the Connect clusters under this Kafka cluster
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
List<Long> connectClusterIdList = connectClusterDAO.selectList(lambdaQueryWrapper).stream().map(elem -> elem.getId()).collect(Collectors.toList());
if (ValidateUtils.isEmptyList(connectClusterIdList)) {
return resultPOList;
}
LambdaQueryWrapper<HealthCheckResultPO> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(HealthCheckResultPO::getDimension, CONNECTOR.getDimension());
wrapper.in(HealthCheckResultPO::getClusterPhyId, connectClusterIdList);
resultPOList = healthCheckResultDAO.selectList(wrapper);
return resultPOList;
}
@Override
public List<HealthCheckResultPO> getMirrorMakerHealthCheckResult(Long clusterPhyId) {
List<HealthCheckResultPO> resultPOList = new ArrayList<>();
// Look up the Connect clusters under this Kafka cluster
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
List<Long> connectClusterIdList = connectClusterDAO.selectList(lambdaQueryWrapper).stream().map(elem -> elem.getId()).collect(Collectors.toList());
if (ValidateUtils.isEmptyList(connectClusterIdList)) {
return resultPOList;
}
LambdaQueryWrapper<HealthCheckResultPO> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(HealthCheckResultPO::getDimension, MIRROR_MAKER.getDimension());
wrapper.in(HealthCheckResultPO::getClusterPhyId, connectClusterIdList);
resultPOList = healthCheckResultDAO.selectList(wrapper);
return resultPOList;
}
@Override
public void batchReplace(Long clusterPhyId, Integer dimension, List<HealthCheckResult> healthCheckResults) {
List<HealthCheckResultPO> inDBList = this.listCheckResult(clusterPhyId, dimension);
// Convert list to map, keyed by configName + resName
Map<String, HealthCheckResultPO> inDBMap = new HashMap<>(inDBList.size());
inDBList.forEach(elem -> inDBMap.put(elem.getConfigName() + elem.getResName(), elem));
for (HealthCheckResult checkResult: healthCheckResults) {
HealthCheckResultPO inDB = inDBMap.remove(checkResult.getConfigName() + checkResult.getResName());
try {
healthCheckResultDAO.batchReplace(healthCheckResultPos);
} catch (Exception e) {
log.error("method=batchReplace||clusterPhyId={}||checkResultList={}||errMsg=exception!", clusterPhyId, healthCheckResultPos, e);
HealthCheckResultPO newPO = ConvertUtil.obj2Obj(checkResult, HealthCheckResultPO.class);
if (inDB == null) {
healthCheckResultDAO.insert(newPO);
} else {
newPO.setId(inDB.getId());
newPO.setUpdateTime(new Date());
healthCheckResultDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
inDBMap.values().forEach(elem -> {
if (System.currentTimeMillis() - elem.getUpdateTime().getTime() <= 1200000) {
// Keep records updated within the last 20 minutes
return;
}
healthCheckResultDAO.deleteById(elem.getId());
});
}
private List<HealthCheckAggResult> convert2HealthCheckAggResultList(List<HealthCheckResultPO> poList, Integer dimensionCode) {
Map<String /* check name */, List<HealthCheckResultPO> /* check results */> groupByCheckNamePOMap = new HashMap<>();
for (HealthCheckResultPO po: poList) {
groupByCheckNamePOMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
groupByCheckNamePOMap.get(po.getConfigName()).add(po);
}
List<HealthCheckAggResult> stateList = new ArrayList<>();
for (HealthCheckNameEnum nameEnum: HealthCheckNameEnum.getByDimensionCode(dimensionCode)) {
stateList.add(new HealthCheckAggResult(nameEnum, groupByCheckNamePOMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
return stateList;
}
}
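The new batchReplace reconciles freshly computed results against what is already in DB: insert rows that are new, update rows that match on configName + resName, and delete rows that were not refreshed in this round and are older than 20 minutes. A standalone, illustrative sketch of that reconciliation pattern (generic types instead of the project's DAO; not the project's exact code):
import java.util.HashMap;
import java.util.Map;
class ReconcileSketch {
    private static final long KEEP_MILLIS = 20 * 60 * 1000L; // rows younger than 20 minutes are kept
    static void reconcile(Map<String, Long> inDbUpdateTimeByKey, Map<String, String> freshResultsByKey) {
        Map<String, Long> notRefreshed = new HashMap<>(inDbUpdateTimeByKey);
        for (Map.Entry<String, String> entry : freshResultsByKey.entrySet()) {
            if (notRefreshed.remove(entry.getKey()) == null) {
                System.out.println("insert " + entry.getKey());   // brand-new check result
            } else {
                System.out.println("update " + entry.getKey());   // refresh the existing row
            }
        }
        long now = System.currentTimeMillis();
        notRefreshed.forEach((key, updateTime) -> {
            if (now - updateTime > KEEP_MILLIS) {
                System.out.println("delete " + key);              // stale row, not refreshed for 20+ minutes
            }
        });
    }
}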

View File

@@ -2,49 +2,28 @@ package com.xiaojukeji.know.streaming.km.core.service.health.state;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthScoreResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.*;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import java.util.List;
public interface HealthStateService {
/**
* Cluster health metrics
* Health metrics
*/
ClusterMetrics calClusterHealthMetrics(Long clusterPhyId);
/**
* Get Broker health metrics
*/
BrokerMetrics calBrokerHealthMetrics(Long clusterPhyId, Integer brokerId);
/**
* Get Topic health metrics
*/
TopicMetrics calTopicHealthMetrics(Long clusterPhyId, String topicName);
/**
* Get Group health metrics
*/
GroupMetrics calGroupHealthMetrics(Long clusterPhyId, String groupName);
/**
* Get Zookeeper health metrics
*/
ZookeeperMetrics calZookeeperHealthMetrics(Long clusterPhyId);
ConnectorMetrics calConnectorHealthMetrics(Long connectClusterId, String connectorName);
MirrorMakerMetrics calMirrorMakerHealthMetrics(Long connectClusterId, String mirrorMakerName);
/**
* Get the cluster's health check results
*/
List<HealthScoreResult> getClusterHealthResult(Long clusterPhyId);
/**
* Get the cluster's health check results for a given dimension
*/
List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum);
/**
* Get the health check results of a given resource in the cluster
*/
List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Integer dimension, String resNme);
List<HealthScoreResult> getAllDimensionHealthResult(Long clusterPhyId);
List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, List<Integer> dimensionCodeList);
List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Long clusterId, Integer dimension, String resNme);
}
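A hedged usage sketch of the reworked interface: the caller asks for a connector's health metrics and reads the metric names that the implementation below fills in (metric-name constants come from ConnectorMetricVersionItems in this diff; wiring details are assumed).
@Autowired
private HealthStateService healthStateService;
public void printConnectorHealth(Long connectClusterId, String connectorName) {
    ConnectorMetrics metrics = healthStateService.calConnectorHealthMetrics(connectClusterId, connectorName);
    Float passed = metrics.getMetrics().get(CONNECTOR_METRIC_HEALTH_CHECK_PASSED);
    Float total  = metrics.getMetrics().get(CONNECTOR_METRIC_HEALTH_CHECK_TOTAL);
    Float state  = metrics.getMetrics().get(CONNECTOR_METRIC_HEALTH_STATE);
    System.out.println(passed + "/" + total + " checks passed, state=" + state);
}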

View File

@@ -1,36 +1,46 @@
package com.xiaojukeji.know.streaming.km.core.service.health.state.impl;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckAggResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthScoreResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.*;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.mm2.MirrorMakerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckNameEnum;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthStateEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
import org.apache.commons.collections.CollectionUtils;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.*;
import java.util.stream.Collectors;
import java.util.List;
import java.util.Map;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems.BROKER_METRIC_HEALTH_STATE;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.GroupMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.GroupMetricVersionItems.GROUP_METRIC_HEALTH_CHECK_TOTAL;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems.TOPIC_METRIC_HEALTH_CHECK_TOTAL;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ZookeeperMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum.*;
import static com.xiaojukeji.know.streaming.km.common.enums.health.HealthStateEnum.DEAD;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.MirrorMakerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ZookeeperMetricVersionItems.*;
@Service
@@ -44,12 +54,18 @@ public class HealthStateServiceImpl implements HealthStateService {
@Autowired
private BrokerService brokerService;
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private KafkaControllerService kafkaControllerService;
@Override
public ClusterMetrics calClusterHealthMetrics(Long clusterPhyId) {
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
// Cluster-dimension metrics
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.CLUSTER);
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.CLUSTER);
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER, 0.0f);
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER, 0.0f);
@@ -65,6 +81,8 @@ public class HealthStateServiceImpl implements HealthStateService {
metrics.putMetric(this.calClusterTopicsHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calClusterGroupsHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calZookeeperHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calClusterConnectsHealthMetrics(clusterPhyId).getMetrics());
metrics.putMetric(this.calClusterMirrorMakersHealthMetrics(clusterPhyId).getMetrics());
// Aggregate the final result
Float passed = 0.0f;
@@ -73,6 +91,8 @@ public class HealthStateServiceImpl implements HealthStateService {
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_BROKERS);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_GROUPS);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR);
passed += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_PASSED_MIRROR_MAKER);
Float total = 0.0f;
total += metrics.getMetric(ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL);
@@ -80,6 +100,8 @@ public class HealthStateServiceImpl implements HealthStateService {
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_BROKERS);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_GROUPS);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR);
total += metrics.getMetric(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_MIRROR_MAKER);
// State
Float state = 0.0f;
@@ -88,6 +110,12 @@ public class HealthStateServiceImpl implements HealthStateService {
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_BROKERS));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_GROUPS));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_CLUSTER));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_CONNECTOR));
state = Math.max(state, metrics.getMetric(CLUSTER_METRIC_HEALTH_STATE_MIRROR_MAKER));
if (isKafkaClusterDown(clusterPhyId)) {
state = Float.valueOf(HealthStateEnum.DEAD.getDimension());
}
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED, passed);
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL, total);
@@ -98,16 +126,16 @@ public class HealthStateServiceImpl implements HealthStateService {
@Override
public BrokerMetrics calBrokerHealthMetrics(Long clusterPhyId, Integer brokerId) {
List<HealthScoreResult> healthScoreResultList = this.getResHealthResult(clusterPhyId, HealthCheckDimensionEnum.BROKER.getDimension(), String.valueOf(brokerId));
List<HealthCheckAggResult> aggResultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.BROKER, String.valueOf(brokerId));
BrokerMetrics metrics = new BrokerMetrics(clusterPhyId, brokerId);
if (ValidateUtils.isEmptyList(healthScoreResultList)) {
if (ValidateUtils.isEmptyList(aggResultList)) {
metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)HealthStateEnum.GOOD.getDimension());
metrics.getMetrics().put(BROKER_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(BROKER_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(BROKER_METRIC_HEALTH_CHECK_PASSED, getHealthCheckResultPassed(healthScoreResultList));
metrics.getMetrics().put(BROKER_METRIC_HEALTH_CHECK_TOTAL, Float.valueOf(healthScoreResultList.size()));
metrics.getMetrics().put(BROKER_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(aggResultList));
metrics.getMetrics().put(BROKER_METRIC_HEALTH_CHECK_TOTAL, (float)aggResultList.size());
// Compute the health state
Broker broker = brokerService.getBrokerFromCacheFirst(clusterPhyId, brokerId);
@@ -117,7 +145,7 @@ public class HealthStateServiceImpl implements HealthStateService {
} else if (!broker.alive()) {
metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)HealthStateEnum.DEAD.getDimension());
} else {
metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)this.calHealthScoreResultState(healthScoreResultList).getDimension());
metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)this.calHealthState(aggResultList).getDimension());
}
}
@@ -126,17 +154,17 @@ public class HealthStateServiceImpl implements HealthStateService {
@Override
public TopicMetrics calTopicHealthMetrics(Long clusterPhyId, String topicName) {
List<HealthScoreResult> healthScoreResultList = this.getResHealthResult(clusterPhyId, HealthCheckDimensionEnum.TOPIC.getDimension(), topicName);
List<HealthCheckAggResult> aggResultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.TOPIC, topicName);
TopicMetrics metrics = new TopicMetrics(topicName, clusterPhyId,true);
if (ValidateUtils.isEmptyList(healthScoreResultList)) {
if (ValidateUtils.isEmptyList(aggResultList)) {
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_STATE, (float)HealthStateEnum.GOOD.getDimension());
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_STATE, (float)this.calHealthScoreResultState(healthScoreResultList).getDimension());
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckResultPassed(healthScoreResultList));
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_CHECK_TOTAL, Float.valueOf(healthScoreResultList.size()));
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_STATE, (float)this.calHealthState(aggResultList).getDimension());
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(aggResultList));
metrics.getMetrics().put(TOPIC_METRIC_HEALTH_CHECK_TOTAL, (float)aggResultList.size());
}
return metrics;
@@ -144,17 +172,17 @@ public class HealthStateServiceImpl implements HealthStateService {
@Override
public GroupMetrics calGroupHealthMetrics(Long clusterPhyId, String groupName) {
List<HealthScoreResult> healthScoreResultList = this.getResHealthResult(clusterPhyId, HealthCheckDimensionEnum.GROUP.getDimension(), groupName);
List<HealthCheckAggResult> aggResultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.GROUP, groupName);
GroupMetrics metrics = new GroupMetrics(clusterPhyId, groupName, true);
if (ValidateUtils.isEmptyList(healthScoreResultList)) {
if (ValidateUtils.isEmptyList(aggResultList)) {
metrics.getMetrics().put(GROUP_METRIC_HEALTH_STATE, (float)HealthStateEnum.GOOD.getDimension());
metrics.getMetrics().put(GROUP_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(GROUP_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(GROUP_METRIC_HEALTH_STATE, (float)this.calHealthScoreResultState(healthScoreResultList).getDimension());
metrics.getMetrics().put(GROUP_METRIC_HEALTH_CHECK_PASSED, getHealthCheckResultPassed(healthScoreResultList));
metrics.getMetrics().put(GROUP_METRIC_HEALTH_CHECK_TOTAL, Float.valueOf(healthScoreResultList.size()));
metrics.getMetrics().put(GROUP_METRIC_HEALTH_STATE, (float)this.calHealthState(aggResultList).getDimension());
metrics.getMetrics().put(GROUP_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(aggResultList));
metrics.getMetrics().put(GROUP_METRIC_HEALTH_CHECK_TOTAL, (float)aggResultList.size());
}
return metrics;
@@ -162,15 +190,15 @@ public class HealthStateServiceImpl implements HealthStateService {
@Override
public ZookeeperMetrics calZookeeperHealthMetrics(Long clusterPhyId) {
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.ZOOKEEPER);
List<HealthCheckAggResult> aggResultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.ZOOKEEPER);
ZookeeperMetrics metrics = new ZookeeperMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
if (ValidateUtils.isEmptyList(aggResultList)) {
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL, (float)resultList.size());
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(aggResultList));
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_CHECK_TOTAL, (float)aggResultList.size());
}
if (zookeeperService.allServerDown(clusterPhyId)) {
@@ -186,88 +214,104 @@ public class HealthStateServiceImpl implements HealthStateService {
}
// When the service is not down, compute the state from the check results
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_STATE, (float)this.calHealthState(resultList).getDimension());
metrics.getMetrics().put(ZOOKEEPER_METRIC_HEALTH_STATE, (float)this.calHealthState(aggResultList).getDimension());
return metrics;
}
@Override
public List<HealthScoreResult> getClusterHealthResult(Long clusterPhyId) {
List<HealthCheckResultPO> poList = healthCheckResultService.getClusterHealthCheckResult(clusterPhyId);
public ConnectorMetrics calConnectorHealthMetrics(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectClusterId);
ConnectorMetrics metrics = new ConnectorMetrics(connectClusterId, connectorName);
// <check item, <check results>>
Map<String, List<HealthCheckResultPO>> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po: poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
// Connect cluster not found
if (connectCluster == null) {
metrics.putMetric(CONNECTOR_METRIC_HEALTH_STATE, (float) HealthStateEnum.DEAD.getDimension());
return metrics;
}
Map<String, BaseClusterHealthConfig> configMap = healthCheckResultService.getClusterHealthConfig(clusterPhyId);
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(connectClusterId, HealthCheckDimensionEnum.CONNECTOR, connectorName);
List<HealthScoreResult> healthScoreResultList = new ArrayList<>();
for (HealthCheckNameEnum nameEnum: HealthCheckNameEnum.values()) {
BaseClusterHealthConfig baseConfig = configMap.get(nameEnum.getConfigName());
if (baseConfig == null) {
continue;
}
healthScoreResultList.add(new HealthScoreResult(
nameEnum,
baseConfig,
checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>()))
);
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(CONNECTOR_METRIC_HEALTH_CHECK_TOTAL, (float) resultList.size());
}
return healthScoreResultList;
metrics.putMetric(CONNECTOR_METRIC_HEALTH_STATE, (float) this.calHealthState(resultList).getDimension());
return metrics;
}
@Override
public List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum) {
List<HealthCheckResultPO> poList = healthCheckResultService.getClusterResourcesHealthCheckResult(clusterPhyId, dimensionEnum.getDimension());
public MirrorMakerMetrics calMirrorMakerHealthMetrics(Long connectClusterId, String mirrorMakerName) {
ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectClusterId);
MirrorMakerMetrics metrics = new MirrorMakerMetrics(connectClusterId, mirrorMakerName);
// <check item, <passed count, failed count>>
Map<String, List<HealthCheckResultPO>> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po: poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
if (connectCluster == null) {
metrics.putMetric(MIRROR_MAKER_METRIC_HEALTH_STATE, (float) HealthStateEnum.DEAD.getDimension());
return metrics;
}
Map<String, BaseClusterHealthConfig> configMap = healthCheckResultService.getClusterHealthConfig(clusterPhyId);
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(connectClusterId, HealthCheckDimensionEnum.MIRROR_MAKER, mirrorMakerName);
List<HealthScoreResult> healthScoreResultList = new ArrayList<>();
for (HealthCheckNameEnum nameEnum: HealthCheckNameEnum.getByDimension(dimensionEnum)) {
BaseClusterHealthConfig baseConfig = configMap.get(nameEnum.getConfigName());
if (baseConfig == null) {
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(MIRROR_MAKER_METRIC_HEALTH_CHECK_PASSED, 0.0f);
metrics.getMetrics().put(MIRROR_MAKER_METRIC_HEALTH_CHECK_TOTAL, 0.0f);
} else {
metrics.getMetrics().put(MIRROR_MAKER_METRIC_HEALTH_CHECK_PASSED, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(MIRROR_MAKER_METRIC_HEALTH_CHECK_TOTAL, (float) resultList.size());
}
metrics.putMetric(MIRROR_MAKER_METRIC_HEALTH_STATE, (float) this.calHealthState(resultList).getDimension());
return metrics;
}
@Override
public List<HealthScoreResult> getAllDimensionHealthResult(Long clusterPhyId) {
List<Integer> supportedDimensionCodeList = new ArrayList<>();
// Collect the supported dimension codes
for (AbstractHealthCheckService service: SpringTool.getBeansOfType(AbstractHealthCheckService.class).values()) {
Integer dimensionCode = service.getDimensionCodeIfSupport(clusterPhyId);
if (dimensionCode == null) {
continue;
}
healthScoreResultList.add(new HealthScoreResult(nameEnum, baseConfig, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
supportedDimensionCodeList.add(dimensionCode);
}
return healthScoreResultList;
return this.getDimensionHealthResult(clusterPhyId, supportedDimensionCodeList);
}
@Override
public List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Integer dimension, String resNme) {
List<HealthCheckResultPO> poList = healthCheckResultService.getResHealthCheckResult(clusterPhyId, dimension, resNme);
public List<HealthScoreResult> getDimensionHealthResult(Long clusterPhyId, List<Integer> dimensionCodeList) {
// Look up the health check results
List<HealthCheckResultPO> poList = new ArrayList<>();
for (Integer dimensionCode : dimensionCodeList) {
if (dimensionCode.equals(HealthCheckDimensionEnum.CONNECTOR.getDimension())) {
poList.addAll(healthCheckResultService.getConnectorHealthCheckResult(clusterPhyId));
} else if (dimensionCode.equals(HealthCheckDimensionEnum.MIRROR_MAKER.getDimension())) {
poList.addAll(healthCheckResultService.getMirrorMakerHealthCheckResult(clusterPhyId));
} else {
poList.addAll(healthCheckResultService.listCheckResult(clusterPhyId, dimensionCode));
}
}
return this.getResHealthResult(clusterPhyId, dimensionCodeList, poList);
}
@Override
public List<HealthScoreResult> getResHealthResult(Long clusterPhyId, Long clusterId, Integer dimension, String resNme) {
List<HealthCheckResultPO> poList = healthCheckResultService.listCheckResult(clusterId, dimension, resNme);
Map<String, List<HealthCheckResultPO>> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po: poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
}
Map<String, BaseClusterHealthConfig> configMap = healthCheckResultService.getClusterHealthConfig(clusterPhyId);
List<HealthScoreResult> healthScoreResultList = new ArrayList<>();
for (HealthCheckNameEnum nameEnum: HealthCheckNameEnum.getByDimensionCode(dimension)) {
BaseClusterHealthConfig baseConfig = configMap.get(nameEnum.getConfigName());
if (baseConfig == null) {
continue;
}
healthScoreResultList.add(new HealthScoreResult(nameEnum, baseConfig, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
return healthScoreResultList;
return this.convert2HealthScoreResultList(clusterPhyId, poList, dimension);
}
@@ -275,7 +319,7 @@ public class HealthStateServiceImpl implements HealthStateService {
private ClusterMetrics calClusterTopicsHealthMetrics(Long clusterPhyId) {
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.TOPIC);
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.TOPIC);
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
@@ -292,7 +336,7 @@ public class HealthStateServiceImpl implements HealthStateService {
}
private ClusterMetrics calClusterGroupsHealthMetrics(Long clusterPhyId) {
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.GROUP);
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.GROUP);
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
@@ -309,7 +353,7 @@ public class HealthStateServiceImpl implements HealthStateService {
}
private ClusterMetrics calClusterBrokersHealthMetrics(Long clusterPhyId) {
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.BROKER);
List<HealthCheckAggResult> resultList = healthCheckResultService.getHealthCheckAggResult(clusterPhyId, HealthCheckDimensionEnum.BROKER);
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
@@ -337,29 +381,148 @@ public class HealthStateServiceImpl implements HealthStateService {
return metrics;
}
private List<HealthCheckAggResult> getDimensionHealthCheckAggResult(Long clusterPhyId, HealthCheckDimensionEnum dimensionEnum) {
List<HealthCheckResultPO> poList = healthCheckResultService.getClusterResourcesHealthCheckResult(clusterPhyId, dimensionEnum.getDimension());
private ClusterMetrics calClusterConnectsHealthMetrics(Long clusterPhyId) {
// Get the health check results
List<HealthCheckResultPO> connectHealthCheckResult = healthCheckResultService.getConnectorHealthCheckResult(clusterPhyId);
Map<String /* check name */, List<HealthCheckResultPO> /* check results */> groupByCheckNamePOMap = new HashMap<>();
connectHealthCheckResult.addAll(healthCheckResultService.listCheckResult(clusterPhyId, CONNECT_CLUSTER.getDimension()));
List<Integer> dimensionCodeList = Arrays.asList(CONNECTOR.getDimension(), CONNECT_CLUSTER.getDimension());
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(connectHealthCheckResult, dimensionCodeList);
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR, 0.0f);
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR, 0.0f);
} else {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR, (float) resultList.size());
}
// Judge by the Connect cluster state first
if (connectClusterService.existConnectClusterDown(clusterPhyId)) {
metrics.putMetric(CLUSTER_METRIC_HEALTH_STATE_CONNECTOR, (float) HealthStateEnum.POOR.getDimension());
return metrics;
}
metrics.putMetric(CLUSTER_METRIC_HEALTH_STATE_CONNECTOR, (float) this.calHealthState(resultList).getDimension());
return metrics;
}
private ClusterMetrics calClusterMirrorMakersHealthMetrics(Long clusterPhyId){
List<HealthCheckResultPO> mirrorMakerHealthCheckResult = healthCheckResultService.getMirrorMakerHealthCheckResult(clusterPhyId);
List<HealthCheckAggResult> resultList = this.getDimensionHealthCheckAggResult(mirrorMakerHealthCheckResult, Arrays.asList(MIRROR_MAKER.getDimension()));
ClusterMetrics metrics = new ClusterMetrics(clusterPhyId);
if (ValidateUtils.isEmptyList(resultList)) {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_MIRROR_MAKER, 0.0f);
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_MIRROR_MAKER, 0.0f);
} else {
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_PASSED_MIRROR_MAKER, this.getHealthCheckPassed(resultList));
metrics.getMetrics().put(CLUSTER_METRIC_HEALTH_CHECK_TOTAL_MIRROR_MAKER, (float) resultList.size());
}
metrics.putMetric(CLUSTER_METRIC_HEALTH_STATE_MIRROR_MAKER, (float) this.calHealthState(resultList).getDimension());
return metrics;
}
/**************************************************** Aggregate data ****************************************************/
public List<HealthScoreResult> convert2HealthScoreResultList(Long clusterPhyId, List<HealthCheckResultPO> poList, Integer dimensionCode) {
Map<String, List<HealthCheckResultPO>> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po: poList) {
groupByCheckNamePOMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
groupByCheckNamePOMap.get(po.getConfigName()).add(po);
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
}
Map<String, BaseClusterHealthConfig> configMap = healthCheckResultService.getClusterHealthConfig(clusterPhyId);
List<HealthCheckNameEnum> nameEnums =
dimensionCode == null?
Arrays.stream(HealthCheckNameEnum.values()).collect(Collectors.toList()): HealthCheckNameEnum.getByDimensionCode(dimensionCode);
List<HealthScoreResult> resultList = new ArrayList<>();
for (HealthCheckNameEnum nameEnum: nameEnums) {
BaseClusterHealthConfig baseConfig = configMap.get(nameEnum.getConfigName());
if (baseConfig == null) {
continue;
}
resultList.add(new HealthScoreResult(nameEnum, baseConfig, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
return resultList;
}
/**************************************************** Compute metrics ****************************************************/
private List<HealthCheckAggResult> getDimensionHealthCheckAggResult(List<HealthCheckResultPO> poList, List<Integer> dimensionCodeList) {
Map<String /* check name */, List<HealthCheckResultPO> /* check results */> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po : poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
}
List<HealthCheckAggResult> stateList = new ArrayList<>();
for (HealthCheckNameEnum nameEnum: HealthCheckNameEnum.getByDimension(dimensionEnum)) {
stateList.add(new HealthCheckAggResult(nameEnum, groupByCheckNamePOMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
for (Integer dimensionCode : dimensionCodeList) {
HealthCheckDimensionEnum dimensionEnum = HealthCheckDimensionEnum.getByCode(dimensionCode);
if (dimensionEnum.equals(UNKNOWN)) {
continue;
}
for (HealthCheckNameEnum nameEnum : HealthCheckNameEnum.getByDimension(dimensionEnum)) {
stateList.add(new HealthCheckAggResult(nameEnum, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
}
return stateList;
}
private float getHealthCheckPassed(List<HealthCheckAggResult> resultList){
if(ValidateUtils.isEmptyList(resultList)) {
private List<HealthScoreResult> getResHealthResult(Long clusterPhyId, List<Integer> dimensionCodeList, List<HealthCheckResultPO> poList) {
Map<String /* check name */, List<HealthCheckResultPO> /* check results */> checkResultMap = new HashMap<>();
for (HealthCheckResultPO po : poList) {
checkResultMap.putIfAbsent(po.getConfigName(), new ArrayList<>());
checkResultMap.get(po.getConfigName()).add(po);
}
Map<String, BaseClusterHealthConfig> configMap = healthCheckResultService.getClusterHealthConfig(clusterPhyId);
List<HealthScoreResult> healthScoreResultList = new ArrayList<>();
for (Integer dimensionCode : dimensionCodeList) {
HealthCheckDimensionEnum dimensionEnum = HealthCheckDimensionEnum.getByCode(dimensionCode);
// Skip if the dimension does not exist
if (dimensionEnum.equals(HealthCheckDimensionEnum.UNKNOWN)){
continue;
}
for (HealthCheckNameEnum nameEnum : HealthCheckNameEnum.getByDimension(dimensionEnum)) {
BaseClusterHealthConfig baseConfig = configMap.get(nameEnum.getConfigName());
if (baseConfig == null) {
continue;
}
healthScoreResultList.add(new HealthScoreResult(nameEnum, baseConfig, checkResultMap.getOrDefault(nameEnum.getConfigName(), new ArrayList<>())));
}
}
return healthScoreResultList;
}
private float getHealthCheckPassed(List<HealthCheckAggResult> aggResultList){
if(ValidateUtils.isEmptyList(aggResultList)) {
return 0f;
}
return Float.valueOf(resultList.stream().filter(elem -> elem.getPassed()).count());
return Float.valueOf(aggResultList.stream().filter(elem -> elem.getPassed()).count());
}
private HealthStateEnum calHealthState(List<HealthCheckAggResult> resultList) {
@@ -381,28 +544,15 @@ public class HealthStateServiceImpl implements HealthStateService {
return existNotPassed? HealthStateEnum.MEDIUM: HealthStateEnum.GOOD;
}
private float getHealthCheckResultPassed(List<HealthScoreResult> healthScoreResultList){
if(CollectionUtils.isEmpty(healthScoreResultList)){return 0f;}
return Float.valueOf(healthScoreResultList.stream().filter(elem -> elem.getPassed()).count());
}
private HealthStateEnum calHealthScoreResultState(List<HealthScoreResult> resultList) {
if(ValidateUtils.isEmptyList(resultList)) {
return HealthStateEnum.GOOD;
private boolean isKafkaClusterDown(Long clusterPhyId) {
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(clusterPhyId);
KafkaController kafkaController = kafkaControllerService.getKafkaControllerFromDB(clusterPhyId);
if (kafkaController != null && !kafkaController.alive()) {
return true;
} else if ((System.currentTimeMillis() - clusterPhy.getCreateTime().getTime() >= 5 * 60 * 1000) && kafkaController == null) {
// If the cluster has been registered for more than 5 minutes and there is still no kafkaController info, mark it as down
return true;
}
boolean existNotPassed = false;
for (HealthScoreResult aggResult: resultList) {
if (aggResult.getCheckNameEnum().isAvailableChecker() && !aggResult.getPassed()) {
return HealthStateEnum.POOR;
}
if (!aggResult.getPassed()) {
existNotPassed = true;
}
}
return existNotPassed? HealthStateEnum.MEDIUM: HealthStateEnum.GOOD;
return false;
}
}
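The roll-up rule used throughout this class (calHealthState, and the removed calHealthScoreResultState above) can be restated compactly: a failing availability check makes the resource POOR, any other failing check makes it MEDIUM, otherwise GOOD. A sketch of that rule, assuming HealthCheckAggResult and HealthStateEnum as used in this diff:
private HealthStateEnum rollUp(List<HealthCheckAggResult> aggResultList) {
    if (ValidateUtils.isEmptyList(aggResultList)) {
        return HealthStateEnum.GOOD;
    }
    boolean existNotPassed = false;
    for (HealthCheckAggResult aggResult : aggResultList) {
        if (aggResult.getCheckNameEnum().isAvailableChecker() && !aggResult.getPassed()) {
            return HealthStateEnum.POOR;      // an availability check failed
        }
        if (!aggResult.getPassed()) {
            existNotPassed = true;            // a non-availability check failed
        }
    }
    return existNotPassed ? HealthStateEnum.MEDIUM : HealthStateEnum.GOOD;
}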

View File

@@ -21,7 +21,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.job.JobHandler;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignJobService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;

View File

@@ -46,7 +46,7 @@ import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems.*;
@Service
public class JobServiceImpl implements JobService {

View File

@@ -12,7 +12,7 @@ import java.util.Map;
public interface KafkaControllerService {
Result<KafkaController> getControllerFromKafka(ClusterPhy clusterPhy);
int insertAndIgnoreDuplicateException(KafkaController kafkaController);
int insertAndIgnoreDuplicateException(KafkaController kafkaController, String controllerHost, String controllerRack);
int setNoKafkaController(Long clusterPhyId, Long triggerTime);

View File

@@ -5,7 +5,6 @@ import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
@@ -15,7 +14,6 @@ import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterRunStateEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.kafkacontroller.KafkaControllerDAO;
@@ -32,9 +30,6 @@ import java.util.*;
public class KafkaControllerServiceImpl implements KafkaControllerService {
private static final ILog log = LogFactory.getLog(KafkaControllerServiceImpl.class);
@Autowired
private BrokerService brokerService;
@Autowired
private KafkaAdminClient kafkaAdminClient;
@@ -54,16 +49,14 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
}
@Override
public int insertAndIgnoreDuplicateException(KafkaController kafkaController) {
public int insertAndIgnoreDuplicateException(KafkaController kafkaController, String controllerHost, String controllerRack) {
try {
Broker broker = brokerService.getBrokerFromCacheFirst(kafkaController.getClusterPhyId(), kafkaController.getBrokerId());
KafkaControllerPO kafkaControllerPO = new KafkaControllerPO();
kafkaControllerPO.setClusterPhyId(kafkaController.getClusterPhyId());
kafkaControllerPO.setBrokerId(kafkaController.getBrokerId());
kafkaControllerPO.setTimestamp(kafkaController.getTimestamp());
kafkaControllerPO.setBrokerHost(broker != null? broker.getHost(): "");
kafkaControllerPO.setBrokerRack(broker != null? broker.getRack(): "");
kafkaControllerPO.setBrokerHost(controllerHost != null? controllerHost: "");
kafkaControllerPO.setBrokerRack(controllerRack != null? controllerRack: "");
kafkaControllerDAO.insert(kafkaControllerPO);
} catch (DuplicateKeyException dke) {
// ignore
@@ -92,7 +85,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
// Normalize to seconds, and subtract one second to avoid GC making the timestamp inaccurate
noKafkaController.setTimestamp(triggerTime);
return this.insertAndIgnoreDuplicateException(noKafkaController);
return this.insertAndIgnoreDuplicateException(noKafkaController, "", "");
}
@Override
@@ -140,7 +133,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
try {
adminClient = kafkaAdminClient.getClient(clusterPhy.getId());
} catch (Exception e) {
log.error("class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getControllerFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
// Cluster is already loaded, but creating the admin-client failed, so mark it as having no controller
return Result.buildSuc();
@@ -178,7 +171,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
));
} catch (Exception e) {
log.error(
"class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||tryTime={}||errMsg=exception",
"method=getControllerFromAdminClient||clusterPhyId={}||tryTime={}||errMsg=exception",
clusterPhy.getId(), i, e
);
}
@@ -192,7 +185,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
try {
return Result.buildSuc(kafkaZKDAO.getKafkaController(clusterPhy.getId(), false));
} catch (Exception e) {
log.error("class=KafkaControllerServiceImpl||method=getControllerFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getControllerFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
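The change above passes the controller's host and rack in explicitly instead of resolving them through BrokerService. For orientation, a hedged sketch of how the controller node can be read from the standard Kafka Admin API (generic client usage, not necessarily how this project wires it):
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.common.Node;
import java.util.Properties;
public class ControllerLookupSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed address
        try (Admin admin = AdminClient.create(props)) {
            // describeCluster() exposes the current controller node.
            Node controller = admin.describeCluster().controller().get();
            System.out.println("controller id=" + controller.id()
                    + " host=" + controller.host() + " rack=" + controller.rack());
        }
    }
}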

View File

@@ -27,6 +27,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.kafkauser.KafkaUserService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
@@ -54,7 +55,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class KafkaUserServiceImpl extends BaseVersionControlService implements KafkaUserService {
public class KafkaUserServiceImpl extends BaseKafkaVersionControlService implements KafkaUserService {
private static final ILog log = LogFactory.getLog(KafkaUserServiceImpl.class);
private static final String KAFKA_USER_REPLACE = "replaceKafkaUser";

View File

@@ -7,6 +7,9 @@ import com.didiglobal.logi.security.service.OplogService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
@Service
public class OpLogWrapServiceImpl implements OpLogWrapService {
@@ -18,6 +21,12 @@ public class OpLogWrapServiceImpl implements OpLogWrapService {
@Override
public Integer saveOplogAndIgnoreException(OplogDTO oplogDTO) {
try {
// fix request that cannot find thread binding (issue#743)
ServletRequestAttributes servletRequestAttributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
if (null == servletRequestAttributes) {
servletRequestAttributes = new ServletRequestAttributes(new MockHttpServletRequest());
RequestContextHolder.setRequestAttributes(servletRequestAttributes, true);
}
return oplogService.saveOplog(oplogDTO);
} catch (Exception e) {
log.error("method=saveOplogAndIgnoreException||oplogDTO={}||errMsg=exception.", oplogDTO, e);

View File

@@ -26,6 +26,5 @@ public interface PartitionMetricService {
* Get metrics from ES
*/
PartitionMetrics getLatestMetricsFromES(Long clusterPhyId, String topic, Integer brokerId, Integer partitionId, List<String> metricNameList);
Result<List<PartitionMetrics>> getLatestMetricsFromES(Long clusterPhyId, String topicName, List<String> metricNameList);
}

View File

@@ -1,10 +1,11 @@
package com.xiaojukeji.know.streaming.km.core.service.partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.partition.PartitionPO;
import org.apache.kafka.clients.admin.OffsetSpec;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import org.apache.kafka.common.TopicPartition;
import java.util.List;
@@ -12,49 +13,40 @@ import java.util.Map;
import java.util.Set;
public interface PartitionService {
/**
* Get partition info from Kafka
*/
Result<Map<String, List<Partition>>> listPartitionsFromKafka(ClusterPhy clusterPhy);
Result<List<Partition>> listPartitionsFromKafka(ClusterPhy clusterPhy, String topicName);
/**
* Get partition info from DB
*/
List<Partition> listPartitionByCluster(Long clusterPhyId);
List<PartitionPO> listPartitionPOByCluster(Long clusterPhyId);
/**
* List the partitions of a Topic
*/
List<Partition> listPartitionByTopic(Long clusterPhyId, String topicName);
/**
* List the partitions on a Broker
*/
List<Partition> listPartitionByBroker(Long clusterPhyId, Integer brokerId);
/**
* Get a specific partition
*/
Partition getPartitionByTopicAndPartitionId(Long clusterPhyId, String topicName, Integer partitionId);
/**************************************************** Get partition info from cache first ****************************************************/
/**
* Get partition info from the cache first; fall back to the DB when it is not cached
*/
List<Partition> listPartitionFromCacheFirst(Long clusterPhyId);
List<Partition> listPartitionFromCacheFirst(Long clusterPhyId, Integer brokerId);
List<Partition> listPartitionFromCacheFirst(Long clusterPhyId, String topicName);
Partition getPartitionFromCacheFirst(Long clusterPhyId, String topicName, Integer partitionId);
/**
* Get the number of partitions in the cluster
* Get partition offset info
*/
Integer getPartitionSizeByClusterId(Long clusterPhyId);
Integer getLeaderPartitionSizeByClusterId(Long clusterPhyId);
Integer getNoLeaderPartitionSizeByClusterId(Long clusterPhyId);
Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, OffsetSpec offsetSpec, Long timestamp);
Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, Integer partitionId, OffsetSpec offsetSpec, Long timestamp);
Result<Map<TopicPartition, Long>> getAllPartitionOffsetFromKafka(Long clusterPhyId, KSOffsetSpec offsetSpec);
Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, KSOffsetSpec offsetSpec);
Result<Tuple<Map<TopicPartition, Long>/*begin offset*/, Map<TopicPartition, Long>/*end offset*/>> getPartitionBeginAndEndOffsetFromKafka(Long clusterPhyId, String topicName);
Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, Integer partitionId, KSOffsetSpec offsetSpec);
Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, List<TopicPartition> tpList, KSOffsetSpec offsetSpec);
/**
* Update partition info
*/
int updatePartitions(Long clusterPhyId, String topicName, List<Partition> kafkaPartitionList, List<PartitionPO> dbPartitionList);
void deletePartitionsIfNotIn(Long clusterPhyId, Set<String> topicNameSet);
}
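The interface now accepts the project's KSOffsetSpec wrapper rather than Kafka's OffsetSpec. For orientation, a hedged sketch of the underlying Kafka Admin call such offset lookups typically delegate to (plain Kafka client API; KSOffsetSpec is assumed to wrap OffsetSpec):
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class PartitionOffsetSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed address
        try (Admin admin = AdminClient.create(props)) {
            Map<TopicPartition, OffsetSpec> request = new HashMap<>();
            request.put(new TopicPartition("demo-topic", 0), OffsetSpec.earliest()); // begin offset
            request.put(new TopicPartition("demo-topic", 1), OffsetSpec.latest());   // end offset
            Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> offsets =
                    admin.listOffsets(request).all().get();
            offsets.forEach((tp, info) -> System.out.println(tp + " -> " + info.offset()));
        }
    }
}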

View File

@@ -10,7 +10,7 @@ import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import kafka.zk.KafkaZkClient;
@@ -36,7 +36,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
* @author didi
*/
@Service
public class OpPartitionServiceImpl extends BaseVersionControlService implements OpPartitionService {
public class OpPartitionServiceImpl extends BaseKafkaVersionControlService implements OpPartitionService {
private static final ILog LOGGER = LogFactory.getLog(OpPartitionServiceImpl.class);
@Autowired
@@ -84,7 +84,7 @@ public class OpPartitionServiceImpl extends BaseVersionControlService implements
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=OpPartitionServiceImpl||method=preferredReplicaElectionByZKClient||clusterPhyId={}||errMsg=exception",
"method=preferredReplicaElectionByZKClient||clusterPhyId={}||errMsg=exception",
partitionParam.getClusterPhyId(), e
);
@@ -109,7 +109,7 @@ public class OpPartitionServiceImpl extends BaseVersionControlService implements
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=OpPartitionServiceImpl||method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception",
"method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception",
partitionParam.getClusterPhyId(), e
);
View File
@@ -15,6 +15,7 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService;
@@ -22,7 +23,6 @@ import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseMetricService;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.PartitionMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@@ -34,7 +34,7 @@ import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.PartitionMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.PartitionMetricVersionItems.*;
/**
* @author didi
@@ -176,50 +176,45 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
Map<Integer, PartitionMetrics> metricsMap = new HashMap<>();
// begin offset metric
Result<Map<TopicPartition, Long>> beginOffsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, OffsetSpec.earliest(), null);
if (beginOffsetMapResult.hasData()) {
for (Map.Entry<TopicPartition, Long> entry: beginOffsetMapResult.getData().entrySet()) {
Partition partition = partitionMap.get(entry.getKey().partition());
PartitionMetrics metrics = metricsMap.getOrDefault(
entry.getKey().partition(),
new PartitionMetrics(clusterPhyId, topicName, partition != null? partition.getLeaderBrokerId(): KafkaConstant.NO_LEADER, entry.getKey().partition())
);
metrics.putMetric(PARTITION_METRIC_LOG_START_OFFSET, entry.getValue().floatValue());
metricsMap.put(entry.getKey().partition(), metrics);
}
} else {
// offset metrics
Result<Tuple<Map<TopicPartition, Long>, Map<TopicPartition, Long>>> offsetResult = partitionService.getPartitionBeginAndEndOffsetFromKafka(clusterPhyId, topicName);
if (offsetResult.failed()) {
LOGGER.warn(
"class=PartitionMetricServiceImpl||method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||resultMsg={}||msg=get begin offset failed",
clusterPhyId, topicName, beginOffsetMapResult.getMessage()
"method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||result={}||msg=get offset failed",
clusterPhyId, topicName, offsetResult
);
return Result.buildFromIgnoreData(offsetResult);
}
// begin offset metric
for (Map.Entry<TopicPartition, Long> entry: offsetResult.getData().v1().entrySet()) {
Partition partition = partitionMap.get(entry.getKey().partition());
PartitionMetrics metrics = metricsMap.getOrDefault(
entry.getKey().partition(),
new PartitionMetrics(clusterPhyId, topicName, partition != null? partition.getLeaderBrokerId(): KafkaConstant.NO_LEADER, entry.getKey().partition())
);
metrics.putMetric(PARTITION_METRIC_LOG_START_OFFSET, entry.getValue().floatValue());
metricsMap.put(entry.getKey().partition(), metrics);
}
// end offset metric
Result<Map<TopicPartition, Long>> endOffsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, OffsetSpec.latest(), null);
if (endOffsetMapResult.hasData()) {
for (Map.Entry<TopicPartition, Long> entry: endOffsetMapResult.getData().entrySet()) {
Partition partition = partitionMap.get(entry.getKey().partition());
PartitionMetrics metrics = metricsMap.getOrDefault(
entry.getKey().partition(),
new PartitionMetrics(clusterPhyId, topicName, partition != null? partition.getLeaderBrokerId(): KafkaConstant.NO_LEADER, entry.getKey().partition())
);
metrics.putMetric(PARTITION_METRIC_LOG_END_OFFSET, entry.getValue().floatValue());
metricsMap.put(entry.getKey().partition(), metrics);
}
} else {
LOGGER.warn(
"class=PartitionMetricServiceImpl||method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||resultMsg={}||msg=get end offset failed",
clusterPhyId, topicName, endOffsetMapResult.getMessage()
for (Map.Entry<TopicPartition, Long> entry: offsetResult.getData().v2().entrySet()) {
Partition partition = partitionMap.get(entry.getKey().partition());
PartitionMetrics metrics = metricsMap.getOrDefault(
entry.getKey().partition(),
new PartitionMetrics(clusterPhyId, topicName, partition != null? partition.getLeaderBrokerId(): KafkaConstant.NO_LEADER, entry.getKey().partition())
);
metrics.putMetric(PARTITION_METRIC_LOG_END_OFFSET, entry.getValue().floatValue());
metricsMap.put(entry.getKey().partition(), metrics);
}
// messages metric
if (endOffsetMapResult.hasData() && beginOffsetMapResult.hasData()) {
for (Map.Entry<TopicPartition, Long> entry: endOffsetMapResult.getData().entrySet()) {
Long beginOffset = beginOffsetMapResult.getData().get(entry.getKey());
if (!ValidateUtils.isEmptyMap(offsetResult.getData().v1()) && !ValidateUtils.isEmptyMap(offsetResult.getData().v2())) {
for (Map.Entry<TopicPartition, Long> entry: offsetResult.getData().v2().entrySet()) {
Long beginOffset = offsetResult.getData().v1().get(entry.getKey());
if (beginOffset == null) {
continue;
}
@@ -235,8 +230,8 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
}
} else {
LOGGER.warn(
"class=PartitionMetricServiceImpl||method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||endResultMsg={}||beginResultMsg={}||msg=get messages failed",
clusterPhyId, topicName, endOffsetMapResult.getMessage(), beginOffsetMapResult.getMessage()
"method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||offsetData={}||msg=get messages failed",
clusterPhyId, topicName, ConvertUtil.obj2Json(offsetResult.getData())
);
}
@@ -283,10 +278,9 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
} catch (InstanceNotFoundException e) {
// ignore
continue;
} catch (Exception e) {
LOGGER.error(
"class=PartitionMetricServiceImpl||method=getMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
"method=getMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
clusterPhyId, topicName, partition.getPartitionId(), partition.getLeaderBrokerId(), metricName, e.getClass().getName()
);
}
@@ -326,7 +320,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
// 4. fetch the JMX metric
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxInfo.getJmxObjectName() + ",topic=" + topicName), jmxInfo.getJmxAttribute()).toString();
Long leaderCount = partitionList.stream().filter(elem -> elem.getLeaderBrokerId().equals(partition.getLeaderBrokerId())).count();
long leaderCount = partitionList.stream().filter(elem -> elem.getLeaderBrokerId().equals(partition.getLeaderBrokerId())).count();
if (leaderCount <= 0) {
// the leader has already moved to another broker
continue;
@@ -338,10 +332,9 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
} catch (InstanceNotFoundException e) {
// ignore
continue;
} catch (Exception e) {
LOGGER.error(
"class=PartitionMetricServiceImpl||method=getTopicAvgMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
"method=getTopicAvgMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
clusterPhyId, topicName, partition.getPartitionId(), partition.getLeaderBrokerId(), metricName, e.getClass().getName()
);
}
View File
@@ -3,9 +3,8 @@ package com.xiaojukeji.know.streaming.km.core.service.partition.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.partition.PartitionOffsetParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
@@ -20,11 +19,14 @@ import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.Triple;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.persistence.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionMap;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers.PartitionState;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaConsumerClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.partition.PartitionDAO;
@@ -44,7 +46,6 @@ import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -55,8 +56,8 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
/**
* @author didi
*/
@Service("partitionService")
public class PartitionServiceImpl extends BaseVersionControlService implements PartitionService {
@Service
public class PartitionServiceImpl extends BaseKafkaVersionControlService implements PartitionService {
private static final ILog log = LogFactory.getLog(PartitionServiceImpl.class);
private static final String PARTITION_OFFSET_GET = "getPartitionOffset";
@@ -78,15 +79,10 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
return SERVICE_OP_PARTITION;
}
private final Cache<String, List<Partition>> partitionsCache = Caffeine.newBuilder()
.expireAfterWrite(90, TimeUnit.SECONDS)
.maximumSize(1000)
.build();
@PostConstruct
private void init() {
registerVCHandler(PARTITION_OFFSET_GET, V_0_10_0_0, V_0_11_0_0, "getPartitionOffsetFromKafkaConsumerClient", this::getPartitionOffsetFromKafkaConsumerClient);
registerVCHandler(PARTITION_OFFSET_GET, V_0_11_0_0, V_MAX, "getPartitionOffsetFromKafkaAdminClient", this::getPartitionOffsetFromKafkaAdminClient);
registerVCHandler(PARTITION_OFFSET_GET, V_0_10_0_0, V_0_11_0_0, "batchGetPartitionOffsetFromKafkaConsumerClient", this::batchGetPartitionOffsetFromKafkaConsumerClient);
registerVCHandler(PARTITION_OFFSET_GET, V_0_11_0_0, V_MAX, "batchGetPartitionOffsetFromKafkaAdminClient", this::batchGetPartitionOffsetFromKafkaAdminClient);
}
@Override
@@ -133,17 +129,32 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
}
@Override
public List<Partition> listPartitionFromCacheFirst(Long clusterPhyId, String topicName) {
String clusterPhyIdAndTopicKey = MsgConstant.getClusterTopicKey(clusterPhyId, topicName);
List<Partition> partitionList = partitionsCache.getIfPresent(clusterPhyIdAndTopicKey);
public List<Partition> listPartitionFromCacheFirst(Long clusterPhyId) {
Map<String, List<Partition>> partitionMap = DataBaseDataLocalCache.getPartitions(clusterPhyId);
if (!ValidateUtils.isNull(partitionList)) {
return partitionList;
if (partitionMap != null) {
return partitionMap.values().stream().collect(ArrayList::new, ArrayList::addAll, ArrayList::addAll);
}
partitionList = this.listPartitionByTopic(clusterPhyId, topicName);
partitionsCache.put(clusterPhyIdAndTopicKey, partitionList);
return partitionList;
return this.listPartitionByCluster(clusterPhyId);
}
@Override
public List<Partition> listPartitionFromCacheFirst(Long clusterPhyId, Integer brokerId) {
List<Partition> partitionList = this.listPartitionFromCacheFirst(clusterPhyId);
return partitionList.stream().filter(elem -> elem.getAssignReplicaList().contains(brokerId)).collect(Collectors.toList());
}
@Override
public List<Partition> listPartitionFromCacheFirst(Long clusterPhyId, String topicName) {
Map<String, List<Partition>> partitionMap = DataBaseDataLocalCache.getPartitions(clusterPhyId);
if (partitionMap != null) {
return partitionMap.getOrDefault(topicName, new ArrayList<>());
}
return this.listPartitionByTopic(clusterPhyId, topicName);
}
@Override
@@ -162,16 +173,6 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
return null;
}
@Override
public List<Partition> listPartitionByBroker(Long clusterPhyId, Integer brokerId) {
LambdaQueryWrapper<PartitionPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(PartitionPO::getClusterPhyId, clusterPhyId);
List<Partition> partitionList = this.convert2PartitionList(partitionDAO.selectList(lambdaQueryWrapper));
return partitionList.stream().filter(elem -> elem.getAssignReplicaList().contains(brokerId)).collect(Collectors.toList());
}
@Override
public Partition getPartitionByTopicAndPartitionId(Long clusterPhyId, String topicName, Integer partitionId) {
LambdaQueryWrapper<PartitionPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
@@ -183,71 +184,127 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
}
@Override
public Integer getPartitionSizeByClusterId(Long clusterPhyId) {
LambdaQueryWrapper<PartitionPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(PartitionPO::getClusterPhyId, clusterPhyId);
return partitionDAO.selectCount(lambdaQueryWrapper);
}
@Override
public Integer getLeaderPartitionSizeByClusterId(Long clusterPhyId) {
LambdaQueryWrapper<PartitionPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(PartitionPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.ne(PartitionPO::getLeaderBrokerId, -1);
return partitionDAO.selectCount(lambdaQueryWrapper);
}
@Override
public Integer getNoLeaderPartitionSizeByClusterId(Long clusterPhyId) {
LambdaQueryWrapper<PartitionPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(PartitionPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.eq(PartitionPO::getLeaderBrokerId, -1);
return partitionDAO.selectCount(lambdaQueryWrapper);
}
@Override
public Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, OffsetSpec offsetSpec, Long timestamp) {
Map<TopicPartition, OffsetSpec> topicPartitionOffsets = new HashMap<>();
List<Partition> partitionList = this.listPartitionByTopic(clusterPhyId, topicName);
if (partitionList == null || partitionList.isEmpty()) {
// the topic does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(clusterPhyId, topicName));
}
partitionList.stream()
public Result<Map<TopicPartition, Long>> getAllPartitionOffsetFromKafka(Long clusterPhyId, KSOffsetSpec offsetSpec) {
List<TopicPartition> tpList = this.listPartitionFromCacheFirst(clusterPhyId).stream()
.filter(item -> !item.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER))
.forEach(elem -> topicPartitionOffsets.put(new TopicPartition(topicName, elem.getPartitionId()), offsetSpec));
if (topicPartitionOffsets.isEmpty()) {
// all partitions have no leader
return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FAILED, MsgConstant.getPartitionNoLeader(clusterPhyId, topicName));
}
.map(elem -> new TopicPartition(elem.getTopicName(), elem.getPartitionId()))
.collect(Collectors.toList());
try {
return (Result<Map<TopicPartition, Long>>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, topicName, topicPartitionOffsets, timestamp));
Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> listResult =
(Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, offsetSpec, tpList));
return this.convert2OffsetMapResult(listResult);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, Integer partitionId, OffsetSpec offsetSpec, Long timestamp) {
if (partitionId == null) {
return this.getPartitionOffsetFromKafka(clusterPhyId, topicName, offsetSpec, timestamp);
public Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, KSOffsetSpec offsetSpec) {
List<TopicPartition> tpList = this.listPartitionFromCacheFirst(clusterPhyId, topicName).stream()
.filter(item -> !item.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER))
.map(elem -> new TopicPartition(topicName, elem.getPartitionId()))
.collect(Collectors.toList());
if (tpList.isEmpty()) {
// all partitions have no leader
return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FAILED, MsgConstant.getPartitionNoLeader(clusterPhyId, topicName));
}
Map<TopicPartition, OffsetSpec> topicPartitionOffsets = new HashMap<>();
this.listPartitionByTopic(clusterPhyId, topicName)
.stream()
.filter(elem -> elem.getPartitionId().equals(partitionId))
.forEach(elem -> topicPartitionOffsets.put(new TopicPartition(topicName, elem.getPartitionId()), offsetSpec));
try {
Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> listResult =
(Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, topicName, offsetSpec, tpList));
return this.convert2OffsetMapResult(listResult);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<Tuple<Map<TopicPartition, Long>, Map<TopicPartition, Long>>> getPartitionBeginAndEndOffsetFromKafka(Long clusterPhyId, String topicName) {
List<TopicPartition> tpList = this.listPartitionFromCacheFirst(clusterPhyId, topicName).stream()
.filter(item -> !item.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER))
.map(elem -> new TopicPartition(topicName, elem.getPartitionId()))
.collect(Collectors.toList());
if (tpList.isEmpty()) {
// 所有分区no-leader
return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FAILED, MsgConstant.getPartitionNoLeader(clusterPhyId, topicName));
}
try {
return (Result<Map<TopicPartition, Long>>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, topicName, topicPartitionOffsets, timestamp));
Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> listResult =
(Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, topicName, Arrays.asList(KSOffsetSpec.earliest(), KSOffsetSpec.latest()), tpList));
if (listResult.failed()) {
return Result.buildFromIgnoreData(listResult);
} else if (ValidateUtils.isEmptyList(listResult.getData())) {
return Result.buildSuc(new Tuple<Map<TopicPartition, Long>, Map<TopicPartition, Long>>(new HashMap<>(0), new HashMap<>(0)));
}
Tuple<Map<TopicPartition, Long>, Map<TopicPartition, Long>> tuple = new Tuple<>(new HashMap<>(0), new HashMap<>(0));
listResult.getData().forEach(elem -> {
if (elem.getV1() instanceof KSOffsetSpec.KSEarliestSpec) {
tuple.setV1(elem.v2());
} else if (elem.v1() instanceof KSOffsetSpec.KSLatestSpec) {
tuple.setV2(elem.v2());
}
});
return Result.buildSuc(tuple);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
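// For reference, a sketch of what the KSOffsetSpec hierarchy used above is assumed to look like.
// The real class is added elsewhere in this commit (com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec);
// everything below is inferred from its usage in this file and may differ:
//
//     public abstract class KSOffsetSpec {
//         public static class KSEarliestSpec extends KSOffsetSpec { }
//         public static class KSLatestSpec extends KSOffsetSpec { }
//         public static class KSTimestampSpec extends KSOffsetSpec {
//             private final long timestamp;
//             public KSTimestampSpec(long timestamp) { this.timestamp = timestamp; }
//             public long timestamp() { return timestamp; }
//         }
//         public static KSOffsetSpec earliest() { return new KSEarliestSpec(); }
//         public static KSOffsetSpec latest() { return new KSLatestSpec(); }
//         public static KSOffsetSpec forTimestamp(long timestamp) { return new KSTimestampSpec(timestamp); }
//     }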
@Override
public Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, String topicName, Integer partitionId, KSOffsetSpec offsetSpec) {
if (partitionId == null) {
return this.getPartitionOffsetFromKafka(clusterPhyId, topicName, offsetSpec);
}
List<TopicPartition> tpList = this.listPartitionFromCacheFirst(clusterPhyId, topicName).stream()
.filter(item -> !item.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER))
.filter(partition -> partition.getPartitionId().equals(partitionId))
.map(elem -> new TopicPartition(topicName, elem.getPartitionId()))
.collect(Collectors.toList());
if (ValidateUtils.isEmptyList(tpList)) {
return Result.buildSuc(new HashMap<>(0));
}
try {
Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> listResult =
(Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>>) doVCHandler(clusterPhyId, PARTITION_OFFSET_GET, new PartitionOffsetParam(clusterPhyId, topicName, offsetSpec, tpList));
return this.convert2OffsetMapResult(listResult);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
}
@Override
public Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafka(Long clusterPhyId, List<TopicPartition> tpList, KSOffsetSpec offsetSpec) {
// partitions in the cluster that currently have a leader
Set<TopicPartition> existLeaderTPSet = this.listPartitionFromCacheFirst(clusterPhyId).stream()
.filter(item -> !item.getLeaderBrokerId().equals(KafkaConstant.NO_LEADER))
.map(elem -> new TopicPartition(elem.getTopicName(), elem.getPartitionId()))
.collect(Collectors.toSet());
List<TopicPartition> existLeaderTPList = tpList.stream().filter(elem -> existLeaderTPSet.contains(elem)).collect(Collectors.toList());
if (existLeaderTPList.isEmpty()) {
return Result.buildSuc(new HashMap<>(0));
}
try {
Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> listResult = (Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>>) doVCHandler(
clusterPhyId,
PARTITION_OFFSET_GET,
new PartitionOffsetParam(clusterPhyId, offsetSpec, existLeaderTPList)
);
return this.convert2OffsetMapResult(listResult);
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(VC_HANDLE_NOT_EXIST);
}
@@ -267,6 +324,10 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
}
PartitionPO presentPartitionPO = this.convert2PartitionPO(partition);
if (presentPartitionPO.equals(dbPartitionPO)) {
// data unchanged, skip the DB update
continue;
}
presentPartitionPO.setId(dbPartitionPO.getId());
partitionDAO.updateById(presentPartitionPO);
}
@@ -306,64 +367,137 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
/**************************************************** private method ****************************************************/
private Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafkaAdminClient(VersionItemParam itemParam) {
private Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> batchGetPartitionOffsetFromKafkaAdminClient(VersionItemParam itemParam) {
PartitionOffsetParam offsetParam = (PartitionOffsetParam) itemParam;
if (offsetParam.getOffsetSpecList().isEmpty()) {
return Result.buildSuc(Collections.emptyList());
}
List<Triple<String, KSOffsetSpec, ListOffsetsResult>> resultList = new ArrayList<>();
for (Triple<String, KSOffsetSpec, List<TopicPartition>> elem: offsetParam.getOffsetSpecList()) {
Result<ListOffsetsResult> offsetsResult = this.getPartitionOffsetFromKafkaAdminClient(
offsetParam.getClusterPhyId(),
elem.v1(),
elem.v2(),
elem.v3()
);
if (offsetsResult.failed() && offsetParam.getOffsetSpecList().size() == 1) {
return Result.buildFromIgnoreData(offsetsResult);
}
if (offsetsResult.hasData()) {
resultList.add(new Triple<>(elem.v1(), elem.v2(), offsetsResult.getData()));
}
}
List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>> offsetMapList = new ArrayList<>();
for (Triple<String, KSOffsetSpec, ListOffsetsResult> triple: resultList) {
try {
Map<TopicPartition, Long> offsetMap = new HashMap<>();
triple.v3().all().get().entrySet().stream().forEach(elem -> offsetMap.put(elem.getKey(), elem.getValue().offset()));
offsetMapList.add(new Tuple<>(triple.v2(), offsetMap));
} catch (Exception e) {
log.error(
"method=batchGetPartitionOffsetFromKafkaAdminClient||clusterPhyId={}||topicName={}||offsetSpec={}||errMsg=exception!",
offsetParam.getClusterPhyId(), triple.v1(), triple.v2(), e
);
}
}
return Result.buildSuc(offsetMapList);
}
private Result<ListOffsetsResult> getPartitionOffsetFromKafkaAdminClient(Long clusterPhyId, String topicName, KSOffsetSpec offsetSpec, List<TopicPartition> tpList) {
try {
AdminClient adminClient = kafkaAdminClient.getClient(offsetParam.getClusterPhyId());
AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId);
ListOffsetsResult listOffsetsResult = adminClient.listOffsets(offsetParam.getTopicPartitionOffsets(), new ListOffsetsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS));
Map<TopicPartition, OffsetSpec> kafkaOffsetSpecMap = new HashMap<>(tpList.size());
tpList.forEach(elem -> {
if (offsetSpec instanceof KSOffsetSpec.KSEarliestSpec) {
kafkaOffsetSpecMap.put(elem, OffsetSpec.earliest());
} else if (offsetSpec instanceof KSOffsetSpec.KSLatestSpec) {
kafkaOffsetSpecMap.put(elem, OffsetSpec.latest());
} else if (offsetSpec instanceof KSOffsetSpec.KSTimestampSpec) {
kafkaOffsetSpecMap.put(elem, OffsetSpec.forTimestamp(((KSOffsetSpec.KSTimestampSpec) offsetSpec).timestamp()));
}
});
Map<TopicPartition, Long> offsetMap = new HashMap<>();
listOffsetsResult.all().get().entrySet().stream().forEach(elem -> offsetMap.put(elem.getKey(), elem.getValue().offset()));
ListOffsetsResult listOffsetsResult = adminClient.listOffsets(kafkaOffsetSpecMap, new ListOffsetsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS));
return Result.buildSuc(offsetMap);
return Result.buildSuc(listOffsetsResult);
} catch (NotExistException nee) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(offsetParam.getClusterPhyId()));
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
} catch (Exception e) {
log.error(
"class=PartitionServiceImpl||method=getPartitionOffsetFromKafkaAdminClient||clusterPhyId={}||topicName={}||errMsg=exception!",
offsetParam.getClusterPhyId(), offsetParam.getTopicName(), e
"method=getPartitionOffsetFromKafkaAdminClient||clusterPhyId={}||topicName={}||errMsg=exception!",
clusterPhyId, topicName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
}
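// A possible extraction of the inline KSOffsetSpec -> OffsetSpec translation above
// (reviewer sketch only, not part of this commit; behavior mirrors the loop in
// getPartitionOffsetFromKafkaAdminClient):
private static OffsetSpec toKafkaOffsetSpec(KSOffsetSpec offsetSpec) {
    if (offsetSpec instanceof KSOffsetSpec.KSEarliestSpec) {
        return OffsetSpec.earliest();
    }
    if (offsetSpec instanceof KSOffsetSpec.KSLatestSpec) {
        return OffsetSpec.latest();
    }
    if (offsetSpec instanceof KSOffsetSpec.KSTimestampSpec) {
        return OffsetSpec.forTimestamp(((KSOffsetSpec.KSTimestampSpec) offsetSpec).timestamp());
    }
    throw new IllegalArgumentException("unsupported KSOffsetSpec type: " + offsetSpec);
}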
private Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafkaConsumerClient(VersionItemParam itemParam) {
private Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> batchGetPartitionOffsetFromKafkaConsumerClient(VersionItemParam itemParam) {
PartitionOffsetParam offsetParam = (PartitionOffsetParam) itemParam;
if (offsetParam.getOffsetSpecList().isEmpty()) {
return Result.buildSuc(Collections.emptyList());
}
List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>> offsetMapList = new ArrayList<>();
for (Triple<String, KSOffsetSpec, List<TopicPartition>> triple: offsetParam.getOffsetSpecList()) {
Result<Map<TopicPartition, Long>> subOffsetMapResult = this.getPartitionOffsetFromKafkaConsumerClient(
offsetParam.getClusterPhyId(),
triple.v1(),
triple.v2(),
triple.v3()
);
if (subOffsetMapResult.failed() && offsetParam.getOffsetSpecList().size() == 1) {
return Result.buildFromIgnoreData(subOffsetMapResult);
}
if (subOffsetMapResult.hasData()) {
offsetMapList.add(new Tuple<>(triple.v2(), subOffsetMapResult.getData()));
}
}
return Result.buildSuc(offsetMapList);
}
private Result<Map<TopicPartition, Long>> getPartitionOffsetFromKafkaConsumerClient(Long clusterPhyId, String topicName, KSOffsetSpec offsetSpec, List<TopicPartition> tpList) {
KafkaConsumer<String, String> kafkaConsumer = null;
PartitionOffsetParam offsetParam = (PartitionOffsetParam) itemParam;
try {
if (ValidateUtils.isEmptyMap(offsetParam.getTopicPartitionOffsets())) {
if (ValidateUtils.isEmptyList(tpList)) {
return Result.buildSuc(new HashMap<>());
}
kafkaConsumer = kafkaConsumerClient.getClient(offsetParam.getClusterPhyId());
kafkaConsumer = kafkaConsumerClient.getClient(clusterPhyId);
OffsetSpec offsetSpec = new ArrayList<>(offsetParam.getTopicPartitionOffsets().values()).get(0);
if (offsetSpec instanceof OffsetSpec.LatestSpec) {
if (offsetSpec instanceof KSOffsetSpec.KSLatestSpec) {
return Result.buildSuc(
kafkaConsumer.endOffsets(
offsetParam.getTopicPartitionOffsets().keySet(),
tpList,
Duration.ofMillis(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
)
);
}
if (offsetSpec instanceof OffsetSpec.EarliestSpec) {
if (offsetSpec instanceof KSOffsetSpec.KSEarliestSpec) {
return Result.buildSuc(
kafkaConsumer.beginningOffsets(
offsetParam.getTopicPartitionOffsets().keySet(),
tpList,
Duration.ofMillis(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
)
);
}
if (offsetSpec instanceof OffsetSpec.TimestampSpec) {
if (offsetSpec instanceof KSOffsetSpec.KSTimestampSpec) {
// look up offsets by timestamp
Map<TopicPartition, Long> timestampMap = new HashMap<>();
offsetParam.getTopicPartitionOffsets().entrySet().stream().forEach(elem -> timestampMap.put(elem.getKey(), offsetParam.getTimestamp()));
tpList.forEach(elem -> timestampMap.put(elem, ((KSOffsetSpec.KSTimestampSpec) offsetSpec).timestamp()));
Map<TopicPartition, OffsetAndTimestamp> offsetMetadataMap = kafkaConsumer.offsetsForTimes(
timestampMap,
@@ -377,17 +511,17 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "OffsetSpec type illegal");
} catch (NotExistException nee) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(offsetParam.getClusterPhyId()));
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
} catch (Exception e) {
log.error(
"class=PartitionServiceImpl||method=getPartitionOffsetFromKafkaConsumerClient||clusterPhyId={}||topicName={}||errMsg=exception!",
offsetParam.getClusterPhyId(), offsetParam.getTopicName(), e
"method=getPartitionOffsetFromKafkaConsumerClient||clusterPhyId={}||topicName={}||errMsg=exception!",
clusterPhyId, topicName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
} finally {
if (kafkaConsumer != null) {
kafkaConsumerClient.returnClient(offsetParam.getClusterPhyId(), kafkaConsumer);
kafkaConsumerClient.returnClient(clusterPhyId, kafkaConsumer);
}
}
}
@@ -411,7 +545,7 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
return Result.buildSuc(partitionMap);
} catch (Exception e) {
log.error("class=PartitionServiceImpl||method=getPartitionsFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getPartitionsFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
@@ -430,7 +564,7 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
}
return Result.buildSuc(partitionMap);
} catch (Exception e) {
log.error("class=PartitionServiceImpl||method=getPartitionsFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getPartitionsFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
@@ -447,7 +581,7 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
TopicDescription description = describeTopicsResult.all().get().get(topicName);
return Result.buildSuc(PartitionConverter.convert2PartitionList(clusterPhy.getId(), description));
}catch (Exception e) {
log.error("class=PartitionServiceImpl||method=getPartitionsFromAdminClientByClusterTopicName||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhy.getId(),topicName, e);
log.error("method=getPartitionsFromAdminClientByClusterTopicName||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhy.getId(),topicName, e);
return Result.buildFailure(ResultStatus.KAFKA_OPERATE_FAILED);
}
}
@@ -470,7 +604,7 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
}
return Result.buildSuc(partitionList);
} catch (Exception e) {
log.error("class=PartitionServiceImpl||method=getPartitionsFromZKClientByClusterTopicName||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhy.getId(),topicName, e);
log.error("method=getPartitionsFromZKClientByClusterTopicName||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhy.getId(),topicName, e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
}
@@ -482,21 +616,24 @@ public class PartitionServiceImpl extends BaseVersionControlService implements P
List<Partition> partitionList = new ArrayList<>();
for (PartitionPO po: poList) {
if(null != po){partitionList.add(convert2Partition(po));}
if(null != po) {
partitionList.add(this.convert2Partition(po));
}
}
return partitionList;
}
private List<PartitionPO> convert2PartitionPOList(List<Partition> partitionList) {
if (partitionList == null) {
return new ArrayList<>();
private Result<Map<TopicPartition, Long>> convert2OffsetMapResult(Result<List<Tuple<KSOffsetSpec, Map<TopicPartition, Long>>>> listResult) {
if (listResult.failed()) {
return Result.buildFromIgnoreData(listResult);
} else if (ValidateUtils.isEmptyList(listResult.getData())) {
return Result.buildSuc(new HashMap<>(0));
}
List<PartitionPO> poList = new ArrayList<>();
for (Partition partition: partitionList) {
poList.add(this.convert2PartitionPO(partition));
}
return poList;
Map<TopicPartition, Long> offsetMap = new HashMap<>();
listResult.getData().forEach(elem -> offsetMap.putAll(elem.v2()));
return Result.buildSuc(offsetMap);
}
private PartitionPO convert2PartitionPO(Partition partition) {
View File
@@ -28,6 +28,7 @@ import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.common.utils.kafka.KafkaReassignUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
@@ -38,7 +39,7 @@ import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.persistence.mysql.reassign.ReassignJobDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.reassign.ReassignSubJobDAO;
import org.apache.kafka.common.TopicPartition;
@@ -385,11 +386,13 @@ public class ReassignJobServiceImpl implements ReassignJobService {
// update the job status
rv = this.checkAndSetSuccessIfFinished(jobPO, rrr.getData());
if (rv.successful()){
// if the job has not finished yet, return early; no need to consider preferred-replica re-election.
if (!rv.successful()) {
return Result.buildFromIgnoreData(rv);
}
// finished
// the job has finished: check whether a preferred-replica election is needed and run it.
rv = this.preferredReplicaElection(jobId);
@@ -500,16 +503,15 @@ public class ReassignJobServiceImpl implements ReassignJobService {
List<ReassignSubJobPO> subJobPOList = this.getSubJobsByJobId(jobId);
List<TopicPartition> topicPartitions = new ArrayList<>();
subJobPOList.stream().forEach(reassignPO -> {
Integer targetLeader = CommonUtils.string2IntList(reassignPO.getReassignBrokerIds()).get(0);
Integer originalLeader = CommonUtils.string2IntList(reassignPO.getOriginalBrokerIds()).get(0);
// partitions whose leader was replaced are added to the preferred-replica election list
if (!originalLeader.equals(targetLeader)){
if (!CommonUtils.checkFirstElementIsEquals(reassignPO.getReassignBrokerIds(), reassignPO.getOriginalBrokerIds())) {
topicPartitions.add(new TopicPartition(reassignPO.getTopicName(), reassignPO.getPartitionId()));
}
});
if (!topicPartitions.isEmpty()){
return opPartitionService.preferredReplicaElection(jobPO.getClusterPhyId(), topicPartitions);
// return success regardless of whether the election succeeds, so the job data still gets updated
if (!topicPartitions.isEmpty()) {
opPartitionService.preferredReplicaElection(jobPO.getClusterPhyId(), topicPartitions);
}
return Result.buildSuc();
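// Assumption (body not shown in this diff): CommonUtils.checkFirstElementIsEquals is taken to
// compare the first broker id (the preferred leader) of two comma-separated broker-id strings,
// roughly equivalent to the removed inline comparison above:
//
//     public static boolean checkFirstElementIsEquals(String brokerIdsA, String brokerIdsB) {
//         List<Integer> listA = string2IntList(brokerIdsA);
//         List<Integer> listB = string2IntList(brokerIdsB);
//         return !listA.isEmpty() && !listB.isEmpty() && listA.get(0).equals(listB.get(0));
//     }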
@@ -533,8 +535,12 @@ public class ReassignJobServiceImpl implements ReassignJobService {
if (dbSubPO == null) {
// not present in the DB yet
reassignSubJobDAO.insert(elem);
return;
}
// fill in the missing fields
this.completeInfo(elem,dbSubPO);
// already exists, so update it
elem.setId(dbSubPO.getId());
reassignSubJobDAO.updateById(elem);
@@ -564,13 +570,10 @@ public class ReassignJobServiceImpl implements ReassignJobService {
long now = System.currentTimeMillis();
boolean existNotFinished = false;
boolean unNeedPreferredReplicaElection = true;
boolean jobSucceed = false;
List<ReassignSubJobPO> subJobPOList = this.getSubJobsByJobId(jobPO.getId());
for (ReassignSubJobPO subJobPO: subJobPOList) {
if (!reassignmentResult.checkPreferredReplicaElectionUnNeed(subJobPO.getReassignBrokerIds(),subJobPO.getOriginalBrokerIds())) {
unNeedPreferredReplicaElection = false;
}
if (!reassignmentResult.checkPartitionFinished(subJobPO.getTopicName(), subJobPO.getPartitionId())) {
existNotFinished = true;
@@ -590,12 +593,13 @@ public class ReassignJobServiceImpl implements ReassignJobService {
// no partition is currently being reassigned and no sub-job is still running
ReassignJobPO newJobPO = new ReassignJobPO();
newJobPO.setId(jobPO.getId());
jobSucceed = true;
newJobPO.setStatus(JobStatusEnum.SUCCESS.getStatus());
newJobPO.setFinishedTime(new Date(now));
reassignJobDAO.updateById(newJobPO);
}
return Result.build(unNeedPreferredReplicaElection);
return Result.build(jobSucceed);
}
private Result<List<ReassignSubJobPO>> setJobInRunning(ReassignJobPO jobPO) {
@@ -860,4 +864,25 @@ public class ReassignJobServiceImpl implements ReassignJobService {
return returnRV;
}
private void completeInfo(ReassignSubJobPO newPO, ReassignSubJobPO dbPO) {
if (newPO.getJobId() == null) {
newPO.setJobId(dbPO.getJobId());
}
if (newPO.getTopicName() == null) {
newPO.setTopicName(dbPO.getTopicName());
}
if (newPO.getClusterPhyId() == null) {
newPO.setClusterPhyId(dbPO.getClusterPhyId());
}
if (newPO.getPartitionId() == null) {
newPO.setPartitionId(dbPO.getPartitionId());
}
if (newPO.getOriginalBrokerIds() == null || newPO.getOriginalBrokerIds().isEmpty()) {
newPO.setOriginalBrokerIds(dbPO.getOriginalBrokerIds());
}
if (newPO.getReassignBrokerIds() == null || newPO.getReassignBrokerIds().isEmpty()) {
newPO.setReassignBrokerIds(dbPO.getReassignBrokerIds());
}
}
}
View File
@@ -19,7 +19,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import kafka.admin.ReassignPartitionsCommand;
@@ -42,7 +42,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_REASSIGNMENT;
@Service
public class ReassignServiceImpl extends BaseVersionControlService implements ReassignService {
public class ReassignServiceImpl extends BaseKafkaVersionControlService implements ReassignService {
private static final ILog log = LogFactory.getLog(ReassignServiceImpl.class);
private static final String EXECUTE_TASK = "executeTask";
View File
@@ -11,7 +11,7 @@ import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignStrategyService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ReplicaMetricVersionItems;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
View File
@@ -1,9 +1,7 @@
package com.xiaojukeji.know.streaming.km.core.service.replica;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ReplicationMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import java.util.List;
@@ -14,13 +12,4 @@ public interface ReplicaMetricService {
*/
Result<ReplicationMetrics> collectReplicaMetricsFromKafka(Long clusterId, String topic, Integer partitionId, Integer brokerId, String metric);
Result<ReplicationMetrics> collectReplicaMetricsFromKafka(Long clusterId, String topicName, Integer partitionId, Integer brokerId, List<String> metricNameList);
/**
* Fetch metrics from ES
*/
@Deprecated
Result<List<MetricPointVO>> getMetricPointsFromES(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, MetricDTO dto);
@Deprecated
Result<ReplicationMetrics> getLatestMetricsFromES(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, List<String> metricNames);
}
View File
@@ -2,7 +2,6 @@ package com.xiaojukeji.know.streaming.km.core.service.replica.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ReplicationMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.metric.ReplicationMetricParam;
@@ -10,31 +9,26 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ReplicationMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseMetricService;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.ReplicationMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_REPLICATION;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_END_OFFSET;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_START_OFFSET;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_END_OFFSET;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_START_OFFSET;
/**
* @author didi
@@ -54,9 +48,6 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli
@Autowired
private PartitionService partitionService;
@Autowired
private ReplicationMetricESDAO replicationMetricESDAO;
@Override
protected List<String> listMetricPOFields(){
return BeanUtil.listBeanFields(ReplicationMetricPO.class);
@@ -87,8 +78,8 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli
Result<ReplicationMetrics> ret = this.collectReplicaMetricsFromKafka(
clusterId,
metrics.getTopic(),
metrics.getBrokerId(),
metrics.getPartitionId(),
metrics.getBrokerId(),
metricName
);
@@ -118,21 +109,6 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli
}
}
@Override
public Result<List<MetricPointVO>> getMetricPointsFromES(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, MetricDTO dto) {
Map<String/*metric*/, MetricPointVO> metricPointMap = replicationMetricESDAO.getReplicationMetricsPoint(clusterPhyId, topicName, brokerId, partitionId,
dto.getMetricsNames(), dto.getAggType(), dto.getStartTime(), dto.getEndTime());
List<MetricPointVO> metricPoints = new ArrayList<>(metricPointMap.values());
return Result.buildSuc(metricPoints);
}
@Override
public Result<ReplicationMetrics> getLatestMetricsFromES(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId, List<String> metricNames) {
ReplicationMetricPO metricPO = replicationMetricESDAO.getReplicationLatestMetrics(clusterPhyId, brokerId, topicName, partitionId, metricNames);
return Result.buildSuc(ConvertUtil.obj2Obj(metricPO, ReplicationMetrics.class));
}
/**************************************************** private method ****************************************************/
private Result<ReplicationMetrics> doNothing(VersionItemParam param) {
ReplicationMetricParam metricParam = (ReplicationMetricParam)param;
@@ -170,8 +146,8 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli
Integer brokerId = metricParam.getBrokerId();
Integer partitionId = metricParam.getPartitionId();
Result<ReplicationMetrics> endRet = this.collectReplicaMetricsFromKafka(clusterId, topic, brokerId, partitionId, REPLICATION_METRIC_LOG_END_OFFSET);
Result<ReplicationMetrics> startRet = this.collectReplicaMetricsFromKafka(clusterId, topic, brokerId, partitionId, REPLICATION_METRIC_LOG_START_OFFSET);
Result<ReplicationMetrics> endRet = this.collectReplicaMetricsFromKafka(clusterId, topic, partitionId, brokerId, REPLICATION_METRIC_LOG_END_OFFSET);
Result<ReplicationMetrics> startRet = this.collectReplicaMetricsFromKafka(clusterId, topic, partitionId, brokerId, REPLICATION_METRIC_LOG_START_OFFSET);
ReplicationMetrics replicationMetrics = new ReplicationMetrics(clusterId, topic, brokerId, partitionId);
if(null != endRet && endRet.successful() && null != startRet && startRet.successful()){
@@ -179,6 +155,8 @@ public class ReplicaMetricServiceImpl extends BaseMetricService implements Repli
Float startOffset = startRet.getData().getMetrics().get(REPLICATION_METRIC_LOG_START_OFFSET);
replicationMetrics.putMetric(metric, endOffset - startOffset);
replicationMetrics.putMetric(REPLICATION_METRIC_LOG_END_OFFSET, endOffset);
replicationMetrics.putMetric(REPLICATION_METRIC_LOG_START_OFFSET, startOffset);
}
return Result.buildSuc(replicationMetrics);
View File
@@ -23,7 +23,7 @@ public interface TopicMetricService {
/**
* Get metrics from the local cache first
*/
Map<String, TopicMetrics> getLatestMetricsFromCacheFirst(Long clusterPhyId);
Map<String, TopicMetrics> getLatestMetricsFromCache(Long clusterPhyId);
/**
* Get the latest metric of a topic on a specific broker
@@ -37,12 +37,9 @@ public interface TopicMetricService {
/**
* Get the latest topic-level metric record
* @param clusterPhyId
* @param topicNames
* @param metricNameList
* @return
*/
List<TopicMetrics> listTopicLatestMetricsFromES(Long clusterPhyId, List<String> topicNames, List<String> metricNameList);
/**
* Get the latest topic-level metric record
* @param clusterPhyId
View File
@@ -22,6 +22,7 @@ public interface TopicService {
* Fetch data from the DB
*/
List<Topic> listTopicsFromDB(Long clusterPhyId);
List<TopicPO> listTopicPOsFromDB(Long clusterPhyId);
Topic getTopic(Long clusterPhyId, String topicName);
List<String> listRecentUpdateTopicNamesFromDB(Long clusterPhyId, Integer time); // get the names of topics recently added to the cluster; time is in seconds
@@ -39,6 +40,6 @@ public interface TopicService {
int addNewTopic2DB(TopicPO po);
int deleteTopicInDB(Long clusterPhyId, String topicName);
void batchReplaceMetadata(Long clusterPhyId, List<Topic> presentTopicList);
int batchReplaceConfig(Long clusterPhyId, List<TopicConfig> topicConfigList);
int batchReplaceChangedConfig(Long clusterPhyId, List<TopicConfig> topicConfigList);
Result<Void> updatePartitionNum(Long clusterPhyId, String topicName, Integer partitionNum);
}
View File
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.core.service.topic.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.ha.HaActiveStandbyRelation;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
@@ -12,15 +13,17 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.converter.TopicConverter;
import com.xiaojukeji.know.streaming.km.common.enums.ha.HaResTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.core.service.ha.HaActiveStandbyRelationService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.topic.OpTopicService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
@@ -48,7 +51,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
* @author didi
*/
@Service
public class OpTopicServiceImpl extends BaseVersionControlService implements OpTopicService {
public class OpTopicServiceImpl extends BaseKafkaVersionControlService implements OpTopicService {
private static final ILog log = LogFactory.getLog(TopicConfigServiceImpl.class);
private static final String TOPIC_CREATE = "createTopic";
@@ -70,6 +73,9 @@ public class OpTopicServiceImpl extends BaseVersionControlService implements OpT
@Autowired
private KafkaZKDAO kafkaZKDAO;
@Autowired
private HaActiveStandbyRelationService haActiveStandbyRelationService;
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_TOPIC;
@@ -138,6 +144,25 @@ public class OpTopicServiceImpl extends BaseVersionControlService implements OpT
// delete the topic's data from the DB
topicService.deleteTopicInDB(param.getClusterPhyId(), param.getTopicName());
// unbind the high-availability (active/standby) topic relation
List<HaActiveStandbyRelation> haActiveStandbyRelations = haActiveStandbyRelationService.listByClusterAndType(param.getClusterPhyId(), HaResTypeEnum.MIRROR_TOPIC);
for (HaActiveStandbyRelation activeStandbyRelation : haActiveStandbyRelations) {
if (activeStandbyRelation.getResName().equals(param.getTopicName())) {
try {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(activeStandbyRelation.getStandbyClusterPhyId());
Properties haTopics = kafkaZkClient.getEntityConfigs("ha-topics", activeStandbyRelation.getResName());
if (haTopics.size() != 0) {
kafkaZkClient.setOrCreateEntityConfigs("ha-topics", activeStandbyRelation.getResName(), new Properties());
kafkaZkClient.createConfigChangeNotification("ha-topics/" + activeStandbyRelation.getResName());
}
haActiveStandbyRelationService.batchDeleteTopicHA(activeStandbyRelation.getActiveClusterPhyId(), activeStandbyRelation.getStandbyClusterPhyId(), Collections.singletonList(activeStandbyRelation.getResName()));
} catch (Exception e) {
log.error("method=deleteTopic||topicName:{}||errMsg=exception", activeStandbyRelation.getResName(), e);
return Result.buildFailure(e.getMessage());
}
}
}
// record the operation
OplogDTO oplogDTO = new OplogDTO(operator,
OperationEnum.DELETE.getDesc(),
View File
@@ -10,7 +10,6 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.param.config.KafkaTop
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.kafka.*;
@@ -27,7 +26,7 @@ import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
@@ -47,7 +46,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.
@Service
public class TopicConfigServiceImpl extends BaseVersionControlService implements TopicConfigService {
public class TopicConfigServiceImpl extends BaseKafkaVersionControlService implements TopicConfigService {
private static final ILog log = LogFactory.getLog(TopicConfigServiceImpl.class);
private static final String GET_TOPIC_CONFIG = "getTopicConfig";
@@ -98,9 +97,9 @@ public class TopicConfigServiceImpl extends BaseVersionControlService implements
registerVCHandler(GET_TOPIC_CONFIG, V_0_10_0_0, V_0_11_0_0, "getTopicConfigByZKClient", this::getTopicConfigByZKClient);
registerVCHandler(GET_TOPIC_CONFIG, V_0_11_0_0, V_MAX, "getTopicConfigByKafkaClient", this::getTopicConfigByKafkaClient);
registerVCHandler(MODIFY_TOPIC_CONFIG, V_0_10_0_0, V_0_10_2_0, "modifyTopicConfigByZKClientAndNodeVersionV1", this::modifyTopicConfigByZKClientAndNodeVersionV1);
registerVCHandler(MODIFY_TOPIC_CONFIG, V_0_10_2_0, V_0_11_0_3, "modifyTopicConfigByZKClientAndNodeVersionV2", this::modifyTopicConfigByZKClientAndNodeVersionV2);
registerVCHandler(MODIFY_TOPIC_CONFIG, V_0_11_0_3, V_MAX, "modifyTopicConfigByKafkaClient", this::modifyTopicConfigByKafkaClient);
registerVCHandler(MODIFY_TOPIC_CONFIG, V_0_10_0_0, V_0_10_2_0, "modifyTopicConfigByZKClientAndNodeVersionV1", this::modifyTopicConfigByZKClientAndNodeVersionV1);
registerVCHandler(MODIFY_TOPIC_CONFIG, V_0_10_2_0, V_2_3_0, "modifyTopicConfigByZKClientAndNodeVersionV2", this::modifyTopicConfigByZKClientAndNodeVersionV2);
registerVCHandler(MODIFY_TOPIC_CONFIG, V_2_3_0, V_MAX, "modifyTopicConfigByKafkaClient", this::modifyTopicConfigByKafkaClient);
}
@Override
@@ -185,11 +184,9 @@ public class TopicConfigServiceImpl extends BaseVersionControlService implements
private Result<Properties> getTopicConfigByZKClient(Long clusterPhyId, String topicName) {
try {
Topic topic = topicService.getTopic(clusterPhyId, topicName);
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
Properties properties = kafkaZkClient.getEntityConfigs("topics", topic.getTopicName());
Properties properties = kafkaZkClient.getEntityConfigs("topics", topicName);
for (Object key: properties.keySet()) {
properties.getProperty((String) key);
}
@@ -209,12 +206,10 @@ public class TopicConfigServiceImpl extends BaseVersionControlService implements
try {
AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId());
Topic metadata = topicService.getTopic(param.getClusterPhyId(), param.getTopicName());
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, metadata.getTopicName());
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, param.getTopicName());
DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
Arrays.asList(configResource),
buildDescribeConfigsOptions()
Collections.singletonList(configResource),
buildDescribeConfigsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
);
Map<ConfigResource, Config> configMap = describeConfigsResult.all().get();

View File

@@ -2,13 +2,10 @@ package com.xiaojukeji.know.streaming.km.core.service.topic.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricsTopicDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.PartitionMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
@@ -21,6 +18,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInf
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.TopicMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.constant.ESConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
@@ -30,36 +28,32 @@ import com.xiaojukeji.know.streaming.km.common.utils.BeanUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.persistence.cache.DataBaseDataLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseMetricService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.TopicMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.PartitionMetricVersionItems.PARTITION_METRIC_MESSAGES;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.PartitionMetricVersionItems.PARTITION_METRIC_MESSAGES;
/**
*/
@Service
public class TopicMetricServiceImpl extends BaseMetricService implements TopicMetricService {
private static final ILog LOGGER = LogFactory.getLog( TopicMetricServiceImpl.class);
private static final ILog LOGGER = LogFactory.getLog(TopicMetricServiceImpl.class);
public static final String TOPIC_METHOD_DO_NOTHING = "doNothing";
public static final String TOPIC_METHOD_GET_HEALTH_SCORE = "getMetricHealthScore";
@@ -67,7 +61,7 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
public static final String TOPIC_METHOD_GET_METRIC_FROM_KAFKA_BY_TOTAL_PARTITION_OF_BROKER_JMX = "getMetricFromKafkaByTotalPartitionOfBrokerJmx";
public static final String TOPIC_METHOD_GET_MESSAGES = "getMessages";
public static final String TOPIC_METHOD_GET_REPLICAS_COUNT = "getReplicasCount";
public static final String TOPIC_METHOD_GET_TOPIC_MIRROR_FETCH_LAG = "getTopicMirrorFetchLag";
@Autowired
private HealthStateService healthStateService;
@@ -86,18 +80,6 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
@Autowired
private TopicMetricESDAO topicMetricESDAO;
private final Cache<Long, Map<String, TopicMetrics>> topicLatestMetricsCache = Caffeine.newBuilder()
.expireAfterWrite(5, TimeUnit.MINUTES)
.maximumSize(200)
.build();
@Scheduled(cron = "0 0/2 * * * ?")
private void flushClusterLatestMetricsCache() {
for (ClusterPhy clusterPhy: LoadedClusterPhyCache.listAll().values()) {
this.updateCacheAndGetMetrics(clusterPhy.getId());
}
}
@Override
protected VersionItemTypeEnum getVersionItemType() {
return VersionItemTypeEnum.METRIC_TOPIC;
@@ -116,6 +98,7 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
registerVCHandler( TOPIC_METHOD_GET_METRIC_FROM_KAFKA_BY_TOTAL_PARTITION_OF_BROKER_JMX, this::getMetricFromKafkaByTotalPartitionOfBrokerJmx );
registerVCHandler( TOPIC_METHOD_GET_REPLICAS_COUNT, this::getReplicasCount);
registerVCHandler( TOPIC_METHOD_GET_MESSAGES, this::getMessages);
registerVCHandler( TOPIC_METHOD_GET_TOPIC_MIRROR_FETCH_LAG, this::getTopicMirrorFetchLag);
}
@Override
@@ -152,13 +135,13 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
}
@Override
public Map<String, TopicMetrics> getLatestMetricsFromCacheFirst(Long clusterPhyId) {
Map<String, TopicMetrics> metricsMap = topicLatestMetricsCache.getIfPresent(clusterPhyId);
if (metricsMap != null) {
return metricsMap;
public Map<String, TopicMetrics> getLatestMetricsFromCache(Long clusterPhyId) {
Map<String, TopicMetrics> metricsMap = DataBaseDataLocalCache.getTopicMetrics(clusterPhyId);
if (metricsMap == null) {
return new HashMap<>();
}
return this.updateCacheAndGetMetrics(clusterPhyId);
return metricsMap;
}
@Override
@@ -171,9 +154,17 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
@Override
public List<TopicMetrics> listTopicLatestMetricsFromES(Long clusterPhyId, List<String> topicNames, List<String> metricNames) {
List<TopicMetricPO> topicMetricPOs = topicMetricESDAO.listTopicLatestMetric(clusterPhyId, topicNames, metricNames);
List<TopicMetricPO> poList = new ArrayList<>();
return ConvertUtil.list2List(topicMetricPOs, TopicMetrics.class);
for (int i = 0; i < topicNames.size(); i += ESConstant.SEARCH_LATEST_TOPIC_METRIC_CNT_PER_REQUEST) {
poList.addAll(topicMetricESDAO.listTopicLatestMetric(
clusterPhyId,
topicNames.subList(i, Math.min(i + ESConstant.SEARCH_LATEST_TOPIC_METRIC_CNT_PER_REQUEST, topicNames.size())),
Collections.emptyList())
);
}
return ConvertUtil.list2List(poList, TopicMetrics.class);
}
@Override
@@ -195,7 +186,7 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> retTable;
if(CollectionUtils.isEmpty(topics)) {
// If the topN topics cannot be fetched from ES, fall back to the topics loaded from the database
List<String> defaultTopics = listTopNTopics(clusterId, topN);
List<String> defaultTopics = this.listTopNTopics(clusterId, topN);
retTable = topicMetricESDAO.listTopicMetricsByTopN(clusterId, defaultTopics, metrics, aggType, topN, startTime, endTime );
}else {
retTable = topicMetricESDAO.listTopicMetricsByTopics(clusterId, metrics, aggType, topics, startTime, endTime);
@@ -308,19 +299,8 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
return Result.buildSuc(count);
}
/**************************************************** private method ****************************************************/
private Map<String, TopicMetrics> updateCacheAndGetMetrics(Long clusterPhyId) {
List<String> topicNames = topicService.listTopicsFromDB(clusterPhyId)
.stream().map(Topic::getTopicName).collect(Collectors.toList());
List<TopicMetrics> metrics = listTopicLatestMetricsFromES(clusterPhyId, topicNames, Arrays.asList());
Map<String, TopicMetrics> metricsMap = metrics.stream()
.collect(Collectors.toMap(TopicMetrics::getTopic, Function.identity()));
topicLatestMetricsCache.put(clusterPhyId, metricsMap);
return metricsMap;
}
private List<String> listTopNTopics(Long clusterId, int topN){
@@ -523,4 +503,41 @@ public class TopicMetricServiceImpl extends BaseMetricService implements TopicMe
return aliveBrokerList.stream().filter(elem -> topic.getBrokerIdSet().contains(elem.getBrokerId())).collect(Collectors.toList());
}
private Result<List<TopicMetrics>> getTopicMirrorFetchLag(VersionItemParam param) {
TopicMetricParam topicMetricParam = (TopicMetricParam)param;
String topic = topicMetricParam.getTopic();
Long clusterId = topicMetricParam.getClusterId();
String metric = topicMetricParam.getMetric();
VersionJmxInfo jmxInfo = getJMXInfo(clusterId, metric);
if(null == jmxInfo){return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);}
if (!DataBaseDataLocalCache.isHaTopic(clusterId, topic)) {
return Result.buildFailure(NOT_EXIST);
}
List<Broker> brokers = this.listAliveBrokersByTopic(clusterId, topic);
if(CollectionUtils.isEmpty(brokers)){return Result.buildFailure(BROKER_NOT_EXIST);}
Float sumLag = 0f;
for (Broker broker : brokers) {
JmxConnectorWrap jmxConnectorWrap = kafkaJMXClient.getClientWithCheck(clusterId, broker.getBrokerId());
try {
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), topic);
Set<ObjectName> objectNameSet = jmxConnectorWrap.queryNames(new ObjectName(jmxObjectName), null);
for (ObjectName name : objectNameSet) {
Object attribute = jmxConnectorWrap.getAttribute(name, jmxInfo.getJmxAttribute());
sumLag += Float.valueOf(attribute.toString());
}
} catch (Exception e) {
LOGGER.error("method=getTopicMirrorFetchLag||cluster={}||brokerId={}||topic={}||metrics={}||jmx={}||msg={}",
clusterId, broker.getBrokerId(), topic, metric, jmxInfo.getJmxObjectName(), e.getClass().getName());
}
}
TopicMetrics topicMetric = new TopicMetrics(topic, clusterId, true);
topicMetric.putMetric(metric, sumLag);
return Result.buildSuc(Arrays.asList(topicMetric));
}
}

View File

@@ -101,7 +101,15 @@ public class TopicServiceImpl implements TopicService {
@Override
public List<Topic> listTopicsFromDB(Long clusterPhyId) {
return TopicConverter.convert2TopicList(this.getTopicsFromDB(clusterPhyId));
return TopicConverter.convert2TopicList(this.listTopicPOsFromDB(clusterPhyId));
}
@Override
public List<TopicPO> listTopicPOsFromDB(Long clusterPhyId) {
LambdaQueryWrapper<TopicPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(TopicPO::getClusterPhyId, clusterPhyId);
return topicDAO.selectList(lambdaQueryWrapper);
}
@Override
@@ -116,15 +124,16 @@ public class TopicServiceImpl implements TopicService {
@Override
public List<String> listRecentUpdateTopicNamesFromDB(Long clusterPhyId, Integer time) {
Date updateTime = DateUtils.getBeforeSeconds(new Date(), time);
LambdaQueryWrapper<TopicPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(TopicPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.ge(TopicPO::getUpdateTime, updateTime);
List<TopicPO> topicPOS = topicDAO.selectList(lambdaQueryWrapper);
if (topicPOS.isEmpty()){
lambdaQueryWrapper.ge(TopicPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.ge(TopicPO::getUpdateTime, DateUtils.getBeforeSeconds(new Date(), time));
List<TopicPO> poList = topicDAO.selectList(lambdaQueryWrapper);
if (poList.isEmpty()){
return new ArrayList<>();
}
return topicPOS.stream().map(TopicPO::getTopicName).collect(Collectors.toList());
return poList.stream().map(elem -> elem.getTopicName()).collect(Collectors.toList());
}
@Override
@@ -181,39 +190,46 @@ public class TopicServiceImpl implements TopicService {
@Override
public void batchReplaceMetadata(Long clusterPhyId, List<Topic> presentTopicList) {
Map<String, Topic> presentTopicMap = presentTopicList.stream().collect(Collectors.toMap(Topic::getTopicName, Function.identity()));
List<TopicPO> dbTopicPOList = this.getTopicsFromDB(clusterPhyId);
Map<String, TopicPO> inDBMap = this.listTopicPOsFromDB(clusterPhyId).stream().collect(Collectors.toMap(TopicPO::getTopicName, Function.identity()));
// Merge the latest metadata with the existing DB records
for (TopicPO dbTopicPO: dbTopicPOList) {
Topic topic = presentTopicMap.remove(dbTopicPO.getTopicName());
if (topic == null) {
topicDAO.deleteById(dbTopicPO.getId());
continue;
}
topicDAO.updateById(TopicConverter.mergeAndOnlyMetadata2NewTopicPO(topic, dbTopicPO));
}
// Insert topics that are not yet in the DB
for (Topic topic: presentTopicMap.values()) {
for (Topic presentTopic: presentTopicList) {
try {
topicDAO.insert(TopicConverter.mergeAndOnlyMetadata2NewTopicPO(topic, null));
TopicPO inDBTopicPO = inDBMap.remove(presentTopic.getTopicName());
TopicPO newTopicPO = TopicConverter.mergeAndOnlyMetadata2NewTopicPO(presentTopic, inDBTopicPO);
if (inDBTopicPO == null) {
topicDAO.insert(newTopicPO);
} else if (!newTopicPO.equals(inDBTopicPO)) {
// Something changed, so perform the update
if (presentTopic.getUpdateTime() == null) {
// If the incoming data's update time is null, set it to the current time
newTopicPO.setUpdateTime(new Date());
}
topicDAO.updateById(newTopicPO);
}
// No changes, so skip the update
} catch (DuplicateKeyException dke) {
// Ignore duplicate-key errors: multiple KM instances may insert concurrently, so key conflicts can occur
}
}
// Delete the DB records that are no longer present in the latest metadata
inDBMap.values().forEach(elem -> topicDAO.deleteById(elem.getId()));
}
@Override
public int batchReplaceConfig(Long clusterPhyId, List<TopicConfig> topicConfigList) {
public int batchReplaceChangedConfig(Long clusterPhyId, List<TopicConfig> changedConfigList) {
int effectRow = 0;
for (TopicConfig config: topicConfigList) {
for (TopicConfig config: changedConfigList) {
try {
effectRow += topicDAO.updateConfig(ConvertUtil.obj2Obj(config, TopicPO.class));
effectRow += topicDAO.updateConfigById(ConvertUtil.obj2Obj(config, TopicPO.class));
} catch (Exception e) {
log.error("method=batchReplaceConfig||config={}||errMsg=exception!", config, e);
log.error(
"method=batchReplaceConfig||clusterPhyId={}||topicName={}||retentionMs={}||errMsg=exception!",
config.getClusterPhyId(), config.getTopicName(), config.getRetentionMs(), e
);
}
}
@@ -259,7 +275,7 @@ public class TopicServiceImpl implements TopicService {
return Result.buildSuc(topicList);
} catch (Exception e) {
log.error("class=TopicServiceImpl||method=getTopicsFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getTopicsFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
@@ -277,7 +293,7 @@ public class TopicServiceImpl implements TopicService {
return Result.buildSuc(topicList);
} catch (Exception e) {
log.error("class=TopicServiceImpl||method=getTopicsFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
log.error("method=getTopicsFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
@@ -298,11 +314,4 @@ public class TopicServiceImpl implements TopicService {
return topicDAO.selectOne(lambdaQueryWrapper);
}
private List<TopicPO> getTopicsFromDB(Long clusterPhyId) {
LambdaQueryWrapper<TopicPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(TopicPO::getClusterPhyId, clusterPhyId);
return topicDAO.selectList(lambdaQueryWrapper);
}
}

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* @author wyb
* @date 2022/11/9
*/
public abstract class BaseConnectorMetricService extends BaseConnectorVersionControlService{
private List<String> metricNames = new ArrayList<>();
@PostConstruct
public void init(){
initMetricFieldAndNameList();
initRegisterVCHandler();
}
protected void initMetricFieldAndNameList(){
metricNames = listVersionControlItems().stream().map(v -> v.getName()).collect(Collectors.toList());
}
protected abstract List<String> listMetricPOFields();
protected abstract void initRegisterVCHandler();
/**
* Check whether str is a metric name
* @param str
*/
protected boolean isMetricName(String str){
return metricNames.contains(str);
}
}
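For illustration only (not part of this commit), a concrete subclass typically uses isMetricName() to decide whether a requested sort or filter field refers to a collected metric or to a plain PO column; sortField below is a hypothetical input:

// Hypothetical usage inside a concrete connector metric service; sortField is illustrative.
if (this.isMetricName(sortField)) {
    // the field is a collected metric, so read it from the metric map
} else if (this.listMetricPOFields().contains(sortField)) {
    // the field is a plain PO column declared by the subclass
}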

View File

@@ -0,0 +1,55 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import org.springframework.beans.factory.annotation.Autowired;
import javax.annotation.Nullable;
/**
* @author wyb
* @date 2022/11/8
*/
public abstract class BaseConnectorVersionControlService extends BaseVersionControlService {
@Autowired
ConnectClusterService connectClusterService;
@Nullable
protected Object doVCHandler(Long connectClusterId, String action, VersionItemParam param) throws VCHandlerNotExistException {
String versionStr = connectClusterService.getClusterVersion(connectClusterId);
LOGGER.debug(
"method=doVCHandler||connectClusterId={}||action={}||type={}||param={}",
connectClusterId, action, getVersionItemType().getMessage(), ConvertUtil.obj2Json(param)
);
Tuple<Object, String> ret = doVCHandler(versionStr, action, param);
LOGGER.debug(
"method=doVCHandler||clusterId={}||action={}||methodName={}||type={}||param={}||ret={}!",
connectClusterId, action, ret != null ?ret.getV2(): "", getVersionItemType().getMessage(), ConvertUtil.obj2Json(param), ConvertUtil.obj2Json(ret)
);
return ret == null? null: ret.getV1();
}
@Nullable
protected String getMethodName(Long connectClusterId, String action) {
String versionStr = connectClusterService.getClusterVersion(connectClusterId);
return getMethodName(versionStr, action);
}
@Nullable
protected VersionConnectJmxInfo getJMXInfo(Long connectClusterId, String action) {
String versionStr = connectClusterService.getClusterVersion(connectClusterId);
return (VersionConnectJmxInfo) getJMXInfo(versionStr, action);
}
}
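For illustration only (not part of this commit), a connector-facing service built on this base class would run a version-matched handler roughly as follows; the action name "getConnectorMetric" is hypothetical and the param is assumed to be a VersionItemParam subclass:

// Hypothetical sketch of a subclass method, not part of this commit.
protected Object collectConnectorMetric(Long connectClusterId, VersionItemParam param) {
    try {
        // resolves the connect cluster's version, then picks and runs the handler registered for it
        return this.doVCHandler(connectClusterId, "getConnectorMetric", param);
    } catch (VCHandlerNotExistException e) {
        // no handler registered for this version/action combination
        return null;
    }
}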

View File

@@ -0,0 +1,52 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import org.springframework.beans.factory.annotation.Autowired;
import javax.annotation.Nullable;
/**
* @author didi
*/
public abstract class BaseKafkaVersionControlService extends BaseVersionControlService{
@Autowired
private ClusterPhyService clusterPhyService;
@Nullable
protected Object doVCHandler(Long clusterPhyId, String action, VersionItemParam param) throws VCHandlerNotExistException {
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
LOGGER.info(
"method=doVCHandler||clusterId={}||action={}||type={}||param={}",
clusterPhyId, action, getVersionItemType().getMessage(), ConvertUtil.obj2Json(param)
);
Tuple<Object, String> ret = doVCHandler(versionStr, action, param);
LOGGER.debug(
"method=doVCHandler||clusterId={}||action={}||methodName={}||type={}||param={}||ret={}!",
clusterPhyId, action, ret != null ?ret.getV2(): "", getVersionItemType().getMessage(), ConvertUtil.obj2Json(param), ConvertUtil.obj2Json(ret)
);
return ret == null? null: ret.getV1();
}
@Nullable
protected String getMethodName(Long clusterPhyId, String action) {
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
return getMethodName(versionStr, action);
}
@Nullable
protected VersionJmxInfo getJMXInfo(Long clusterPhyId, String action){
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
return getJMXInfo(versionStr, action);
}
}

View File

@@ -17,7 +17,7 @@ import java.util.stream.Collectors;
/**
* @author didi
*/
public abstract class BaseMetricService extends BaseVersionControlService {
public abstract class BaseMetricService extends BaseKafkaVersionControlService {
private static final ILog LOGGER = LogFactory.getLog(BaseMetricService.class);
private List<String> metricNames = new ArrayList<>();
@@ -38,40 +38,41 @@ public abstract class BaseMetricService extends BaseVersionControlService {
protected abstract void initRegisterVCHandler();
protected <T> List<MetricMultiLinesVO> metricMap2VO(Long clusterId,
Map<String/*metric*/, Map<T, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
protected <T> List<MetricMultiLinesVO> metricMap2VO(Long clusterId, Map<String/*metric*/, Map<T, List<MetricPointVO>>> metricsMap ){
List<MetricMultiLinesVO> lineVOList = new ArrayList<>();
if (metricsMap == null || metricsMap.isEmpty()) {
// Return immediately if the map is empty
return multiLinesVOS;
return lineVOList;
}
for(String metric : map.keySet()){
for(Map.Entry<String/*metric*/, Map<T, List<MetricPointVO>>> entry : metricsMap.entrySet()){
try {
MetricMultiLinesVO multiLinesVO = new MetricMultiLinesVO();
multiLinesVO.setMetricName(metric);
multiLinesVO.setMetricName(entry.getKey());
List<MetricLineVO> metricLines = new ArrayList<>();
Map<T, List<MetricPointVO>> metricPointMap = map.get(metric);
if(null == metricPointMap || metricPointMap.isEmpty()){continue;}
for(Map.Entry<T, List<MetricPointVO>> entry : metricPointMap.entrySet()){
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(entry.getKey().toString());
metricLineVO.setMetricName(metric);
metricLineVO.setMetricPoints(entry.getValue());
metricLines.add(metricLineVO);
if(null == entry.getValue() || entry.getValue().isEmpty()){
continue;
}
List<MetricLineVO> metricLines = new ArrayList<>();
entry.getValue().entrySet().forEach(resNameAndMetricsEntry -> {
MetricLineVO metricLineVO = new MetricLineVO();
metricLineVO.setName(resNameAndMetricsEntry.getKey().toString());
metricLineVO.setMetricName(entry.getKey());
metricLineVO.setMetricPoints(resNameAndMetricsEntry.getValue());
metricLines.add(metricLineVO);
});
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
LOGGER.error("method=metricMap2VO||cluster={}||msg=exception!", clusterId, e);
lineVOList.add(multiLinesVO);
} catch (Exception e){
LOGGER.error("method=metricMap2VO||clusterId={}||msg=exception!", clusterId, e);
}
}
return multiLinesVOS;
return lineVOList;
}
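For illustration only (not part of this commit), the reshaping performed by metricMap2VO, assuming T is a topic name and p1..p3 are MetricPointVO instances:

// input : {"BytesIn" -> {"topicA" -> [p1, p2], "topicB" -> [p3]}}
// output: [ MetricMultiLinesVO{ metricName = "BytesIn",
//            metricLines = [ MetricLineVO{ name = "topicA", metricPoints = [p1, p2] },
//                            MetricLineVO{ name = "topicB", metricPoints = [p3] } ] } ]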
/**

View File

@@ -1,6 +1,5 @@
package com.xiaojukeji.know.streaming.km.core.service.version;
import com.alibaba.fastjson.JSON;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
@@ -10,7 +9,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMethod
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.util.CollectionUtils;
@@ -25,7 +24,7 @@ import java.util.function.Function;
*/
@DependsOn("versionControlService")
public abstract class BaseVersionControlService {
protected static final ILog LOGGER = LogFactory.getLog("METRIC_LOGGER");
protected static final ILog LOGGER = LogFactory.getLog(BaseVersionControlService.class);
@Autowired
protected VersionControlService versionControlService;
@@ -57,19 +56,14 @@ public abstract class BaseVersionControlService {
}
@Nullable
protected Object doVCHandler(Long clusterPhyId, String action, VersionItemParam param) throws VCHandlerNotExistException {
String methodName = getMethodName(clusterPhyId, action);
Object ret = versionControlService.doHandler(getVersionItemType(), methodName, param);
protected Tuple<Object, String> doVCHandler(String version, String action, VersionItemParam param) throws VCHandlerNotExistException {
String methodName = getMethodName(version, action);
if(!EnvUtil.isOnline()){
LOGGER.info("method=doVCHandler||clusterId={}||action={}||methodName={}||type={}param={}||ret={}}!",
clusterPhyId, action, methodName, getVersionItemType().getMessage(), JSON.toJSONString(param), JSON.toJSONString(ret));
}
return ret;
return new Tuple<>(versionControlService.doHandler(getVersionItemType(), methodName, param), methodName);
}
protected String getMethodName(Long clusterId, String action) {
VersionControlItem item = versionControlService.getVersionControlItem(clusterId, getVersionItemType().getCode(), action);
protected String getMethodName(String version, String action) {
VersionControlItem item = versionControlService.getVersionControlItem(version, getVersionItemType().getCode(), action);
if (null == item) {
return "";
}
@@ -81,8 +75,8 @@ public abstract class BaseVersionControlService {
return "";
}
protected VersionJmxInfo getJMXInfo(Long clusterId, String action){
VersionControlItem item = versionControlService.getVersionControlItem(clusterId, getVersionItemType().getCode(), action);
protected VersionJmxInfo getJMXInfo(String version, String action){
VersionControlItem item = versionControlService.getVersionControlItem(version, getVersionItemType().getCode(), action);
if (null == item) {
return null;
}

View File

@@ -6,7 +6,6 @@ import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
@@ -45,11 +44,11 @@ public interface VersionControlService {
/**
* Get the version-compatibility items for the given version
* @param clusterId
* @param version
* @param type
* @return
*/
List<VersionControlItem> listVersionControlItem(Long clusterId, Integer type);
List<VersionControlItem> listVersionControlItem(String version, Integer type);
/**
* Get all version-compatibility items for the given type
@@ -68,27 +67,18 @@ public interface VersionControlService {
/**
* Query the version-compatibility item for the given metric
* @param clusterId
* @param version
* @param type
* @param itemName
* @return
*/
VersionControlItem getVersionControlItem(Long clusterId, Integer type, String itemName);
VersionControlItem getVersionControlItem(String version, Integer type, String itemName);
/**
* Determine whether the item is supported by the given version
* @param clusterId
* @param version
* @param item
* @return
*/
boolean isClusterSupport(Long clusterId, VersionControlItem item);
/**
* Query the version-compatibility items for the given metrics
* @param clusterId
* @param type
* @param itemNames
* @return
*/
Map<String, VersionControlItem> getVersionControlItems(Long clusterId, Integer type, List<String> itemNames);
boolean isClusterSupport(String version, VersionControlItem item);
}
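For illustration only (not part of this commit), callers now resolve the cluster's version string first, e.g. via ClusterPhyService.getVersionFromCacheFirst as the Kafka-side base class does, and pass it to this interface; clusterPhyId, type and itemName below are hypothetical inputs:

// Minimal sketch of the version-string based lookup.
String version = clusterPhyService.getVersionFromCacheFirst(clusterPhyId);
VersionControlItem item = versionControlService.getVersionControlItem(version, type, itemName);
if (item != null && versionControlService.isClusterSupport(version, item)) {
    // the item applies to this cluster's version
}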

View File

@@ -31,6 +31,11 @@ public class FrontEndControlVersionItems extends BaseMetricVersionMetric {
private static final String FE_SECURITY_ACL_CREATE_RESOURCE_TYPE_TRANSACTIONAL_ID = "FESecurityAclCreateResourceTypeTransactionalId";
private static final String FE_SECURITY_ACL_CREATE_RESOURCE_TYPE_DELEGATION_TOKEN = "FESecurityAclCreateResourceTypeDelegationToken";
private static final String FE_CREATE_TOPIC_CLEANUP_POLICY = "FECreateTopicCleanupPolicy";
private static final String FE_HA_CREATE_MIRROR_TOPIC = "FEHaCreateMirrorTopic";
private static final String FE_HA_DELETE_MIRROR_TOPIC = "FEHaDeleteMirrorTopic";
public FrontEndControlVersionItems(){}
@Override
@@ -74,6 +79,16 @@ public class FrontEndControlVersionItems extends BaseMetricVersionMetric {
itemList.add(buildItem().minVersion(VersionEnum.V_1_1_0).maxVersion(VersionEnum.V_MAX)
.name(FE_SECURITY_ACL_CREATE_RESOURCE_TYPE_DELEGATION_TOKEN).desc("Security-创建ACL-ResourceType-DelegationToken"));
// Topic creation - cleanup policy: both delete and compact can be selected from V_0_10_1_0 onward
itemList.add(buildItem().minVersion(VersionEnum.V_0_10_1_0).maxVersion(VersionEnum.V_MAX)
.name(FE_CREATE_TOPIC_CLEANUP_POLICY).desc("Topic-创建Topic-Cleanup-Policy"));
// HA - topic mirroring
itemList.add(buildItem().minVersion(VersionEnum.V_2_5_0_D_300).maxVersion(VersionEnum.V_2_5_0_D_MAX)
.name(FE_HA_CREATE_MIRROR_TOPIC).desc("HA-创建Topic复制"));
itemList.add(buildItem().minVersion(VersionEnum.V_2_5_0_D_300).maxVersion(VersionEnum.V_2_5_0_D_MAX)
.name(FE_HA_DELETE_MIRROR_TOPIC).desc("HA-取消Topic复制"));
return itemList;
}
}

View File

@@ -7,11 +7,8 @@ import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.VersionUtil;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
@@ -26,18 +23,24 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Function;
@Slf4j
@DependsOn("springTool")
@Service("versionControlService")
public class VersionControlServiceImpl implements VersionControlService {
/**
* key: versionItemType
*/
private final Map<Integer, List<VersionControlItem>> versionItemMap = new ConcurrentHashMap<>();
@Autowired
private ClusterPhyService clusterPhyService;
/**
* key: versionItemType
* key1: metricName
*/
private final Map<Integer, Map<String, List<VersionControlItem>>> versionItemMetricNameMap = new ConcurrentHashMap<>();
private final Map<Integer, List<VersionControlItem>> versionItemMap = new ConcurrentHashMap<>();
private final Map<Integer, Map<String, List<VersionControlItem>>> versionItemMetricNameMap = new ConcurrentHashMap<>();
private final Map<String, Function<VersionItemParam, Object>> functionMap = new ConcurrentHashMap<>();
/**
* key : VersionItemTypeEnum.code@methodName
*/
private final Map<String, Function<VersionItemParam, Object>> functionMap = new ConcurrentHashMap<>();
@PostConstruct
public void init(){
@@ -51,7 +54,7 @@ public class VersionControlServiceImpl implements VersionControlService {
@Override
public void registerHandler(VersionItemTypeEnum typeEnum, String methodName, Function<VersionItemParam, Object> func){
functionMap.put(typeEnum.getCode() + "@" + methodName , func);
functionMap.put(versionFunctionKey(typeEnum.getCode(), methodName), func);
}
@Override
@@ -76,24 +79,23 @@ public class VersionControlServiceImpl implements VersionControlService {
itemMap.put(action, controlItems);
versionItemMetricNameMap.put(typeCode, itemMap);
functionMap.put(typeCode + "@" + methodName , func);
functionMap.put(versionFunctionKey(typeCode, methodName), func);
}
@Nullable
@Override
public Object doHandler(VersionItemTypeEnum typeEnum, String methodName, VersionItemParam param) throws VCHandlerNotExistException {
Function<VersionItemParam, Object> func = functionMap.get(typeEnum.getCode() + "@" + methodName);
Function<VersionItemParam, Object> func = functionMap.get(versionFunctionKey(typeEnum.getCode(), methodName));
if(null == func) {
throw new VCHandlerNotExistException(typeEnum.getCode() + "@" + methodName);
throw new VCHandlerNotExistException(versionFunctionKey(typeEnum.getCode(), methodName));
}
return func.apply(param);
}
@Override
public List<VersionControlItem> listVersionControlItem(Long clusterId, Integer type) {
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterId);
long versionLong = VersionUtil.normailze(versionStr);
public List<VersionControlItem> listVersionControlItem(String version, Integer type) {
long versionLong = VersionUtil.normailze(version);
List<VersionControlItem> items = versionItemMap.get(type);
if(CollectionUtils.isEmpty(items)) {
@@ -122,8 +124,8 @@ public class VersionControlServiceImpl implements VersionControlService {
}
@Override
public VersionControlItem getVersionControlItem(Long clusterId, Integer type, String itemName) {
List<VersionControlItem> items = listVersionControlItem(clusterId, type);
public VersionControlItem getVersionControlItem(String version, Integer type, String itemName) {
List<VersionControlItem> items = listVersionControlItem(version, type);
for(VersionControlItem item : items){
if(itemName.equals(item.getName())){
@@ -135,24 +137,13 @@ public class VersionControlServiceImpl implements VersionControlService {
}
@Override
public boolean isClusterSupport(Long clusterId, VersionControlItem item){
String versionStr = clusterPhyService.getVersionFromCacheFirst(clusterId);
long versionLong = VersionUtil.normailze(versionStr);
public boolean isClusterSupport(String version, VersionControlItem item) {
long versionLong = VersionUtil.normailze(version);
return item.getMinVersion() <= versionLong && versionLong < item.getMaxVersion();
}
@Override
public Map<String, VersionControlItem> getVersionControlItems(Long clusterId, Integer type, List<String> itemNames){
Map<String, VersionControlItem> versionControlItemMap = new HashMap<>();
for(String itemName : itemNames){
VersionControlItem item = getVersionControlItem(clusterId, type, itemName);
if(null != item){
versionControlItemMap.put(itemName, item);
}
}
return versionControlItemMap;
/**************************************************** private method ****************************************************/
private String versionFunctionKey(int typeCode, String methodName){
return typeCode + "@" + methodName;
}
}

View File

@@ -1,8 +1,10 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionConnectJmxInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMethodInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionJmxInfo;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.core.service.version.VersionControlMetricService;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.V_0_10_0_0;
@@ -58,4 +60,17 @@ public abstract class BaseMetricVersionMetric implements VersionControlMetricSer
jmxExtendInfo.setMethodName(methodName);
return jmxExtendInfo;
}
protected VersionConnectJmxInfo buildConnectJMXMethodExtend(String methodName) {
VersionConnectJmxInfo connectorJmxInfo = new VersionConnectJmxInfo();
connectorJmxInfo.setMethodName(methodName);
return connectorJmxInfo;
}
protected VersionConnectJmxInfo buildConnectJMXMethodExtend(String methodName, ConnectorTypeEnum type) {
VersionConnectJmxInfo connectorJmxInfo = new VersionConnectJmxInfo();
connectorJmxInfo.setMethodName(methodName);
connectorJmxInfo.setType(type);
return connectorJmxInfo;
}
}

View File

@@ -0,0 +1,110 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.CATEGORY_CLUSTER;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.CATEGORY_PERFORMANCE;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_CLUSTER;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.*;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.JMX_CONNECT_WORKER_METRIC;
import static com.xiaojukeji.know.streaming.km.core.service.connect.cluster.impl.ConnectClusterMetricServiceImpl.*;
@Component
public class ConnectClusterMetricVersionItems extends BaseMetricVersionMetric {
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_COUNT = "ConnectorCount";
public static final String CONNECT_CLUSTER_METRIC_TASK_COUNT = "TaskCount";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_ATTEMPTS_TOTAL = "ConnectorStartupAttemptsTotal";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE = "ConnectorStartupFailurePercentage";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_TOTAL = "ConnectorStartupFailureTotal";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_PERCENTAGE = "ConnectorStartupSuccessPercentage";
public static final String CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_TOTAL = "ConnectorStartupSuccessTotal";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_ATTEMPTS_TOTAL = "TaskStartupAttemptsTotal";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_PERCENTAGE = "TaskStartupFailurePercentage";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_TOTAL = "TaskStartupFailureTotal";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_PERCENTAGE = "TaskStartupSuccessPercentage";
public static final String CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_TOTAL = "TaskStartupSuccessTotal";
public static final String CONNECT_CLUSTER_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
@Override
public int versionItemType() {
return METRIC_CONNECT_CLUSTER.getCode();
}
@Override
public List<VersionMetricControlItem> init() {
List<VersionMetricControlItem> items = new ArrayList<>();
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_COUNT).unit("").desc("连接器数量").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_COUNT))); // assumes JmxAttribute defines CONNECTOR_COUNT for the worker's connector-count attribute
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_COUNT).unit("").desc("任务数量").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_ATTEMPTS_TOTAL).unit("").desc("连接器启动次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_ATTEMPTS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE).unit("%").desc("连接器启动失败概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_FAILURE_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_TOTAL).unit("").desc("连接器启动失败次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_FAILURE_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_PERCENTAGE).unit("%").desc("连接器启动成功概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_SUCCESS_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_SUCCESS_TOTAL).unit("").desc("连接器启动成功次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(CONNECTOR_STARTUP_SUCCESS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_ATTEMPTS_TOTAL).unit("").desc("任务启动次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_ATTEMPTS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_PERCENTAGE).unit("%").desc("任务启动失败概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_FAILURE_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_TOTAL).unit("").desc("任务启动失败次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_FAILURE_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_PERCENTAGE).unit("%").desc("任务启动成功概率").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_SUCCESS_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_TASK_STARTUP_SUCCESS_TOTAL).unit("").desc("任务启动成功次数").category(CATEGORY_CLUSTER)
.extend(buildConnectJMXMethodExtend(CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_METRIC).jmxAttribute(TASK_STARTUP_SUCCESS_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECT_CLUSTER_METRIC_COLLECT_COST_TIME).unit("").desc("采集connect集群指标耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(CONNECT_CLUSTER_METHOD_DO_NOTHING));
return items;
}
}

View File

@@ -0,0 +1,316 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.*;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum.SINK;
import static com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum.SOURCE;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_CONNECTOR;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.*;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.*;
import static com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl.ConnectorMetricServiceImpl.*;
@Component
public class ConnectorMetricVersionItems extends BaseMetricVersionMetric {
public static final String CONNECTOR_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
public static final String CONNECTOR_METRIC_HEALTH_STATE = "HealthState";
public static final String CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT = "ConnectorTotalTaskCount";
public static final String CONNECTOR_METRIC_HEALTH_CHECK_PASSED = "HealthCheckPassed";
public static final String CONNECTOR_METRIC_HEALTH_CHECK_TOTAL = "HealthCheckTotal";
public static final String CONNECTOR_METRIC_CONNECTOR_RUNNING_TASK_COUNT = "ConnectorRunningTaskCount";
public static final String CONNECTOR_METRIC_CONNECTOR_PAUSED_TASK_COUNT = "ConnectorPausedTaskCount";
public static final String CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT = "ConnectorFailedTaskCount";
public static final String CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT = "ConnectorUnassignedTaskCount";
public static final String CONNECTOR_METRIC_BATCH_SIZE_AVG = "BatchSizeAvg";
public static final String CONNECTOR_METRIC_BATCH_SIZE_MAX = "BatchSizeMax";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_AVG_TIME_MS = "OffsetCommitAvgTimeMs";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_MAX_TIME_MS = "OffsetCommitMaxTimeMs";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_FAILURE_PERCENTAGE = "OffsetCommitFailurePercentage";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_SUCCESS_PERCENTAGE = "OffsetCommitSuccessPercentage";
public static final String CONNECTOR_METRIC_POLL_BATCH_AVG_TIME_MS = "PollBatchAvgTimeMs";
public static final String CONNECTOR_METRIC_POLL_BATCH_MAX_TIME_MS = "PollBatchMaxTimeMs";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT = "SourceRecordActiveCount";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_AVG = "SourceRecordActiveCountAvg";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_MAX = "SourceRecordActiveCountMax";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_POLL_RATE = "SourceRecordPollRate";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_POLL_TOTAL = "SourceRecordPollTotal";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_WRITE_RATE = "SourceRecordWriteRate";
public static final String CONNECTOR_METRIC_SOURCE_RECORD_WRITE_TOTAL = "SourceRecordWriteTotal";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_RATE = "OffsetCommitCompletionRate";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_TOTAL = "OffsetCommitCompletionTotal";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_RATE = "OffsetCommitSkipRate";
public static final String CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_TOTAL = "OffsetCommitSkipTotal";
public static final String CONNECTOR_METRIC_PARTITION_COUNT = "PartitionCount";
public static final String CONNECTOR_METRIC_PUT_BATCH_AVG_TIME_MS = "PutBatchAvgTimeMs";
public static final String CONNECTOR_METRIC_PUT_BATCH_MAX_TIME_MS = "PutBatchMaxTimeMs";
public static final String CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT = "SinkRecordActiveCount";
public static final String CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_AVG = "SinkRecordActiveCountAvg";
public static final String CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_MAX = "SinkRecordActiveCountMax";
public static final String CONNECTOR_METRIC_SINK_RECORD_LAG_MAX = "SinkRecordLagMax";
public static final String CONNECTOR_METRIC_SINK_RECORD_READ_RATE = "SinkRecordReadRate";
public static final String CONNECTOR_METRIC_SINK_RECORD_READ_TOTAL = "SinkRecordReadTotal";
public static final String CONNECTOR_METRIC_SINK_RECORD_SEND_RATE = "SinkRecordSendRate";
public static final String CONNECTOR_METRIC_SINK_RECORD_SEND_TOTAL = "SinkRecordSendTotal";
public static final String CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_FAILURES = "DeadletterqueueProduceFailures";
public static final String CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_REQUESTS = "DeadletterqueueProduceRequests";
public static final String CONNECTOR_METRIC_LAST_ERROR_TIMESTAMP = "LastErrorTimestamp";
public static final String CONNECTOR_METRIC_TOTAL_ERRORS_LOGGED = "TotalErrorsLogged";
public static final String CONNECTOR_METRIC_TOTAL_RECORD_ERRORS = "TotalRecordErrors";
public static final String CONNECTOR_METRIC_TOTAL_RECORD_FAILURES = "TotalRecordFailures";
public static final String CONNECTOR_METRIC_TOTAL_RECORDS_SKIPPED = "TotalRecordsSkipped";
public static final String CONNECTOR_METRIC_TOTAL_RETRIES = "TotalRetries";
@Override
public int versionItemType() {
return METRIC_CONNECT_CONNECTOR.getCode();
}
@Override
public List<VersionMetricControlItem> init() {
List<VersionMetricControlItem> items = new ArrayList<>();
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_COLLECT_COST_TIME).unit("").desc("采集connector指标的耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(CONNECTOR_METHOD_DO_NOTHING));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_HEALTH_STATE).unit("0:好 1:中 2:差 3:宕机").desc("健康状态(0:好 1:中 2:差 3:宕机)").category(CATEGORY_HEALTH)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_HEALTH_CHECK_PASSED).unit("").desc("健康项检查通过数").category(CATEGORY_HEALTH)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_HEALTH_CHECK_TOTAL).unit("").desc("健康项检查总数").category(CATEGORY_HEALTH)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT).unit("").desc("所有任务数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_TOTAL_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_RUNNING_TASK_COUNT).unit("").desc("运行状态的任务数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_RUNNING_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_PAUSED_TASK_COUNT).unit("").desc("暂停状态的任务数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_PAUSED_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT).unit("").desc("失败状态的任务数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_FAILED_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_CONNECTOR_UNASSIGNED_TASK_COUNT).unit("").desc("未被分配的任务数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM)
.jmxObjectName(JMX_CONNECT_WORKER_CONNECTOR_METRIC).jmxAttribute(CONNECTOR_UNASSIGNED_TASK_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_BATCH_SIZE_AVG).unit("").desc("批次数量平均值").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(BATCH_SIZE_AVG)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_BATCH_SIZE_MAX).unit("").desc("批次数量最大值").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(BATCH_SIZE_MAX)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_AVG_TIME_MS).unit("ms").desc("位点提交平均耗时").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_AVG_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_MAX_TIME_MS).unit("ms").desc("位点提交最大耗时").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_MAX_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_FAILURE_PERCENTAGE).unit("%").desc("位点提交失败概率").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_FAILURE_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_SUCCESS_PERCENTAGE).unit("%").desc("位点提交成功概率").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG)
.jmxObjectName(JMX_CONNECTOR_TASK_CONNECTOR_METRIC).jmxAttribute(OFFSET_COMMIT_SUCCESS_PERCENTAGE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_POLL_BATCH_AVG_TIME_MS).unit("ms").desc("POLL平均耗时").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(POLL_BATCH_AVG_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_POLL_BATCH_MAX_TIME_MS).unit("ms").desc("POLL最大耗时").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(POLL_BATCH_MAX_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT).unit("").desc("pending状态消息数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_ACTIVE_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_AVG).unit("").desc("pending状态平均消息数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_ACTIVE_COUNT_AVG)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT_MAX).unit("").desc("pending状态最大消息数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_ACTIVE_COUNT_MAX)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_POLL_RATE).unit("条/s").desc("消息读取速率").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_POLL_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_POLL_TOTAL).unit("").desc("消息读取总数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_POLL_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_WRITE_RATE).unit("条/s").desc("消息写入速率").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_WRITE_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SOURCE_RECORD_WRITE_TOTAL).unit("").desc("消息写入总数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SOURCE)
.jmxObjectName(JMX_CONNECTOR_SOURCE_TASK_METRICS).jmxAttribute(SOURCE_RECORD_WRITE_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_RATE).unit(BYTE_PER_SEC).desc("成功的位点提交速率").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_COMPLETION_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_COMPLETION_TOTAL).unit("").desc("成功的位点提交总数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_COMPLETION_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_RATE).unit("").desc("被跳过的位点提交速率").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_SKIP_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_OFFSET_COMMIT_SKIP_TOTAL).unit("").desc("被跳过的位点提交总数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(OFFSET_COMMIT_SKIP_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_PARTITION_COUNT).unit("").desc("被分配到的分区数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(PARTITION_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_PUT_BATCH_AVG_TIME_MS).unit("ms").desc("PUT平均耗时").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(PUT_BATCH_AVG_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_PUT_BATCH_MAX_TIME_MS).unit("ms").desc("PUT最大耗时").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(PUT_BATCH_MAX_TIME_MS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT).unit("").desc("pending状态消息数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_ACTIVE_COUNT)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_AVG).unit("").desc("pending状态平均消息数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_ACTIVE_COUNT_AVG)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT_MAX).unit("").desc("pending状态最大消息数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_ACTIVE_COUNT_MAX)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_READ_RATE).unit("条/s").desc("消息读取速率").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_READ_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_READ_TOTAL).unit("").desc("消息读取总数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_READ_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_SEND_RATE).unit("条/s").desc("消息写入速率").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_SEND_RATE)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_SINK_RECORD_SEND_TOTAL).unit("").desc("消息写入总数量").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, SINK)
.jmxObjectName(JMX_CONNECTOR_SINK_TASK_METRICS).jmxAttribute(SINK_RECORD_SEND_TOTAL)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_FAILURES).unit("").desc("死信队列写入失败数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(DEADLETTERQUEUE_PRODUCE_FAILURES)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_REQUESTS).unit("").desc("死信队列写入数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(DEADLETTERQUEUE_PRODUCE_REQUESTS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_LAST_ERROR_TIMESTAMP).unit("").desc("最后一次错误时间").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(LAST_ERROR_TIMESTAMP)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_ERRORS_LOGGED).unit("").desc("记录日志的错误消息数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_ERRORS_LOGGED)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RECORD_ERRORS).unit("").desc("消息处理错误的次数(异常消息数量)").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RECORD_ERRORS)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RECORD_FAILURES).unit("").desc("消息处理失败的次数(每次retry处理失败都会+1)").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RECORD_FAILURES)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RECORDS_SKIPPED).unit("").desc("因为失败导致跳过(未处理)的消息数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RECORDS_SKIPPED)));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_TOTAL_RETRIES).unit("").desc("失败重试的次数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM)
.jmxObjectName(JMX_CONNECTOR_TASK_ERROR_METRICS).jmxAttribute(TOTAL_RETRIES)));
return items;
}
}
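
For orientation, the CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM/_AVG/_MAX extensions above pair a Connect JMX object name with an aggregation over the connector's task MBeans. The sketch below only illustrates that idea: the "kafka.connect:type=connector-task-metrics" pattern follows Kafka Connect's standard metric MBeans, while the class and method names are hypothetical and not this project's implementation.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import java.util.Set;

public class ConnectorTaskJmxSketch {
    // Sums one numeric attribute across all task-level MBeans of a connector;
    // the AVG and MAX flavours would reduce the same set of values differently.
    public static double sumTaskAttribute(MBeanServerConnection conn,
                                          String connectorName,
                                          String attribute) throws Exception {
        // One MBean per task, e.g. kafka.connect:type=connector-task-metrics,connector=my-connector,task=0
        ObjectName pattern = new ObjectName(
                "kafka.connect:type=connector-task-metrics,connector=" + connectorName + ",task=*");
        Set<ObjectName> taskBeans = conn.queryNames(pattern, null);
        double sum = 0.0;
        for (ObjectName taskBean : taskBeans) {
            sum += ((Number) conn.getAttribute(taskBean, attribute)).doubleValue();
        }
        return sum;
    }
}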

View File

@@ -0,0 +1,125 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_CONNECT_MIRROR_MAKER;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.*;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.JMX_MIRROR_MAKER_SOURCE;
import static com.xiaojukeji.know.streaming.km.core.service.connect.mm2.impl.MirrorMakerMetricServiceImpl.*;
@Component
public class MirrorMakerMetricVersionItems extends BaseMetricVersionMetric {
public static final String MIRROR_MAKER_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
public static final String MIRROR_MAKER_METRIC_HEALTH_STATE = "HealthState";
public static final String MIRROR_MAKER_METRIC_HEALTH_CHECK_PASSED = "HealthCheckPassed";
public static final String MIRROR_MAKER_METRIC_HEALTH_CHECK_TOTAL = "HealthCheckTotal";
public static final String MIRROR_MAKER_METRIC_BYTE_COUNT = "ByteCount";
public static final String MIRROR_MAKER_METRIC_BYTE_RATE = "ByteRate";
public static final String MIRROR_MAKER_METRIC_RECORD_AGE_MS = "RecordAgeMs";
public static final String MIRROR_MAKER_METRIC_RECORD_AGE_MS_AVG = "RecordAgeMsAvg";
public static final String MIRROR_MAKER_METRIC_RECORD_AGE_MS_MAX = "RecordAgeMsMax";
public static final String MIRROR_MAKER_METRIC_RECORD_AGE_MS_MIN = "RecordAgeMsMin";
public static final String MIRROR_MAKER_METRIC_RECORD_COUNT = "RecordCount";
public static final String MIRROR_MAKER_METRIC_RECORD_RATE = "RecordRate";
public static final String MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS = "ReplicationLatencyMs";
public static final String MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_AVG = "ReplicationLatencyMsAvg";
public static final String MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MAX = "ReplicationLatencyMsMax";
public static final String MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MIN = "ReplicationLatencyMsMin";
@Override
public int versionItemType() {
return METRIC_CONNECT_MIRROR_MAKER.getCode();
}
@Override
public List<VersionMetricControlItem> init() {
List<VersionMetricControlItem> items = new ArrayList<>();
// HealthScore 指标
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_HEALTH_STATE).unit("0:好 1:中 2:差 3:宕机").desc("健康状态(0:好 1:中 2:差 3:宕机)").category(CATEGORY_HEALTH)
.extendMethod(MIRROR_MAKER_METHOD_GET_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_HEALTH_CHECK_PASSED).unit("").desc("健康项检查通过数").category(CATEGORY_HEALTH)
.extendMethod(MIRROR_MAKER_METHOD_GET_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_HEALTH_CHECK_TOTAL).unit("").desc("健康项检查总数").category(CATEGORY_HEALTH)
.extendMethod(MIRROR_MAKER_METHOD_GET_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_COLLECT_COST_TIME).unit("").desc("采集mirrorMaker指标的耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(MIRROR_MAKER_METHOD_DO_NOTHING));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_BYTE_COUNT).unit("byte").desc("消息复制流量大小").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(BYTE_COUNT)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_BYTE_RATE).unit(BYTE_PER_SEC).desc("复制流量速率").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(BYTE_RATE)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_RECORD_AGE_MS).unit("ms").desc("消息获取时年龄").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_AVG)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(RECORD_AGE_MS)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_RECORD_AGE_MS_AVG).unit("ms").desc("消息获取时平均年龄").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_AVG)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(RECORD_AGE_MS_AVG)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_RECORD_AGE_MS_MAX).unit("ms").desc("消息获取时最大年龄").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MAX)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(RECORD_AGE_MS_MAX)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_RECORD_AGE_MS_MIN).unit("ms").desc("消息获取时最小年龄").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MIN)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(RECORD_AGE_MS_MIN)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_RECORD_COUNT).unit("").desc("消息复制条数").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(RECORD_COUNT)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_RECORD_RATE).unit("条/s").desc("消息复制速率").category(CATEGORY_FLOW)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(RECORD_RATE)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS).unit("ms").desc("消息复制延迟时间").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_AVG)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(REPLICATION_LATENCY_MS)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_AVG).unit("ms").desc("消息复制平均延迟时间").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_AVG)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(REPLICATION_LATENCY_MS_AVG)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MAX).unit("ms").desc("消息复制最大延迟时间").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MAX)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(REPLICATION_LATENCY_MS_MAX)));
items.add(buildAllVersionsItem()
.name(MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MIN).unit("ms").desc("消息复制最小延迟时间").category(CATEGORY_PERFORMANCE)
.extend(buildConnectJMXMethodExtend(MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_MIN)
.jmxObjectName(JMX_MIRROR_MAKER_SOURCE).jmxAttribute(REPLICATION_LATENCY_MS_MIN)));
return items;
}
}
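
The MIRROR_MAKER_METHOD_GET_TOPIC_PARTITION_METRIC_LIST_SUM/_AVG/_MAX/_MIN method names above suggest these MirrorMaker source metrics are collected per topic-partition first and then reduced. A minimal reduction sketch is shown below for illustration only; the class name and the string-based aggregation switch are hypothetical, not the project's implementation.

import java.util.List;

public class MirrorMakerReduceSketch {
    // Reduces a list of per-topic-partition values (e.g. ReplicationLatencyMs per partition)
    // into a single metric value; returns null when nothing was collected.
    public static Float reduce(List<Float> perPartitionValues, String aggType) {
        if (perPartitionValues == null || perPartitionValues.isEmpty()) {
            return null;
        }
        switch (aggType) {
            case "MAX": return (float) perPartitionValues.stream().mapToDouble(Float::doubleValue).max().getAsDouble();
            case "MIN": return (float) perPartitionValues.stream().mapToDouble(Float::doubleValue).min().getAsDouble();
            case "AVG": return (float) perPartitionValues.stream().mapToDouble(Float::doubleValue).average().getAsDouble();
            default:    return (float) perPartitionValues.stream().mapToDouble(Float::doubleValue).sum();
        }
    }
}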

View File

@@ -1,13 +1,15 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.METRIC_BROKER;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxAttribute.*;
import static com.xiaojukeji.know.streaming.km.common.jmx.JmxName.*;
@@ -185,9 +187,12 @@ public class BrokerMetricVersionItems extends BaseMetricVersionMetric {
.jmxObjectName( JMX_SERVER_PARTITIONS ).jmxAttribute(VALUE)));
// LogSize 指标
items.add(buildAllVersionsItem()
items.add(buildItem().minVersion(V_0_10_0_0).maxVersion(V_1_0_0)
.name(BROKER_METRIC_LOG_SIZE).unit("byte").desc("Broker上的消息容量大小").category(CATEGORY_PARTITION)
.extendMethod(BROKER_METHOD_GET_LOG_SIZE));
.extendMethod(BROKER_METHOD_GET_LOG_SIZE_FROM_JMX));
items.add(buildItem().minVersion(V_1_0_0).maxVersion(V_MAX)
.name(BROKER_METRIC_LOG_SIZE).unit("byte").desc("Broker上的消息容量大小").category(CATEGORY_PARTITION)
.extendMethod(BROKER_METHOD_GET_LOG_SIZE_FROM_CLIENT));
// ActiveControllerCount 指标
items.add(buildAllVersionsItem(BROKER_METRIC_ACTIVE_CONTROLLER_COUNT, "").desc("Broker是否为controller").category(CATEGORY_PERFORMANCE)
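
This hunk version-gates the Broker LogSize metric: clusters below 1.0.0 keep reading it from JMX, while 1.0.0 and above read it through the client, since DescribeLogDirs only became available with Kafka 1.0.0's AdminClient. As a rough illustration only, a log-size sum via the Admin API could look like the sketch below, assuming a 2.7+ kafka-clients API; this is not the project's BROKER_METHOD_GET_LOG_SIZE_FROM_CLIENT implementation.

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.LogDirDescription;
import java.util.Collections;
import java.util.Map;

public class BrokerLogSizeSketch {
    // Sums the size of every replica hosted on one broker, across all of its log dirs.
    public static long sumLogSizeBytes(Admin admin, int brokerId) throws Exception {
        Map<Integer, Map<String, LogDirDescription>> dirsByBroker =
                admin.describeLogDirs(Collections.singleton(brokerId)).allDescriptions().get();
        return dirsByBroker.getOrDefault(brokerId, Collections.emptyMap()).values().stream()
                .flatMap(dir -> dir.replicaInfos().values().stream())
                .mapToLong(replica -> replica.size())
                .sum();
    }
}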

View File

@@ -1,9 +1,10 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
@@ -56,6 +57,22 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_CLUSTER = "HealthCheckPassed_Cluster";
public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CLUSTER = "HealthCheckTotal_Cluster";
/**
* connector健康指标
*/
public static final String CLUSTER_METRIC_HEALTH_STATE_CONNECTOR = "HealthState_Connector";
public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_CONNECTOR = "HealthCheckPassed_Connector";
public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_CONNECTOR = "HealthCheckTotal_Connector";
/**
* mm2健康指标
*/
public static final String CLUSTER_METRIC_HEALTH_STATE_MIRROR_MAKER = "HealthState_MirrorMaker";
public static final String CLUSTER_METRIC_HEALTH_CHECK_PASSED_MIRROR_MAKER = "HealthCheckPassed_MirrorMaker";
public static final String CLUSTER_METRIC_HEALTH_CHECK_TOTAL_MIRROR_MAKER = "HealthCheckTotal_MirrorMaker";
public static final String CLUSTER_METRIC_TOTAL_REQ_QUEUE_SIZE = "TotalRequestQueueSize";
public static final String CLUSTER_METRIC_TOTAL_RES_QUEUE_SIZE = "TotalResponseQueueSize";
public static final String CLUSTER_METRIC_EVENT_QUEUE_SIZE = "EventQueueSize";
@@ -76,7 +93,6 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
public static final String CLUSTER_METRIC_PARTITION_MIN_ISR_E = "PartitionMinISR_E";
public static final String CLUSTER_METRIC_PARTITION_URP = "PartitionURP";
public static final String CLUSTER_METRIC_MESSAGES_IN = "MessagesIn";
public static final String CLUSTER_METRIC_MESSAGES = "Messages";
public static final String CLUSTER_METRIC_LEADER_MESSAGES = "LeaderMessages";
public static final String CLUSTER_METRIC_BYTES_IN = "BytesIn";
public static final String CLUSTER_METRIC_BYTES_IN_5_MIN = "BytesIn_min_5";
@@ -302,11 +318,6 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
.name(CLUSTER_METRIC_LEADER_MESSAGES).unit("").desc("集群中leader总的消息条数").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_MESSAGE_SIZE )));
// Messages 指标
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_MESSAGES).unit("").desc("集群总的消息条数").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_MESSAGE_SIZE )));
// BytesInPerSec 指标
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_BYTES_IN).unit(BYTE_PER_SEC).desc("集群的每秒写入字节数").category(CATEGORY_CLUSTER)

View File

@@ -1,7 +1,8 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMethodInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;

View File

@@ -1,7 +1,8 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;

View File

@@ -1,7 +1,9 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
@@ -35,6 +37,8 @@ public class TopicMetricVersionItems extends BaseMetricVersionMetric {
public static final String TOPIC_METRIC_BYTES_OUT_MIN_15 = "BytesOut_min_15";
public static final String TOPIC_METRIC_LOG_SIZE = "LogSize";
public static final String TOPIC_METRIC_UNDER_REPLICA_PARTITIONS = "PartitionURP";
public static final String TOPIC_METRIC_MIRROR_FETCH_LAG = "MirrorFetchLag";
public static final String TOPIC_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
@Override
@@ -147,6 +151,11 @@ public class TopicMetricVersionItems extends BaseMetricVersionMetric {
.name(TOPIC_METRIC_COLLECT_COST_TIME).unit("").desc("采集Topic指标的耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(TOPIC_METHOD_DO_NOTHING));
itemList.add(buildItem().minVersion(VersionEnum.V_2_5_0_D_300).maxVersion(VersionEnum.V_2_5_0_D_MAX)
.name(TOPIC_METRIC_MIRROR_FETCH_LAG).unit("").desc("Topic复制延迟消息数").category(CATEGORY_FLOW)
.extend(buildJMXMethodExtend(TOPIC_METHOD_GET_TOPIC_MIRROR_FETCH_LAG)
.jmxObjectName(JMX_SERVER_TOPIC_MIRROR).jmxAttribute(VALUE)));
return itemList;
}
}

View File

@@ -1,7 +1,8 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
package com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BaseMetricVersionMetric;
import org.springframework.stereotype.Component;
import java.util.ArrayList;

View File

@@ -43,10 +43,10 @@ public class ZnodeServiceImpl implements ZnodeService {
try {
children = kafkaZKDAO.getChildren(clusterPhyId, path, false);
} catch (NotExistException e) {
LOGGER.error("class=ZnodeServiceImpl||method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
LOGGER.error("method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, "ZK客户端创建失败");
} catch (Exception e) {
LOGGER.error("class=ZnodeServiceImpl||method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
LOGGER.error("method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, "ZK操作失败");
}
@@ -69,10 +69,10 @@ public class ZnodeServiceImpl implements ZnodeService {
try {
dataAndStat = kafkaZKDAO.getDataAndStat(clusterPhyId, path);
} catch (NotExistException e) {
LOGGER.error("class=ZnodeServiceImpl||method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
LOGGER.error("method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, "ZK客户端创建失败");
} catch (Exception e) {
LOGGER.error("class=ZnodeServiceImpl||method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
LOGGER.error("method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, "ZK操作失败");
}

View File

@@ -44,7 +44,7 @@ import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.VC_JMX_CONNECT_ERROR;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ZookeeperMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ZookeeperMetricVersionItems.*;
@Service
@@ -140,7 +140,7 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo
metrics.putMetric(ret.getData().getMetrics());
} catch (Exception e){
LOGGER.error(
"class=ZookeeperMetricServiceImpl||method=collectMetricsFromZookeeper||clusterPhyId={}||metricName={}||errMsg=exception!",
"method=collectMetricsFromZookeeper||clusterPhyId={}||metricName={}||errMsg=exception!",
clusterPhyId, metricName, e
);
}
@@ -206,8 +206,8 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo
ZookeeperMetrics metrics = new ZookeeperMetrics(param.getClusterPhyId());
metrics.putMetric(ZOOKEEPER_METRIC_AVG_REQUEST_LATENCY, cmdData.getZkAvgLatency());
metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency());
metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency());
metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections().floatValue());
@@ -255,8 +255,8 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo
ZookeeperMetrics metrics = new ZookeeperMetrics(param.getClusterPhyId());
metrics.putMetric(ZOOKEEPER_METRIC_AVG_REQUEST_LATENCY, cmdData.getZkAvgLatency());
metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency());
metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency());
metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount().floatValue());
metrics.putMetric(ZOOKEEPER_METRIC_WATCH_COUNT, cmdData.getZkWatchCount().floatValue());
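
The getZkAvgLatency/getZkMinLatency/getZkWatchCount getters in this hunk suggest the values come from ZooKeeper's "mntr" four-letter-word output (keys such as zk_avg_latency, zk_min_latency, zk_watch_count). Purely as an illustration of that assumption, parsing such output into a name-to-Float map could look like the sketch below; the class name is hypothetical and this is not the project's parser.

import java.util.HashMap;
import java.util.Map;

public class ZkMntrParseSketch {
    // Turns "mntr" output lines such as "zk_avg_latency\t0" into a key -> Float map;
    // non-numeric lines (zk_version, zk_server_state, ...) are skipped.
    public static Map<String, Float> parse(String mntrOutput) {
        Map<String, Float> metrics = new HashMap<>();
        for (String line : mntrOutput.split("\n")) {
            String[] kv = line.trim().split("\\s+", 2);
            if (kv.length < 2) {
                continue;
            }
            try {
                metrics.put(kv[0], Float.parseFloat(kv[1]));
            } catch (NumberFormatException ignored) {
                // skip values that are not plain numbers
            }
        }
        return metrics;
    }
}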

View File

@@ -41,7 +41,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
addressList = ZookeeperUtils.connectStringParser(zookeeperAddress);
} catch (Exception e) {
LOGGER.error(
"class=ZookeeperServiceImpl||method=listFromZookeeperCluster||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
"method=listFromZookeeperCluster||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
clusterPhyId, zookeeperAddress, e
);
@@ -87,7 +87,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
zookeeperDAO.updateById(newInfo);
}
} catch (Exception e) {
LOGGER.error("class=ZookeeperServiceImpl||method=batchReplaceDataInDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterPhyId, newInfo, e);
LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterPhyId, newInfo, e);
}
}
@@ -96,7 +96,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
try {
zookeeperDAO.deleteById(entry.getValue().getId());
} catch (Exception e) {
LOGGER.error("class=ZookeeperServiceImpl||method=batchReplaceDataInDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterPhyId, entry.getValue(), e);
LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterPhyId, entry.getValue(), e);
}
});
}

Some files were not shown because too many files have changed in this diff