Mirror of https://github.com/didi/KnowStreaming.git, synced 2025-12-24 03:42:07 +08:00
Sync code
@@ -1,28 +1,18 @@
package com.xiaojukeji.know.streaming.km.collector.metric;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.xiaojukeji.know.streaming.km.collector.service.CollectThreadPoolService;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.BaseMetricEvent;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import org.springframework.beans.factory.annotation.Autowired;

import java.util.concurrent.TimeUnit;

/**
 * @author didi
 */
public abstract class AbstractMetricCollector<T> {
    private static final double SIZE_THRESHOLD = 0.8;

    private final Cache<String, BaseMetrics> latestMetricsMap = Caffeine.newBuilder()
            .expireAfterWrite(3, TimeUnit.MINUTES)
            .build();

    public abstract void collectMetrics(ClusterPhy clusterPhy);

    public abstract VersionItemTypeEnum collectorType();
@@ -30,24 +20,6 @@ public abstract class AbstractMetricCollector<T> {
    @Autowired
    private CollectThreadPoolService collectThreadPoolService;

    /**
     * If the metrics collected within the last 3 minutes look abnormal, fall back to the previously cached values
     */
    protected void doOptimizeMetric(BaseMetrics metricPO){
        BaseMetrics latestMetrics = latestMetricsMap.getIfPresent(metricPO.unique());
        if (latestMetrics == null) {
            latestMetrics = metricPO;
        }

        if(metricPO.getMetrics().size() < latestMetrics.getMetrics().size() * SIZE_THRESHOLD) {
            // Abnormal collection: backfill this round's metrics from the cache
            metricPO.putMetric(latestMetrics.getMetrics());
        } else {
            // Normal collection: refresh the cache with this round's metrics
            latestMetricsMap.put(metricPO.unique(), metricPO);
        }
    }

    protected FutureWaitUtil<Void> getFutureUtilByClusterPhyId(Long clusterPhyId) {
        return collectThreadPoolService.selectSuitableFutureUtil(clusterPhyId * 1000L + this.collectorType().getCode());
    }
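The doOptimizeMetric logic above is a keep-last-known-good cache: if a collection round yields noticeably fewer metric series than the previous round (less than SIZE_THRESHOLD, i.e. 80%, of the cached count), the round is treated as a partial failure and is backfilled from the cache; otherwise it becomes the new cached snapshot, and Caffeine expires any snapshot three minutes after it was written. A minimal, self-contained sketch of that idea, using a simplified SimpleMetrics stand-in instead of the project's BaseMetrics (the class, field, and method names below are assumptions for illustration only):

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

// Simplified stand-in for BaseMetrics: a keyed bag of metric values.
class SimpleMetrics {
    final String key;                                   // plays the role of unique()
    final Map<String, Float> metrics = new HashMap<>(); // plays the role of getMetrics()

    SimpleMetrics(String key) { this.key = key; }
}

public class MetricFallbackSketch {
    private static final double SIZE_THRESHOLD = 0.8;

    // Entries disappear 3 minutes after being written, so a cached
    // snapshot can only be reused for a few collection cycles.
    private final Cache<String, SimpleMetrics> latest = Caffeine.newBuilder()
            .expireAfterWrite(3, TimeUnit.MINUTES)
            .build();

    void optimize(SimpleMetrics current) {
        SimpleMetrics previous = latest.getIfPresent(current.key);
        if (previous == null) {
            previous = current; // first round: nothing to compare against
        }

        if (current.metrics.size() < previous.metrics.size() * SIZE_THRESHOLD) {
            // Far fewer series than last time: treat the round as a partial
            // failure and copy the cached snapshot in (mirrors putMetric above).
            current.metrics.putAll(previous.metrics);
        } else {
            // Looks complete: remember it as the new reference snapshot.
            latest.put(current.key, current);
        }
    }
}

The 3-minute expireAfterWrite bounds how stale a backfilled value can get: if a cluster stays unreachable, the cached snapshot drops out and later rounds report the gap instead of frozen values.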
@@ -103,8 +103,6 @@ public class BrokerMetricCollector extends AbstractMetricCollector<BrokerMetrics
            }
        }

        doOptimizeMetric(metrics);

        // Record how long the collection took
        metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);
    }
@@ -73,7 +73,6 @@ public class ClusterMetricCollector extends AbstractMetricCollector<ClusterMetri
        }

        future.waitExecute(30000);
        doOptimizeMetric(metrics);

        metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);
@@ -129,7 +129,6 @@ public class GroupMetricCollector extends AbstractMetricCollector<List<GroupMetr
            }
        }

        doOptimizeMetric(groupMetrics);
        groupMetricsList.add(groupMetrics);
        groupMetricsList.addAll(tpGroupPOMap.values());
@@ -39,44 +39,46 @@ public class MetricESSender implements ApplicationListener<BaseMetricEvent> {

    @Override
    public void onApplicationEvent(BaseMetricEvent event) {
        if(event instanceof BrokerMetricEvent){
        if(event instanceof BrokerMetricEvent) {
            BrokerMetricEvent brokerMetricEvent = (BrokerMetricEvent)event;
            send2es(KafkaMetricIndexEnum.BROKER_INFO,
                    ConvertUtil.list2List(brokerMetricEvent.getBrokerMetrics(), BrokerMetricPO.class));
                    ConvertUtil.list2List(brokerMetricEvent.getBrokerMetrics(), BrokerMetricPO.class)
            );

        }else if(event instanceof ClusterMetricEvent){
        } else if(event instanceof ClusterMetricEvent) {
            ClusterMetricEvent clusterMetricEvent = (ClusterMetricEvent)event;
            send2es(KafkaMetricIndexEnum.CLUSTER_INFO,
                    ConvertUtil.list2List(clusterMetricEvent.getClusterMetrics(), ClusterMetricPO.class));
                    ConvertUtil.list2List(clusterMetricEvent.getClusterMetrics(), ClusterMetricPO.class)
            );

        }else if(event instanceof TopicMetricEvent){
        } else if(event instanceof TopicMetricEvent) {
            TopicMetricEvent topicMetricEvent = (TopicMetricEvent)event;
            send2es(KafkaMetricIndexEnum.TOPIC_INFO,
                    ConvertUtil.list2List(topicMetricEvent.getTopicMetrics(), TopicMetricPO.class));
                    ConvertUtil.list2List(topicMetricEvent.getTopicMetrics(), TopicMetricPO.class)
            );

        }else if(event instanceof PartitionMetricEvent){
        } else if(event instanceof PartitionMetricEvent) {
            PartitionMetricEvent partitionMetricEvent = (PartitionMetricEvent)event;
            send2es(KafkaMetricIndexEnum.PARTITION_INFO,
                    ConvertUtil.list2List(partitionMetricEvent.getPartitionMetrics(), PartitionMetricPO.class));
                    ConvertUtil.list2List(partitionMetricEvent.getPartitionMetrics(), PartitionMetricPO.class)
            );

        }else if(event instanceof GroupMetricEvent){
        } else if(event instanceof GroupMetricEvent) {
            GroupMetricEvent groupMetricEvent = (GroupMetricEvent)event;
            send2es(KafkaMetricIndexEnum.GROUP_INFO,
                    ConvertUtil.list2List(groupMetricEvent.getGroupMetrics(), GroupMetricPO.class));
                    ConvertUtil.list2List(groupMetricEvent.getGroupMetrics(), GroupMetricPO.class)
            );

        }else if(event instanceof ReplicaMetricEvent){
        } else if(event instanceof ReplicaMetricEvent) {
            ReplicaMetricEvent replicaMetricEvent = (ReplicaMetricEvent)event;
            send2es(KafkaMetricIndexEnum.REPLICATION_INFO,
                    ConvertUtil.list2List(replicaMetricEvent.getReplicationMetrics(), ReplicationMetricPO.class));
                    ConvertUtil.list2List(replicaMetricEvent.getReplicationMetrics(), ReplicationMetricPO.class)
            );
        }
    }

    /**
     * Send to ES according to the monitoring dimension
     *
     * @param stats
     * @param statsList
     * @return
     */
    private boolean send2es(KafkaMetricIndexEnum stats, List<? extends BaseESPO> statsList){
        if (CollectionUtils.isEmpty(statsList)) {
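For readers unfamiliar with the wiring, MetricESSender receives metrics through Spring's application-event mechanism: a collector publishes an event carrying the freshly collected metrics, and the listener above branches on the concrete event type and writes the matching PO list to the corresponding ES index. A hedged sketch of that publish/listen pattern, with hypothetical SampleMetricEvent, SamplePublisher, and SampleListener names rather than the project's actual classes:

import org.springframework.context.ApplicationEvent;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

// Hypothetical event type standing in for BrokerMetricEvent, TopicMetricEvent, etc.
class SampleMetricEvent extends ApplicationEvent {
    private final String payload;

    SampleMetricEvent(Object source, String payload) {
        super(source);
        this.payload = payload;
    }

    String getPayload() { return payload; }
}

// Publishing side: a collector hands its finished metrics to whoever is listening.
@Component
class SamplePublisher {
    private final ApplicationEventPublisher publisher;

    SamplePublisher(ApplicationEventPublisher publisher) {
        this.publisher = publisher;
    }

    void publishCollectedMetrics(String payload) {
        publisher.publishEvent(new SampleMetricEvent(this, payload));
    }
}

// Receiving side: mirrors how MetricESSender implements ApplicationListener<BaseMetricEvent>.
@Component
class SampleListener implements ApplicationListener<SampleMetricEvent> {
    @Override
    public void onApplicationEvent(SampleMetricEvent event) {
        System.out.println("would index: " + event.getPayload());
    }
}

In this setup the ES inserts themselves are submitted to esExecutor (see the send2es hunks below), so a slow bulk write does not hold up the thread that published the event.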
@@ -98,8 +100,9 @@ public class MetricESSender implements ApplicationListener<BaseMetricEvent> {
        int num = (size) % THRESHOLD == 0 ? (size / THRESHOLD) : (size / THRESHOLD + 1);

        if (size < THRESHOLD) {
            esExecutor.execute(() ->
                    baseMetricESDao.batchInsertStats(statsList));
            esExecutor.execute(
                    () -> baseMetricESDao.batchInsertStats(statsList)
            );
            return true;
        }
@@ -107,8 +110,9 @@ public class MetricESSender implements ApplicationListener<BaseMetricEvent> {
            int end = (i * THRESHOLD) > size ? size : (i * THRESHOLD);
            int start = (i - 1) * THRESHOLD;

            esExecutor.execute(() ->
                    baseMetricESDao.batchInsertStats(statsList.subList(start, end)));
            esExecutor.execute(
                    () -> baseMetricESDao.batchInsertStats(statsList.subList(start, end))
            );
        }

        return true;
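Both hunks above implement the same batching rule: compute num = ceil(size / THRESHOLD), send the whole list in one task when it is smaller than THRESHOLD, otherwise submit one executor task per THRESHOLD-sized slice. A small, self-contained sketch of that partition arithmetic (the THRESHOLD value of 100, the method names, and the printing are assumptions for illustration; the project submits each slice to esExecutor instead):

import java.util.ArrayList;
import java.util.List;

public class BatchSplitSketch {
    private static final int THRESHOLD = 100; // assumed batch size, for illustration only

    // Splits the list into ceil(size / THRESHOLD) slices, mirroring
    // "num = size % THRESHOLD == 0 ? size / THRESHOLD : size / THRESHOLD + 1" above.
    static <T> List<List<T>> split(List<T> statsList) {
        int size = statsList.size();
        int num = size % THRESHOLD == 0 ? (size / THRESHOLD) : (size / THRESHOLD + 1);

        List<List<T>> batches = new ArrayList<>();
        if (size < THRESHOLD) {
            batches.add(statsList); // small payload: a single batch, no slicing
            return batches;
        }

        for (int i = 1; i <= num; i++) {
            int end = Math.min(i * THRESHOLD, size);
            int start = (i - 1) * THRESHOLD;
            batches.add(statsList.subList(start, end));
        }
        return batches;
    }

    public static void main(String[] args) {
        List<Integer> sample = new ArrayList<>();
        for (int i = 0; i < 250; i++) {
            sample.add(i);
        }
        // 250 items with THRESHOLD = 100 -> batches of 100, 100 and 50.
        split(sample).forEach(batch -> System.out.println("batch size: " + batch.size()));
    }
}

Because start and end are declared inside the loop body, each lambda in the original code captures its own effectively final copy, which is what makes submitting the slices asynchronously safe.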
@@ -124,7 +124,5 @@ public class PartitionMetricCollector extends AbstractMetricCollector<PartitionM
                );
            }
        }

        metricsMap.entrySet().forEach(elem -> doOptimizeMetric(elem.getValue()));
    }
}
@@ -116,8 +116,6 @@ public class ReplicaMetricCollector extends AbstractMetricCollector<ReplicationM
            }
        }

        doOptimizeMetric(metrics);

        // Record how long the collection took
        metrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);
@@ -131,8 +131,6 @@ public class TopicMetricCollector extends AbstractMetricCollector<List<TopicMetr
            }
        }

        doOptimizeMetric(aggMetrics);

        // Record how long the collection took
        aggMetrics.putMetric(Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME, (System.currentTimeMillis() - startTime) / 1000.0f);
    }
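Every collector hunk above ends with the same two steps: run doOptimizeMetric over the freshly collected metrics, then record how long the round took (in seconds) as a metric of its own. A brief sketch of that tail pattern, using a hypothetical SampleMetricCollector and a plain map instead of the project's collector and Constant classes:

import java.util.HashMap;
import java.util.Map;

// Hypothetical, simplified stand-in for a concrete collector such as BrokerMetricCollector.
public class SampleMetricCollector {
    // Assumed name; the project uses Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME.
    private static final String COST_TIME_METRIC = "CollectMetricsCostTime";

    public Map<String, Float> collectMetrics() {
        long startTime = System.currentTimeMillis();

        Map<String, Float> metrics = new HashMap<>();
        // ... gather broker/topic/partition metrics here ...
        metrics.put("BytesInPerSec", 1024.0f);

        doOptimizeMetric(metrics);

        // Record how long this round took, in seconds, as a metric of its own.
        metrics.put(COST_TIME_METRIC, (System.currentTimeMillis() - startTime) / 1000.0f);
        return metrics;
    }

    // Placeholder for the cache-backed fallback shown in AbstractMetricCollector above.
    private void doOptimizeMetric(Map<String, Float> metrics) {
        // no-op in this sketch
    }
}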