v2.1 release update

zengqiao
2020-12-19 00:27:16 +08:00
parent 3fea5c9c8c
commit 49280a8617
75 changed files with 1098 additions and 148 deletions

View File

@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
+package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
 import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
 import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
@@ -16,12 +16,12 @@ import org.springframework.beans.factory.annotation.Autowired;
 import java.util.*;
 /**
- * Topic community metrics storage
+ * Topic community metrics collection
  * @author zengqiao
  * @date 20/7/21
  */
-@CustomScheduled(name = "storeCommunityTopicMetrics", cron = "31 0/1 * * * ?", threadNum = 5)
-public class StoreCommunityTopicMetrics extends AbstractScheduledTask<ClusterDO> {
+@CustomScheduled(name = "collectAndPublishCommunityTopicMetrics", cron = "31 0/1 * * * ?", threadNum = 5)
+public class CollectAndPublishCommunityTopicMetrics extends AbstractScheduledTask<ClusterDO> {
     @Autowired
     private JmxService jmxService;

View File

@@ -1,5 +1,6 @@
 package com.xiaojukeji.kafka.manager.task.dispatch.metrics.delete;
+import com.xiaojukeji.kafka.manager.common.constant.Constant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
 import com.xiaojukeji.kafka.manager.dao.*;
 import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
@@ -19,27 +20,30 @@ import java.util.List;
  * @author zengqiao
  * @date 20/1/8
  */
-@CustomScheduled(name = "deleteMetrics", cron = "0 0/1 * * * ?", threadNum = 1)
+@CustomScheduled(name = "deleteMetrics", cron = "0 0/2 * * * ?", threadNum = 1)
 public class DeleteMetrics extends AbstractScheduledTask<EmptyEntry> {
     private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
     @Autowired
-    private TopicMetricsDao topicMetricsDao;
+    private ConfigUtils configUtils;
     @Autowired
-    private TopicAppMetricsDao topicAppMetricsDao;
+    private TopicMetricsDao topicMetricsDao;
     @Autowired
-    private TopicRequestMetricsDao topicRequestMetricsDao;
+    private TopicAppMetricsDao topicAppMetricsDao;
     @Autowired
-    private BrokerMetricsDao brokerMetricsDao;
+    private TopicRequestMetricsDao topicRequestMetricsDao;
     @Autowired
-    private ClusterMetricsDao clusterMetricsDao;
+    private BrokerMetricsDao brokerMetricsDao;
     @Autowired
-    private ConfigUtils configUtils;
+    private ClusterMetricsDao clusterMetricsDao;
+    @Autowired
+    private TopicThrottledMetricsDao topicThrottledMetricsDao;
     @Override
     public List<EmptyEntry> listAllTasks() {
@@ -50,8 +54,8 @@ public class DeleteMetrics extends AbstractScheduledTask<EmptyEntry> {
     @Override
     public void processTask(EmptyEntry entryEntry) {
-        if (!"dev".equals(configUtils.getKafkaManagerEnv())) {
-            // skip directly in non-staging/production environments
+        if (Constant.INVALID_CODE.equals(configUtils.getMaxMetricsSaveDays())) {
+            // no data deletion needed
             return;
         }
@@ -75,6 +79,12 @@ public class DeleteMetrics extends AbstractScheduledTask<EmptyEntry> {
             LOGGER.error("delete topic request metrics failed.", e);
         }
+        try {
+            deleteThrottledMetrics();
+        } catch (Exception e) {
+            LOGGER.error("delete topic throttled metrics failed.", e);
+        }
         try {
             deleteBrokerMetrics();
         } catch (Exception e) {
@@ -90,27 +100,32 @@ public class DeleteMetrics extends AbstractScheduledTask<EmptyEntry> {
     }
     private void deleteTopicMetrics() {
-        Date endTime = new Date(System.currentTimeMillis() - 3 * 24 * 60 * 60 * 1000);
+        Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
         topicMetricsDao.deleteBeforeTime(endTime);
     }
     private void deleteTopicAppMetrics() {
-        Date endTime = new Date(System.currentTimeMillis() - 3 * 24 * 60 * 60 * 1000);
+        Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
         topicAppMetricsDao.deleteBeforeTime(endTime);
     }
     private void deleteTopicRequestMetrics() {
-        Date endTime = new Date(System.currentTimeMillis() - 3 * 24 * 60 * 60 * 1000);
+        Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
         topicRequestMetricsDao.deleteBeforeTime(endTime);
     }
+    private void deleteThrottledMetrics() {
+        Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
+        topicThrottledMetricsDao.deleteBeforeTime(endTime);
+    }
     private void deleteBrokerMetrics() {
-        Date endTime = new Date(System.currentTimeMillis() - 7 * 24 * 60 * 60 * 1000);
+        Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
         brokerMetricsDao.deleteBeforeTime(endTime);
     }
     private void deleteClusterMetrics() {
-        Date endTime = new Date(System.currentTimeMillis() - 7 * 24 * 60 * 60 * 1000);
+        Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
         clusterMetricsDao.deleteBeforeTime(endTime);
     }
 }
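
Editor's note on the new retention computation: if getMaxMetricsSaveDays() returns a plain int (its return type is not visible in this diff), the expression days * 24 * 60 * 60 * 1000 overflows 32-bit int once the retention exceeds 24 days. A minimal overflow-safe sketch, assuming an int day count; this helper is hypothetical and not part of the commit:

import java.util.Date;
import java.util.concurrent.TimeUnit;

class RetentionCutoff {
    // Hypothetical helper: computes the delete-before timestamp
    // for a retention expressed in days.
    static Date cutoff(int maxSaveDays) {
        // TimeUnit.DAYS.toMillis returns a long (25 days is already
        // 2,160,000,000 ms, past Integer.MAX_VALUE), so no overflow.
        return new Date(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(maxSaveDays));
    }
}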

View File

@@ -23,6 +23,7 @@ import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import java.util.ArrayList;
 import java.util.List;
@@ -34,6 +35,7 @@ import java.util.Map;
  * @date 20/5/7
  */
 @CustomScheduled(name = "storeBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
 public class StoreBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
     private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
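
Editor's note: with matchIfMissing = true, each guarded task stays enabled unless its property is explicitly set to something other than "true", so existing deployments keep their behavior. A self-contained sketch of the gating pattern (GatingDemo and GuardedTask are hypothetical demo names; the prefix and property name are taken from the annotation above):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;

@SpringBootApplication
public class GatingDemo {
    @Component
    @ConditionalOnProperty(prefix = "custom.store-metrics-task.community",
            name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
    static class GuardedTask {
        GuardedTask() {
            // runs at startup only when the bean is created
            System.out.println("broker metrics task registered");
        }
    }

    public static void main(String[] args) {
        // With the property absent the bean is created (matchIfMissing = true);
        // start with --custom.store-metrics-task.community.broker-metrics-enabled=false
        // and the bean, and therefore the scheduled task, is never registered.
        SpringApplication.run(GatingDemo.class, args);
    }
}

The same toggle applies to the didi-task properties added in the files below.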

View File

@@ -16,6 +16,7 @@ import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.stereotype.Component;
 import java.util.*;
@@ -25,6 +26,7 @@ import java.util.*;
  * @date 20/7/21
  */
 @CustomScheduled(name = "storeDiDiAppTopicMetrics", cron = "41 0/1 * * * ?", threadNum = 5)
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "app-topic-metrics-enabled", havingValue = "true", matchIfMissing = true)
 public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask<ClusterDO> {
     private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);

View File

@@ -16,6 +16,7 @@ import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import java.util.*;
@@ -24,6 +25,7 @@ import java.util.*;
  * @date 20/7/21
  */
 @CustomScheduled(name = "storeDiDiTopicRequestTimeMetrics", cron = "51 0/1 * * * ?", threadNum = 5)
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-request-time-metrics-enabled", havingValue = "true", matchIfMissing = true)
 public class StoreDiDiTopicRequestTimeMetrics extends AbstractScheduledTask<ClusterDO> {
     private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);

View File

@@ -69,6 +69,7 @@ public class AutoHandleTopicOrder extends AbstractScheduledTask<EmptyEntry> {
             return ;
         }
+        Integer maxPassedOrderNumPerTask = configService.getAutoPassedTopicApplyOrderNumPerTask();
         for (OrderDO orderDO: doList) {
             if (!OrderTypeEnum.APPLY_TOPIC.getCode().equals(orderDO.getType())) {
                 continue;
@@ -77,7 +78,11 @@ public class AutoHandleTopicOrder extends AbstractScheduledTask<EmptyEntry> {
                 if (!handleApplyTopicOrder(orderDO)) {
                     continue;
                 }
-                return;
+                maxPassedOrderNumPerTask -= 1;
+                if (maxPassedOrderNumPerTask <= 0) {
+                    return;
+                }
+                LOGGER.info("class=AutoHandleTopicOrder||method=processTask||msg=passed id:{}", orderDO.getId());
             } catch (Exception e) {
                 LOGGER.error("handle apply topic order failed, orderDO:{}.", orderDO, e);
             }
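
Editor's note: the change above replaces "return after the first approved order" with a per-run approval budget read from configService. A condensed, self-contained sketch of the pattern (the class and approve method are simplified stand-ins for AutoHandleTopicOrder and handleApplyTopicOrder):

import java.util.List;

class ApprovalBudgetSketch {
    static void processOrders(List<Long> orderIds, int maxPassedPerRun) {
        int budget = maxPassedPerRun;
        for (Long id : orderIds) {
            if (!approve(id)) {
                continue;          // not approved, keep scanning the list
            }
            budget -= 1;           // one approval consumed
            if (budget <= 0) {
                return;            // per-run budget spent, stop early
            }
        }
    }

    static boolean approve(Long id) {
        return true;               // placeholder for the real approval logic
    }
}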

View File

@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
+package com.xiaojukeji.kafka.manager.task.listener;
 import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;

View File

@@ -11,6 +11,7 @@ import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.ApplicationListener;
 import org.springframework.stereotype.Component;
@@ -22,6 +23,7 @@ import java.util.List;
  * @date 20/9/1
  */
 @Component("storeCommunityTopicMetrics2DB")
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "topic-metrics-enabled", havingValue = "true", matchIfMissing = true)
 public class StoreCommunityTopicMetrics2DB implements ApplicationListener<TopicMetricsCollectedEvent> {
     private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
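
Editor's note: together with the renamed CollectAndPublishCommunityTopicMetrics task, this listener points at a publish/subscribe split: the scheduled task collects metrics and publishes an event, and persistence is just one subscriber that the new @ConditionalOnProperty guard can switch off without stopping collection. A minimal sketch of that wiring with Spring events (all class names here are illustrative; only TopicMetricsCollectedEvent and ApplicationListener appear in the diff):

import java.util.Arrays;
import java.util.List;
import org.springframework.context.ApplicationEvent;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

// Illustrative event; the real TopicMetricsCollectedEvent is defined in the project.
class MetricsCollectedEvent extends ApplicationEvent {
    final List<String> metrics;
    MetricsCollectedEvent(Object source, List<String> metrics) {
        super(source);
        this.metrics = metrics;
    }
}

@Component
class CollectorSketch {
    private final ApplicationEventPublisher publisher;
    CollectorSketch(ApplicationEventPublisher publisher) {
        this.publisher = publisher;
    }

    void collectAndPublish() {
        // gather metrics (JmxService in the real task), then publish;
        // every registered listener receives the event
        publisher.publishEvent(new MetricsCollectedEvent(this, Arrays.asList("m1", "m2")));
    }
}

@Component
class StoreSketch implements ApplicationListener<MetricsCollectedEvent> {
    @Override
    public void onApplicationEvent(MetricsCollectedEvent event) {
        // persist event.metrics; in the commit the analogous bean is
        // guarded by @ConditionalOnProperty so persistence can be disabled
    }
}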

View File

@@ -11,6 +11,7 @@ import com.xiaojukeji.kafka.manager.task.common.TopicThrottledMetricsCollectedEv
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.ApplicationListener;
 import org.springframework.stereotype.Component;
@@ -21,6 +22,7 @@ import java.util.*;
  * @date 20/9/24
  */
 @Component("storeTopicThrottledMetrics2DB")
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-throttled-metrics", havingValue = "true", matchIfMissing = true)
 public class StoreTopicThrottledMetrics2DB implements ApplicationListener<TopicThrottledMetricsCollectedEvent> {
     private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);

View File

@@ -31,9 +31,6 @@ public class FlushClusterMetadata {
         Set<Long> oldClusterIdSet = physicalClusterMetadataManager.getClusterIdSet();
         for (ClusterDO clusterDO: doList) {
             newClusterIdSet.add(clusterDO.getId());
-            if (oldClusterIdSet.contains(clusterDO.getId())) {
-                continue;
-            }
             // add the cluster
             physicalClusterMetadataManager.addNew(clusterDO);
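
Editor's note: with the contains-check removed, addNew is now invoked for clusters that are already registered as well, so it must either be idempotent or deliberately refresh existing cluster metadata; the body of addNew is not shown in this diff.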