diff --git a/kafka-manager-console/src/container/header/index.tsx b/kafka-manager-console/src/container/header/index.tsx
index 0205e1be..94e992e2 100644
--- a/kafka-manager-console/src/container/header/index.tsx
+++ b/kafka-manager-console/src/container/header/index.tsx
@@ -145,7 +145,7 @@ export const Header = observer((props: IHeader) => {
diff --git a/kafka-manager-console/src/store/curve-info.ts b/kafka-manager-console/src/store/curve-info.ts
index 9531c849..fc4c57a9 100644
--- a/kafka-manager-console/src/store/curve-info.ts
+++ b/kafka-manager-console/src/store/curve-info.ts
@@ -1,6 +1,6 @@
import { observable, action } from 'mobx';
import moment = require('moment');
-import { EChartOption } from 'echarts/lib/echarts';
+import { EChartsOption } from 'echarts';
import { ICurve } from 'container/common-curve/config';
import { curveKeys, PERIOD_RADIO_MAP } from 'container/admin/data-curve/config';
import { timeFormat } from 'constants/strategy';
@@ -13,7 +13,7 @@ class CurveInfo {
public timeRange: [moment.Moment, moment.Moment] = PERIOD_RADIO_MAP.get(this.periodKey).dateRange;
@observable
- public curveData: { [key: string]: EChartOption } = {};
+ public curveData: { [key: string]: EChartsOption } = {};
@observable
public curveLoading: { [key: string]: boolean } = {};
@@ -25,7 +25,7 @@ class CurveInfo {
public currentOperator: string;
@action.bound
- public setCurveData(key: curveKeys | string, data: EChartOption) {
+ public setCurveData(key: curveKeys | string, data: EChartsOption) {
this.curveData[key] = data;
}
@@ -59,7 +59,7 @@ class CurveInfo {
public getCommonCurveData = (
options: ICurve,
- parser: (option: ICurve, data: any[]) => EChartOption,
+ parser: (option: ICurve, data: any[]) => EChartsOption,
reload?: boolean) => {
const { path } = options;
this.setCurveData(path, null);
diff --git a/kafka-manager-console/webpack.config.js b/kafka-manager-console/webpack.config.js
index a07d9990..d6d12fa8 100644
--- a/kafka-manager-console/webpack.config.js
+++ b/kafka-manager-console/webpack.config.js
@@ -122,11 +122,11 @@ module.exports = {
},
},
devServer: {
- contentBase: outPath,
+ // contentBase: outPath,
host: '127.0.0.1',
port: 1025,
hot: true,
- disableHostCheck: true,
+ // disableHostCheck: true,
historyApiFallback: true,
proxy: {
'/api/v1/': {
diff --git a/kafka-manager-core/pom.xml b/kafka-manager-core/pom.xml
index 0d678447..d69ddbb4 100644
--- a/kafka-manager-core/pom.xml
+++ b/kafka-manager-core/pom.xml
@@ -24,7 +24,6 @@
1.8
UTF-8
UTF-8
-
5.1.3.RELEASE
@@ -38,12 +37,10 @@
org.springframework
spring-web
- ${spring-version}
org.springframework
spring-test
- ${spring-version}
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java
index 56e17ae5..2e1e9e71 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientPool.java
@@ -14,6 +14,8 @@ import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Service;
import java.util.Map;
import java.util.Properties;
@@ -25,9 +27,22 @@ import java.util.concurrent.locks.ReentrantLock;
* @author zengqiao
* @date 19/12/24
*/
+@Service
public class KafkaClientPool {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaClientPool.class);
+ @Value(value = "${client-pool.kafka-consumer.min-idle-client-num:24}")
+ private Integer kafkaConsumerMinIdleClientNum;
+
+ @Value(value = "${client-pool.kafka-consumer.max-idle-client-num:24}")
+ private Integer kafkaConsumerMaxIdleClientNum;
+
+ @Value(value = "${client-pool.kafka-consumer.max-total-client-num:24}")
+ private Integer kafkaConsumerMaxTotalClientNum;
+
+ @Value(value = "${client-pool.kafka-consumer.borrow-timeout-unit-ms:3000}")
+ private Integer kafkaConsumerBorrowTimeoutUnitMs;
+
/**
* AdminClient
*/
@@ -84,7 +99,7 @@ public class KafkaClientPool {
return true;
}
- private static void initKafkaConsumerPool(ClusterDO clusterDO) {
+ private void initKafkaConsumerPool(ClusterDO clusterDO) {
lock.lock();
try {
GenericObjectPool> objectPool = KAFKA_CONSUMER_POOL.get(clusterDO.getId());
@@ -92,9 +107,9 @@ public class KafkaClientPool {
return;
}
GenericObjectPoolConfig> config = new GenericObjectPoolConfig<>();
- config.setMaxIdle(24);
- config.setMinIdle(24);
- config.setMaxTotal(24);
+ config.setMaxIdle(kafkaConsumerMaxIdleClientNum);
+ config.setMinIdle(kafkaConsumerMinIdleClientNum);
+ config.setMaxTotal(kafkaConsumerMaxTotalClientNum);
KAFKA_CONSUMER_POOL.put(clusterDO.getId(), new GenericObjectPool<>(new KafkaConsumerFactory(clusterDO), config));
} catch (Exception e) {
LOGGER.error("create kafka consumer pool failed, clusterDO:{}.", clusterDO, e);
@@ -118,7 +133,7 @@ public class KafkaClientPool {
}
}
- public static KafkaConsumer borrowKafkaConsumerClient(ClusterDO clusterDO) {
+ public KafkaConsumer borrowKafkaConsumerClient(ClusterDO clusterDO) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
}
@@ -132,7 +147,7 @@ public class KafkaClientPool {
}
try {
- return objectPool.borrowObject(3000);
+ return objectPool.borrowObject(kafkaConsumerBorrowTimeoutUnitMs);
} catch (Exception e) {
LOGGER.error("borrow kafka consumer client failed, clusterDO:{}.", clusterDO, e);
}
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java
index 744101ef..2c47db7e 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/LogicalClusterMetadataManager.java
@@ -156,6 +156,9 @@ public class LogicalClusterMetadataManager {
return logicalClusterDO.getClusterId();
}
+ /**
+ * 定时刷新逻辑集群元数据到缓存中
+ */
@Scheduled(cron="0/30 * * * * ?")
public void flush() {
List logicalClusterDOList = logicalClusterService.listAll();
@@ -208,7 +211,8 @@ public class LogicalClusterMetadataManager {
// 计算逻辑集群到Topic名称的映射
Set topicNameSet = PhysicalClusterMetadataManager.getBrokerTopicNum(
logicalClusterDO.getClusterId(),
- brokerIdSet);
+ brokerIdSet
+ );
LOGICAL_CLUSTER_ID_TOPIC_NAME_MAP.put(logicalClusterDO.getId(), topicNameSet);
// 计算Topic名称到逻辑集群的映射
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java
index c5f09820..47ab8b64 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/PhysicalClusterMetadataManager.java
@@ -50,6 +50,9 @@ public class PhysicalClusterMetadataManager {
@Autowired
private ClusterService clusterService;
+ @Autowired
+ private ThreadPool threadPool;
+
private static final Map CLUSTER_MAP = new ConcurrentHashMap<>();
private static final Map CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
@@ -125,7 +128,7 @@ public class PhysicalClusterMetadataManager {
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);
//增加Topic监控
- TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig);
+ TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, threadPool);
topicListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener);
@@ -314,7 +317,7 @@ public class PhysicalClusterMetadataManager {
metadataMap.put(brokerId, brokerMetadata);
Map jmxMap = JMX_CONNECTOR_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
- jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig));
+ jmxMap.put(brokerId, new JmxConnectorWrap(clusterId, brokerId, brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig));
JMX_CONNECTOR_MAP.put(clusterId, jmxMap);
Map versionMap = KAFKA_VERSION_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
@@ -539,9 +542,12 @@ public class PhysicalClusterMetadataManager {
}
public static Set getBrokerTopicNum(Long clusterId, Set brokerIdSet) {
- Set topicNameSet = new HashSet<>();
-
Map metadataMap = TOPIC_METADATA_MAP.get(clusterId);
+ if (metadataMap == null) {
+ return new HashSet<>();
+ }
+
+ Set topicNameSet = new HashSet<>();
for (String topicName: metadataMap.keySet()) {
try {
TopicMetadata tm = metadataMap.get(topicName);
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java
index f1b685cb..ba870465 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ThreadPool.java
@@ -1,37 +1,63 @@
package com.xiaojukeji.kafka.manager.service.cache;
import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Service;
-import java.util.concurrent.*;
+import javax.annotation.PostConstruct;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
/**
* @author zengqiao
* @date 20/8/24
*/
+@Service
public class ThreadPool {
- private static final ExecutorService COLLECT_METRICS_THREAD_POOL = new ThreadPoolExecutor(
- 256,
- 256,
- 120L,
- TimeUnit.SECONDS,
- new LinkedBlockingQueue(),
- new DefaultThreadFactory("Collect-Metrics-Thread")
- );
- private static final ExecutorService API_CALL_THREAD_POOL = new ThreadPoolExecutor(
- 16,
- 16,
- 120L,
- TimeUnit.SECONDS,
- new LinkedBlockingQueue(),
- new DefaultThreadFactory("Api-Call-Thread")
- );
+ @Value(value = "${thread-pool.collect-metrics.thread-num:256}")
+ private Integer collectMetricsThreadNum;
- public static void submitCollectMetricsTask(Runnable collectMetricsTask) {
- COLLECT_METRICS_THREAD_POOL.submit(collectMetricsTask);
+ @Value(value = "${thread-pool.collect-metrics.queue-size:10000}")
+ private Integer collectMetricsQueueSize;
+
+ @Value(value = "${thread-pool.api-call.thread-num:16}")
+ private Integer apiCallThreadNum;
+
+ @Value(value = "${thread-pool.api-call.queue-size:10000}")
+ private Integer apiCallQueueSize;
+
+ private ThreadPoolExecutor collectMetricsThreadPool;
+
+ private ThreadPoolExecutor apiCallThreadPool;
+
+ @PostConstruct
+ public void init() {
+ collectMetricsThreadPool = new ThreadPoolExecutor(
+ collectMetricsThreadNum,
+ collectMetricsThreadNum,
+ 120L,
+ TimeUnit.SECONDS,
+ new LinkedBlockingQueue<>(collectMetricsQueueSize),
+ new DefaultThreadFactory("TaskThreadPool")
+ );
+
+ apiCallThreadPool = new ThreadPoolExecutor(
+ apiCallThreadNum,
+ apiCallThreadNum,
+ 120L,
+ TimeUnit.SECONDS,
+ new LinkedBlockingQueue<>(apiCallQueueSize),
+ new DefaultThreadFactory("ApiThreadPool")
+ );
}
- public static void submitApiCallTask(Runnable apiCallTask) {
- API_CALL_THREAD_POOL.submit(apiCallTask);
+ public void submitCollectMetricsTask(Long clusterId, Runnable collectMetricsTask) {
+ collectMetricsThreadPool.submit(collectMetricsTask);
+ }
+
+ public void submitApiCallTask(Long clusterId, Runnable apiCallTask) {
+ apiCallThreadPool.submit(apiCallTask);
}
}
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java
index 0ceb3b30..754a81a7 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/gateway/impl/GatewayConfigServiceImpl.java
@@ -185,7 +185,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
List gatewayConfigDOList = gatewayConfigDao.getByConfigType(gatewayConfigDO.getType());
Long version = 1L;
for (GatewayConfigDO elem: gatewayConfigDOList) {
- if (elem.getVersion() > version) {
+ if (elem.getVersion() >= version) {
+ // 大于等于的情况下,都需要+1
version = elem.getVersion() + 1L;
}
}
@@ -204,6 +205,7 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
@Override
public Result deleteById(Long id) {
try {
+ // TODO 删除的时候,不能直接删,也需要变更一下version
if (gatewayConfigDao.deleteById(id) > 0) {
return Result.buildSuc();
}
@@ -232,7 +234,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
List gatewayConfigDOList = gatewayConfigDao.getByConfigType(newGatewayConfigDO.getType());
Long version = 1L;
for (GatewayConfigDO elem: gatewayConfigDOList) {
- if (elem.getVersion() > version) {
+ if (elem.getVersion() >= version) {
+ // 大于等于的情况下,都需要+1
version = elem.getVersion() + 1L;
}
}
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java
index 24eea55f..ac3e0593 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/BrokerServiceImpl.java
@@ -61,6 +61,9 @@ public class BrokerServiceImpl implements BrokerService {
@Autowired
private PhysicalClusterMetadataManager physicalClusterMetadataManager;
+ @Autowired
+ private ThreadPool threadPool;
+
@Override
public ClusterBrokerStatus getClusterBrokerStatus(Long clusterId) {
// 副本同步状态
@@ -201,7 +204,7 @@ public class BrokerServiceImpl implements BrokerService {
return getBrokerMetricsFromJmx(clusterId, brokerId, metricsCode);
}
});
- ThreadPool.submitApiCallTask(taskList[i]);
+ threadPool.submitApiCallTask(clusterId, taskList[i]);
}
List metricsList = new ArrayList<>(brokerIdSet.size());
for (int i = 0; i < brokerIdList.size(); i++) {
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java
index d0b34e3d..94f00d2c 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/ExpertServiceImpl.java
@@ -22,6 +22,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
+import java.util.regex.Pattern;
/**
* @author zengqiao
@@ -240,9 +241,11 @@ public class ExpertServiceImpl implements ExpertService {
return new ArrayList<>();
}
+ //获取满足条件的过期Topic
List filteredExpiredTopicList = new ArrayList<>();
for (TopicExpiredDO elem: expiredTopicList) {
- if (config.getIgnoreClusterIdList().contains(elem.getClusterId())) {
+ //判定是否为忽略Cluster或者判定是否为忽略Topic名,使用正则来过滤理论上不属于过期的Topic
+ if (config.getIgnoreClusterIdList().contains(elem.getClusterId()) || Pattern.matches(config.getFilterRegex(), elem.getTopicName())) {
continue;
}
filteredExpiredTopicList.add(elem);
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java
index 1dc3b011..0ce3ddbb 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/JmxServiceImpl.java
@@ -39,6 +39,9 @@ public class JmxServiceImpl implements JmxService {
@Autowired
private PhysicalClusterMetadataManager physicalClusterMetadataManager;
+ @Autowired
+ private ThreadPool threadPool;
+
@Override
public BrokerMetrics getBrokerMetrics(Long clusterId, Integer brokerId, Integer metricsCode) {
if (clusterId == null || brokerId == null || metricsCode == null) {
@@ -98,7 +101,7 @@ public class JmxServiceImpl implements JmxService {
);
}
});
- ThreadPool.submitCollectMetricsTask(taskList[i]);
+ threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
List metricsList = new ArrayList<>();
@@ -305,7 +308,7 @@ public class JmxServiceImpl implements JmxService {
return metricsList;
}
});
- ThreadPool.submitCollectMetricsTask(taskList[i]);
+ threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
Map metricsMap = new HashMap<>();
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java
index b468f873..16aa2b9f 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/RegionServiceImpl.java
@@ -2,6 +2,8 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
+import com.xiaojukeji.kafka.manager.common.events.RegionCreatedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.dao.RegionDao;
@@ -59,6 +61,8 @@ public class RegionServiceImpl implements RegionService {
return ResultStatus.BROKER_NOT_EXIST;
}
if (regionDao.insert(regionDO) > 0) {
+ // 发布region创建事件
+ SpringTool.publish(new RegionCreatedEvent(this, regionDO));
return ResultStatus.SUCCESS;
}
} catch (DuplicateKeyException e) {
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java
index 94b3f88f..62d1f4cb 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/service/impl/TopicServiceImpl.java
@@ -87,6 +87,9 @@ public class TopicServiceImpl implements TopicService {
@Autowired
private AbstractHealthScoreStrategy healthScoreStrategy;
+ @Autowired
+ private KafkaClientPool kafkaClientPool;
+
@Override
public List getTopicMetricsFromDB(Long clusterId, String topicName, Date startTime, Date endTime) {
try {
@@ -340,7 +343,7 @@ public class TopicServiceImpl implements TopicService {
Map topicPartitionLongMap = new HashMap<>();
KafkaConsumer kafkaConsumer = null;
try {
- kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO);
+ kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO);
if ((offsetPosEnum.getCode() & OffsetPosEnum.END.getCode()) > 0) {
topicPartitionLongMap = kafkaConsumer.endOffsets(topicPartitionList);
} else if ((offsetPosEnum.getCode() & OffsetPosEnum.BEGINNING.getCode()) > 0) {
@@ -538,7 +541,7 @@ public class TopicServiceImpl implements TopicService {
List partitionOffsetDTOList = new ArrayList<>();
try {
- kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO);
+ kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO);
Map offsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch);
if (offsetAndTimestampMap == null) {
return new ArrayList<>();
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java
index 11adf30c..e93fd9df 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/strategy/healthscore/DidiHealthScoreStrategy.java
@@ -45,6 +45,9 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy {
@Autowired
private JmxService jmxService;
+ @Autowired
+ private ThreadPool threadPool;
+
@Override
public Integer calBrokerHealthScore(Long clusterId, Integer brokerId) {
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
@@ -125,7 +128,7 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy {
return calBrokerHealthScore(clusterId, brokerId);
}
});
- ThreadPool.submitApiCallTask(taskList[i]);
+ threadPool.submitApiCallTask(clusterId, taskList[i]);
}
Integer topicHealthScore = HEALTH_SCORE_HEALTHY;
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java
index 5df85b5e..40b73868 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/utils/ConfigUtils.java
@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.service.utils;
+import lombok.Data;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
@@ -8,38 +9,18 @@ import org.springframework.stereotype.Service;
* @author zengqiao
* @date 20/4/26
*/
+@Data
@Service("configUtils")
public class ConfigUtils {
- @Value(value = "${custom.idc}")
+ private ConfigUtils() {
+ }
+
+ @Value(value = "${custom.idc:cn}")
private String idc;
- @Value(value = "${spring.profiles.active}")
+ @Value(value = "${spring.profiles.active:dev}")
private String kafkaManagerEnv;
- @Value(value = "${custom.store-metrics-task.save-days}")
- private Long maxMetricsSaveDays;
-
- public String getIdc() {
- return idc;
- }
-
- public void setIdc(String idc) {
- this.idc = idc;
- }
-
- public String getKafkaManagerEnv() {
- return kafkaManagerEnv;
- }
-
- public void setKafkaManagerEnv(String kafkaManagerEnv) {
- this.kafkaManagerEnv = kafkaManagerEnv;
- }
-
- public Long getMaxMetricsSaveDays() {
- return maxMetricsSaveDays;
- }
-
- public void setMaxMetricsSaveDays(Long maxMetricsSaveDays) {
- this.maxMetricsSaveDays = maxMetricsSaveDays;
- }
+ @Value(value = "${spring.application.version:unknown}")
+ private String applicationVersion;
}
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java
index a94ec9de..f5cdefe8 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/BrokerStateListener.java
@@ -74,15 +74,10 @@ public class BrokerStateListener implements StateChangeListener {
BrokerMetadata brokerMetadata = null;
try {
brokerMetadata = zkConfig.get(ZkPathUtil.getBrokerIdNodePath(brokerId), BrokerMetadata.class);
- if (!brokerMetadata.getEndpoints().isEmpty()) {
- String endpoint = brokerMetadata.getEndpoints().get(0);
- int idx = endpoint.indexOf("://");
- endpoint = endpoint.substring(idx + "://".length());
- idx = endpoint.indexOf(":");
- brokerMetadata.setHost(endpoint.substring(0, idx));
- brokerMetadata.setPort(Integer.parseInt(endpoint.substring(idx + 1)));
- }
+ // 解析并更新本次存储的broker元信息
+ BrokerMetadata.parseAndUpdateBrokerMetadata(brokerMetadata);
+
brokerMetadata.setClusterId(clusterId);
brokerMetadata.setBrokerId(brokerId);
PhysicalClusterMetadataManager.putBrokerMetadata(clusterId, brokerId, brokerMetadata, jmxConfig);
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java
index 3f43f57b..c417df66 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/ControllerStateListener.java
@@ -19,13 +19,13 @@ import org.springframework.dao.DuplicateKeyException;
* @date 20/5/14
*/
public class ControllerStateListener implements StateChangeListener {
- private final static Logger LOGGER = LoggerFactory.getLogger(ControllerStateListener.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ControllerStateListener.class);
- private Long clusterId;
+ private final Long clusterId;
- private ZkConfigImpl zkConfig;
+ private final ZkConfigImpl zkConfig;
- private ControllerDao controllerDao;
+ private final ControllerDao controllerDao;
public ControllerStateListener(Long clusterId, ZkConfigImpl zkConfig, ControllerDao controllerDao) {
this.clusterId = clusterId;
@@ -35,8 +35,11 @@ public class ControllerStateListener implements StateChangeListener {
@Override
public void init() {
+ if (!checkNodeExist()) {
+ LOGGER.warn("kafka-controller data not exist, clusterId:{}.", clusterId);
+ return;
+ }
processControllerChange();
- return;
}
@Override
@@ -49,12 +52,21 @@ public class ControllerStateListener implements StateChangeListener {
break;
}
} catch (Exception e) {
- LOGGER.error("process controller state change failed, clusterId:{} state:{} path:{}.",
- clusterId, state, path, e);
+ LOGGER.error("process controller state change failed, clusterId:{} state:{} path:{}.", clusterId, state, path, e);
}
}
- private void processControllerChange(){
+ private boolean checkNodeExist() {
+ try {
+ return zkConfig.checkPathExists(ZkPathUtil.CONTROLLER_ROOT_NODE);
+ } catch (Exception e) {
+ LOGGER.error("init kafka-controller data failed, clusterId:{}.", clusterId, e);
+ }
+
+ return false;
+ }
+
+ private void processControllerChange() {
LOGGER.warn("init controllerData or controller change, clusterId:{}.", clusterId);
ControllerData controllerData = null;
try {
diff --git a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java
index 4314a101..6f3d33b3 100644
--- a/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java
+++ b/kafka-manager-core/src/main/java/com/xiaojukeji/kafka/manager/service/zookeeper/TopicStateListener.java
@@ -10,6 +10,7 @@ import com.xiaojukeji.kafka.manager.service.cache.ThreadPool;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
import java.util.HashSet;
import java.util.List;
@@ -28,9 +29,12 @@ public class TopicStateListener implements StateChangeListener {
private ZkConfigImpl zkConfig;
- public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) {
+ private ThreadPool threadPool;
+
+ public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig, ThreadPool threadPool) {
this.clusterId = clusterId;
this.zkConfig = zkConfig;
+ this.threadPool = threadPool;
}
@Override
@@ -47,7 +51,7 @@ public class TopicStateListener implements StateChangeListener {
return null;
}
});
- ThreadPool.submitCollectMetricsTask(taskList[i]);
+ threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
} catch (Exception e) {
LOGGER.error("init topics metadata failed, clusterId:{}.", clusterId, e);
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java
index 75399538..9f1d36eb 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/BrokerMetricsDao.java
@@ -20,5 +20,5 @@ public interface BrokerMetricsDao {
*/
List getBrokerMetrics(Long clusterId, Integer brokerId, Date startTime, Date endTime);
- int deleteBeforeTime(Date endTime);
+ int deleteBeforeTime(Date endTime, Integer limitSize);
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java
index d0731508..0e2e68a7 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/ClusterMetricsDao.java
@@ -10,5 +10,5 @@ public interface ClusterMetricsDao {
List getClusterMetrics(long clusterId, Date startTime, Date endTime);
- int deleteBeforeTime(Date endTime);
+ int deleteBeforeTime(Date endTime, Integer limitSize);
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java
index 9d02c5d5..e0c3f84e 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicAppMetricsDao.java
@@ -30,5 +30,5 @@ public interface TopicAppMetricsDao {
* @param endTime
* @return
*/
- int deleteBeforeTime(Date endTime);
+ int deleteBeforeTime(Date endTime, Integer limitSize);
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java
index 58029f36..5d7af6e0 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicMetricsDao.java
@@ -22,5 +22,5 @@ public interface TopicMetricsDao {
List getLatestTopicMetrics(Long clusterId, Date afterTime);
- int deleteBeforeTime(Date endTime);
+ int deleteBeforeTime(Date endTime, Integer limitSize);
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java
index e7fd5169..5e6b237d 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicRequestMetricsDao.java
@@ -33,9 +33,7 @@ public interface TopicRequestMetricsDao {
* @param endTime
* @return
*/
- int deleteBeforeTime(Date endTime);
-
- int deleteBeforeId(Long id);
+ int deleteBeforeTime(Date endTime, Integer limitSize);
List getById(Long startId, Long endId);
}
\ No newline at end of file
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java
index 1010cc17..cc975c52 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/TopicThrottledMetricsDao.java
@@ -32,5 +32,5 @@ public interface TopicThrottledMetricsDao {
List getLatestTopicThrottledMetrics(Long clusterId, Date afterTime);
- int deleteBeforeTime(Date endTime);
+ int deleteBeforeTime(Date endTime, Integer limitSize);
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java
index 5a06e5ce..bba58185 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/BrokerMetricsImpl.java
@@ -37,7 +37,10 @@ public class BrokerMetricsImpl implements BrokerMetricsDao {
}
@Override
- public int deleteBeforeTime(Date endTime) {
- return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", endTime);
+ public int deleteBeforeTime(Date endTime, Integer limitSize) {
+ Map params = new HashMap<>(2);
+ params.put("endTime", endTime);
+ params.put("limitSize", limitSize);
+ return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", params);
}
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java
index b05d3c0f..08948871 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/ClusterMetricsDaoImpl.java
@@ -27,7 +27,7 @@ public class ClusterMetricsDaoImpl implements ClusterMetricsDao {
@Override
public List getClusterMetrics(long clusterId, Date startTime, Date endTime) {
- Map map = new HashMap(3);
+ Map map = new HashMap<>(3);
map.put("clusterId", clusterId);
map.put("startTime", startTime);
map.put("endTime", endTime);
@@ -35,7 +35,10 @@ public class ClusterMetricsDaoImpl implements ClusterMetricsDao {
}
@Override
- public int deleteBeforeTime(Date endTime) {
- return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", endTime);
+ public int deleteBeforeTime(Date endTime, Integer limitSize) {
+ Map params = new HashMap<>(2);
+ params.put("endTime", endTime);
+ params.put("limitSize", limitSize);
+ return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", params);
}
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java
index fe55a1ab..90ce7e3e 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicAppMetricsDaoImpl.java
@@ -46,7 +46,10 @@ public class TopicAppMetricsDaoImpl implements TopicAppMetricsDao {
}
@Override
- public int deleteBeforeTime(Date endTime) {
- return sqlSession.delete("TopicAppMetricsDao.deleteBeforeTime", endTime);
+ public int deleteBeforeTime(Date endTime, Integer limitSize) {
+ Map params = new HashMap<>(2);
+ params.put("endTime", endTime);
+ params.put("limitSize", limitSize);
+ return sqlSession.delete("TopicAppMetricsDao.deleteBeforeTime", params);
}
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java
index 7397a28c..a7eae32c 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicMetricsDaoImpl.java
@@ -60,7 +60,10 @@ public class TopicMetricsDaoImpl implements TopicMetricsDao {
}
@Override
- public int deleteBeforeTime(Date endTime) {
- return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", endTime);
+ public int deleteBeforeTime(Date endTime, Integer limitSize) {
+ Map params = new HashMap<>(2);
+ params.put("endTime", endTime);
+ params.put("limitSize", limitSize);
+ return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", params);
}
}
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java
index bfaa552c..e59324f5 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicRequestMetricsDaoImpl.java
@@ -45,13 +45,11 @@ public class TopicRequestMetricsDaoImpl implements TopicRequestMetricsDao {
}
@Override
- public int deleteBeforeTime(Date endTime) {
- return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeTime", endTime);
- }
-
- @Override
- public int deleteBeforeId(Long id) {
- return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeId", id);
+ public int deleteBeforeTime(Date endTime, Integer limitSize) {
+ Map params = new HashMap<>();
+ params.put("endTime", endTime);
+ params.put("limitSize", limitSize);
+ return sqlSession.delete("TopicRequestMetricsDao.deleteBeforeTime", params);
}
@Override
diff --git a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java
index 784bc242..b1f64d43 100644
--- a/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java
+++ b/kafka-manager-dao/src/main/java/com/xiaojukeji/kafka/manager/dao/impl/TopicThrottledMetricsDaoImpl.java
@@ -75,7 +75,10 @@ public class TopicThrottledMetricsDaoImpl implements TopicThrottledMetricsDao {
}
@Override
- public int deleteBeforeTime(Date endTime) {
- return sqlSession.delete("TopicThrottledMetricsDao.deleteBeforeTime", endTime);
+ public int deleteBeforeTime(Date endTime, Integer limitSize) {
+ Map params = new HashMap<>(2);
+ params.put("endTime", endTime);
+ params.put("limitSize", limitSize);
+ return sqlSession.delete("TopicThrottledMetricsDao.deleteBeforeTime", params);
}
}
diff --git a/kafka-manager-dao/src/main/resources/mapper/AccountDao.xml b/kafka-manager-dao/src/main/resources/mapper/AccountDao.xml
index ac920416..299d120b 100644
--- a/kafka-manager-dao/src/main/resources/mapper/AccountDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/AccountDao.xml
@@ -8,6 +8,9 @@
+
+
+
@@ -15,9 +18,9 @@
diff --git a/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml
index 49746df7..b5115e10 100644
--- a/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/BrokerMetricsDao.xml
@@ -29,9 +29,9 @@
]]>
-
+
\ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml
index 11614d2d..8aca62ee 100644
--- a/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/ClusterMetricsDao.xml
@@ -27,9 +27,9 @@
-
+
\ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml
index 1c64c0ce..fff5037a 100644
--- a/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/TopicAppMetricsDao.xml
@@ -30,9 +30,9 @@
]]>
-
+
\ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml
index 53e13b2d..249863f4 100644
--- a/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/TopicMetricsDao.xml
@@ -37,9 +37,9 @@
]]>
-
+
\ No newline at end of file
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml
index b9aaa35b..7ad5e679 100644
--- a/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/TopicRequestMetricsDao.xml
@@ -34,15 +34,9 @@
ORDER BY gmt_create ASC
-
+
-
-
-
-
diff --git a/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml b/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml
index c5b6474d..e163d30f 100644
--- a/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml
+++ b/kafka-manager-dao/src/main/resources/mapper/TopicThrottledMetricsDao.xml
@@ -54,9 +54,9 @@
AND gmt_create > #{afterTime}
-
+
\ No newline at end of file
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/account/BaseEnterpriseStaffService.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/account/BaseEnterpriseStaffService.java
index 2eef7774..52e7347e 100644
--- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/account/BaseEnterpriseStaffService.java
+++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/account/BaseEnterpriseStaffService.java
@@ -54,7 +54,8 @@ public class BaseEnterpriseStaffService extends AbstractEnterpriseStaffService {
}
List staffList = new ArrayList<>();
for (AccountDO accountDO: doList) {
- staffList.add(new EnterpriseStaff(accountDO.getUsername(), accountDO.getUsername(), ""));
+ //这里对chineseName填充共识的displayName,Department则获取Department信息
+ staffList.add(new EnterpriseStaff(accountDO.getUsername(), accountDO.getDisplayName(), accountDO.getDepartment()));
}
return staffList;
} catch (Exception e) {
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/ldap/LdapAuthentication.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/ldap/LdapAuthentication.java
index f456c916..4dde4083 100644
--- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/ldap/LdapAuthentication.java
+++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/ldap/LdapAuthentication.java
@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.account.component.ldap;
+import com.xiaojukeji.kafka.manager.common.utils.SplitUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -14,7 +15,9 @@ import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import javax.naming.ldap.InitialLdapContext;
import javax.naming.ldap.LdapContext;
+import java.util.HashMap;
import java.util.Hashtable;
+import java.util.Map;
@Component
public class LdapAuthentication {
@@ -60,8 +63,11 @@ public class LdapAuthentication {
return null;
}
- private String getUserDN(String account, LdapContext ctx) {
+ private Map getLdapAttrsInfo(String account, LdapContext ctx) {
+ //存储更多的LDAP元信息
+ Map ldapAttrsInfo = new HashMap<>();
String userDN = "";
+ ldapAttrsInfo.clear();
try {
SearchControls constraints = new SearchControls();
constraints.setSearchScope(SearchControls.SUBTREE_SCOPE);
@@ -69,7 +75,7 @@ public class LdapAuthentication {
NamingEnumeration en = ctx.search("", filter, constraints);
if (en == null || !en.hasMoreElements()) {
- return "";
+ return null;
}
// maybe more than one element
while (en.hasMoreElements()) {
@@ -78,13 +84,25 @@ public class LdapAuthentication {
SearchResult si = (SearchResult) obj;
userDN += si.getName();
userDN += "," + ldapBasedn;
+ //携带LDAP更多元信息以填充用户元信息
+ ldapAttrsInfo.put("userDN", userDN);
+ ldapAttrsInfo.put("sAMAccountName",
+ SplitUtils.keyValueSplit(si.getAttributes().get("samaccountname").toString()));
+ ldapAttrsInfo.put("department",
+ SplitUtils.keyValueSplit(si.getAttributes().get("department").toString()));
+ ldapAttrsInfo.put("company",
+ SplitUtils.keyValueSplit(si.getAttributes().get("company").toString()));
+ ldapAttrsInfo.put("displayName",
+ SplitUtils.keyValueSplit(si.getAttributes().get("displayname").toString()));
+ ldapAttrsInfo.put("mail",
+ SplitUtils.keyValueSplit(si.getAttributes().get("mail").toString()));
break;
}
}
} catch (Exception e) {
LOGGER.error("class=LdapAuthentication||method=getUserDN||account={}||errMsg={}", account, e);
}
- return userDN;
+ return ldapAttrsInfo;
}
/**
@@ -93,23 +111,23 @@ public class LdapAuthentication {
* @param password
* @return
*/
- public boolean authenticate(String account, String password) {
+ public Map authenticate(String account, String password) {
LdapContext ctx = getLdapContext();
if (ValidateUtils.isNull(ctx)) {
- return false;
+ return null;
}
try {
- String userDN = getUserDN(account, ctx);
- if(ValidateUtils.isBlank(userDN)){
- return false;
+ Map ldapAttrsInfo = getLdapAttrsInfo(account, ctx);
+ if(ValidateUtils.isNull(ldapAttrsInfo)){
+ return null;
}
- ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN);
+ ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, ldapAttrsInfo.get("userDN").toString());
ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, password);
ctx.reconnect(null);
- return true;
+ return ldapAttrsInfo;
} catch (AuthenticationException e) {
LOGGER.warn("class=LdapAuthentication||method=authenticate||account={}||errMsg={}", account, e);
} catch (NamingException e) {
@@ -125,6 +143,6 @@ public class LdapAuthentication {
}
}
}
- return false;
+ return null;
}
}
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java
index 1ff36964..a0309cb6 100644
--- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java
+++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/component/sso/BaseSessionSignOn.java
@@ -17,6 +17,7 @@ import org.springframework.stereotype.Service;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+import java.util.Map;
/**
* @author zengqiao
@@ -31,43 +32,53 @@ public class BaseSessionSignOn extends AbstractSingleSignOn {
private LdapAuthentication ldapAuthentication;
//是否开启ldap验证
- @Value(value = "${account.ldap.enabled:}")
+ @Value(value = "${account.ldap.enabled:false}")
private Boolean accountLdapEnabled;
//ldap自动注册的默认角色。请注意:它通常来说都是低权限角色
- @Value(value = "${account.ldap.auth-user-registration-role:}")
+ @Value(value = "${account.ldap.auth-user-registration-role:normal}")
private String authUserRegistrationRole;
//ldap自动注册是否开启
- @Value(value = "${account.ldap.auth-user-registration:}")
- private boolean authUserRegistration;
+ @Value(value = "${account.ldap.auth-user-registration:false}")
+ private Boolean authUserRegistration;
@Override
public Result loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto) {
if (ValidateUtils.isBlank(dto.getUsername()) || ValidateUtils.isNull(dto.getPassword())) {
return Result.buildFailure("Missing parameters");
}
-
- Result accountResult = accountService.getAccountDO(dto.getUsername());
+ //先创建空对象,看是在LDAP去做填充,还是直接查表填充
+ Result accountResult;
//判断是否激活了LDAP验证, 若激活则也可使用ldap进行认证
if(!ValidateUtils.isNull(accountLdapEnabled) && accountLdapEnabled){
//去LDAP验证账密
- if(!ldapAuthentication.authenticate(dto.getUsername(),dto.getPassword())){
+            Map ldapAttrsInfo = ldapAuthentication.authenticate(dto.getUsername(), dto.getPassword());
+ if(ValidateUtils.isNull(ldapAttrsInfo)){
return Result.buildFrom(ResultStatus.LDAP_AUTHENTICATION_FAILED);
}
+ //LDAP验证通过,拿LDAP的sAMAccountName替换dto对象的值,便于第一次自动注册采用LDAP值,并且第二次也避免REPLACE
+ dto.setUsername(ldapAttrsInfo.get("sAMAccountName").toString());
+ accountResult = accountService.getAccountDO(dto.getUsername());
+
if((ValidateUtils.isNull(accountResult) || ValidateUtils.isNull(accountResult.getData())) && authUserRegistration){
//自动注册
AccountDO accountDO = new AccountDO();
accountDO.setUsername(dto.getUsername());
accountDO.setRole(AccountRoleEnum.getUserRoleEnum(authUserRegistrationRole).getRole());
accountDO.setPassword(dto.getPassword());
+ accountDO.setDisplayName(ldapAttrsInfo.getOrDefault("displayName", "").toString());
+ accountDO.setDepartment(ldapAttrsInfo.getOrDefault("department", "").toString());
+ accountDO.setMail(ldapAttrsInfo.getOrDefault("mail", "").toString());
accountService.createAccount(accountDO);
}
return Result.buildSuc(dto.getUsername());
}
+ //不走LDAP认证直接查表填充
+ accountResult = accountService.getAccountDO(dto.getUsername());
if (ValidateUtils.isNull(accountResult) || accountResult.failed()) {
return new Result<>(accountResult.getCode(), accountResult.getMessage());
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/AccountServiceImpl.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/AccountServiceImpl.java
index e4d03c23..07d10a64 100644
--- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/AccountServiceImpl.java
+++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/AccountServiceImpl.java
@@ -275,6 +275,9 @@ public class AccountServiceImpl implements AccountService {
return enterpriseStaffService.searchEnterpriseStaffByKeyWord(prefix);
}
+ /**
+ * 定时刷新account信息到缓存中
+ */
@Scheduled(cron ="0/5 * * * * ?")
public void flush() {
try {
diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java
index f49f7dca..f0299d87 100644
--- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java
+++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java
@@ -14,6 +14,7 @@ import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import javax.servlet.http.Cookie;
@@ -27,7 +28,13 @@ import javax.servlet.http.HttpSession;
*/
@Service("loginService")
public class LoginServiceImpl implements LoginService {
- private final static Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class);
+
+ @Value(value = "${account.jump-login.gateway-api:false}")
+ private Boolean jumpLoginGatewayApi;
+
+ @Value(value = "${account.jump-login.third-part-api:false}")
+ private Boolean jumpLoginThirdPartApi;
@Autowired
private AccountService accountService;
@@ -75,8 +82,10 @@ public class LoginServiceImpl implements LoginService {
return false;
}
- if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX)) {
- // 白名单接口直接true
+ if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX) ||
+ (jumpLoginGatewayApi != null && jumpLoginGatewayApi && classRequestMappingValue.equals(ApiPrefix.GATEWAY_API_V1_PREFIX)) ||
+ (jumpLoginThirdPartApi != null && jumpLoginThirdPartApi && classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_PREFIX))) {
+ // 登录接口 or 允许跳过且是跳过类型的接口,则直接跳过登录
return true;
}
diff --git a/kafka-manager-extends/kafka-manager-kcm/pom.xml b/kafka-manager-extends/kafka-manager-kcm/pom.xml
index 9b881568..550cf523 100644
--- a/kafka-manager-extends/kafka-manager-kcm/pom.xml
+++ b/kafka-manager-extends/kafka-manager-kcm/pom.xml
@@ -28,7 +28,6 @@
1.8
UTF-8
UTF-8
- 5.1.3.RELEASE
@@ -56,17 +55,14 @@
org.springframework
spring-beans
- ${spring-version}
org.springframework
spring-context
- ${spring-version}
org.springframework
spring-test
- ${spring-version}
diff --git a/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java b/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java
index 6e3fa677..d0a2503b 100644
--- a/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java
+++ b/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java
@@ -37,21 +37,24 @@ import java.util.Map;
public class N9e extends AbstractAgent {
private static final Logger LOGGER = LoggerFactory.getLogger(N9e.class);
- @Value("${kcm.n9e.base-url}")
+ @Value("${kcm.n9e.base-url:}")
private String baseUrl;
- @Value("${kcm.n9e.user-token}")
+ @Value("${kcm.n9e.user-token:12345678}")
private String userToken;
- @Value("${kcm.n9e.account}")
+ @Value("${kcm.n9e.account:root}")
private String account;
- @Value("${kcm.n9e.timeout}")
+ @Value("${kcm.n9e.timeout:300}")
private Integer timeout;
- @Value("${kcm.n9e.script-file}")
+ @Value("${kcm.n9e.script-file:kcm_script.sh}")
private String scriptFile;
+ @Value("${kcm.n9e.logikm-url:}")
+ private String logiKMUrl;
+
private String script;
private static final String CREATE_TASK_URI = "/api/job-ce/tasks";
@@ -219,7 +222,8 @@ public class N9e extends AbstractAgent {
sb.append(creationTaskData.getKafkaPackageUrl()).append(",,");
sb.append(creationTaskData.getServerPropertiesName().replace(KafkaFileEnum.SERVER_CONFIG.getSuffix(), "")).append(",,");
sb.append(creationTaskData.getServerPropertiesMd5()).append(",,");
- sb.append(creationTaskData.getServerPropertiesUrl());
+ sb.append(creationTaskData.getServerPropertiesUrl()).append(",,");
+ sb.append(this.logiKMUrl);
N9eCreationTask n9eCreationTask = new N9eCreationTask();
n9eCreationTask.setTitle(Constant.TASK_TITLE_PREFIX + "-集群ID:" + creationTaskData.getClusterId());
diff --git a/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh b/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh
index ffd54a20..16ffb80c 100644
--- a/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh
+++ b/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh
@@ -18,12 +18,13 @@ p_kafka_server_properties_name=${7} #server配置名
p_kafka_server_properties_md5=${8} #server配置MD5
p_kafka_server_properties_url=${9} #server配置文件下载地址
+p_kafka_manager_url=${10} #LogiKM地址
+
#----------------------------------------配置信息------------------------------------------------------#
g_base_dir='/home'
g_cluster_task_dir=${g_base_dir}"/kafka_cluster_task/task_${p_task_id}" #部署升级路径
g_rollback_version=${g_cluster_task_dir}"/rollback_version" #回滚版本
g_new_kafka_package_name='' #最终的包名
-g_kafka_manager_addr='' #kafka-manager地址
g_local_ip=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
g_hostname=${g_local_ip}
@@ -47,7 +48,7 @@ function dchat_alarm() {
# 检查并初始化环境
function check_and_init_env() {
- if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" ]; then
+ if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" -o -z "${p_kafka_manager_url}" ]; then
ECHO_LOG "存在为空的参数不合法, 退出集群任务"
dchat_alarm "存在为空的参数不合法, 退出集群任务"
exit 1
@@ -72,11 +73,11 @@ function check_and_init_env() {
# 检查并等待集群所有的副本处于同步的状态
function check_and_wait_broker_stabled() {
- under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
+ under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
while [ "$under_replication_count" -ne 1 ]; do
ECHO_LOG "存在${under_replication_count}个副本未同步, sleep 10s"
sleep 10
- under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
+ under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
done
ECHO_LOG "集群副本都已经处于同步的状态, 可以进行集群升级"
}
@@ -324,6 +325,7 @@ ECHO_LOG " p_kafka_package_name=${p_kafka_package_name}"
ECHO_LOG " p_kafka_package_md5=${p_kafka_package_md5}"
ECHO_LOG " p_kafka_server_properties_name=${p_kafka_server_properties_name}"
ECHO_LOG " p_kafka_server_properties_md5=${p_kafka_server_properties_md5}"
+ECHO_LOG " p_kafka_manager_url=${p_kafka_manager_url}"
@@ -342,7 +344,7 @@ fi
ECHO_LOG "停kafka服务"
stop_kafka_server
-ECHO_LOG "停5秒, 确保"
+ECHO_LOG "再停5秒, 确保端口已释放"
sleep 5
if [ "${p_cluster_task_type}" == "0" ];then
diff --git a/kafka-manager-extends/kafka-manager-monitor/pom.xml b/kafka-manager-extends/kafka-manager-monitor/pom.xml
index 66c79b33..489892f7 100644
--- a/kafka-manager-extends/kafka-manager-monitor/pom.xml
+++ b/kafka-manager-extends/kafka-manager-monitor/pom.xml
@@ -25,7 +25,6 @@
1.8
UTF-8
UTF-8
- 5.1.3.RELEASE
@@ -63,12 +62,10 @@
org.springframework
spring-beans
- ${spring-version}
org.springframework
spring-context
- ${spring-version}
diff --git a/kafka-manager-extends/kafka-manager-monitor/src/main/java/com/xiaojukeji/kafka/manager/monitor/component/n9e/N9eService.java b/kafka-manager-extends/kafka-manager-monitor/src/main/java/com/xiaojukeji/kafka/manager/monitor/component/n9e/N9eService.java
index 2d4b041c..5609bb2f 100644
--- a/kafka-manager-extends/kafka-manager-monitor/src/main/java/com/xiaojukeji/kafka/manager/monitor/component/n9e/N9eService.java
+++ b/kafka-manager-extends/kafka-manager-monitor/src/main/java/com/xiaojukeji/kafka/manager/monitor/component/n9e/N9eService.java
@@ -27,19 +27,19 @@ import java.util.concurrent.TimeUnit;
public class N9eService extends AbstractMonitorService {
private static final Logger LOGGER = LoggerFactory.getLogger(N9eService.class);
- @Value("${monitor.n9e.nid}")
+ @Value("${monitor.n9e.nid:}")
private Integer monitorN9eNid;
- @Value("${monitor.n9e.user-token}")
+ @Value("${monitor.n9e.user-token:}")
private String monitorN9eUserToken;
- @Value("${monitor.n9e.mon.base-url}")
+ @Value("${monitor.n9e.mon.base-url:}")
private String monitorN9eMonBaseUrl;
- @Value("${monitor.n9e.sink.base-url}")
+ @Value("${monitor.n9e.sink.base-url:}")
private String monitorN9eSinkBaseUrl;
- @Value("${monitor.n9e.rdb.base-url}")
+ @Value("${monitor.n9e.rdb.base-url:}")
private String monitorN9eRdbBaseUrl;
private static final Cache NOTIFY_GROUP_CACHE = Caffeine.newBuilder()
diff --git a/kafka-manager-extends/kafka-manager-notify/pom.xml b/kafka-manager-extends/kafka-manager-notify/pom.xml
index a2fd2c4b..348164eb 100644
--- a/kafka-manager-extends/kafka-manager-notify/pom.xml
+++ b/kafka-manager-extends/kafka-manager-notify/pom.xml
@@ -25,7 +25,6 @@
1.8
UTF-8
UTF-8
- 5.1.3.RELEASE
@@ -48,7 +47,6 @@
org.springframework
spring-context
- ${spring-version}
\ No newline at end of file
diff --git a/kafka-manager-extends/kafka-manager-notify/src/main/java/com/xiaojukeji/kafka/manager/notify/OrderPassedNotifyService.java b/kafka-manager-extends/kafka-manager-notify/src/main/java/com/xiaojukeji/kafka/manager/notify/OrderPassedNotifyService.java
index 48e4cb89..8fd6f23b 100644
--- a/kafka-manager-extends/kafka-manager-notify/src/main/java/com/xiaojukeji/kafka/manager/notify/OrderPassedNotifyService.java
+++ b/kafka-manager-extends/kafka-manager-notify/src/main/java/com/xiaojukeji/kafka/manager/notify/OrderPassedNotifyService.java
@@ -19,7 +19,7 @@ public class OrderPassedNotifyService implements ApplicationListener1.8
UTF-8
UTF-8
- 5.1.3.RELEASE
@@ -46,7 +45,6 @@
org.springframework
spring-context
- ${spring-version}
diff --git a/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java b/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java
index 5df7815e..07b0a3e3 100644
--- a/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java
+++ b/kafka-manager-extends/kafka-manager-openapi/src/main/java/com/xiaojukeji/kafka/manager/openapi/impl/ThirdPartServiceImpl.java
@@ -42,6 +42,9 @@ public class ThirdPartServiceImpl implements ThirdPartService {
@Autowired
private ConsumerService consumerService;
+ @Autowired
+ private KafkaClientPool kafkaClientPool;
+
@Override
public Result checkConsumeHealth(Long clusterId,
String topicName,
@@ -109,7 +112,7 @@ public class ThirdPartServiceImpl implements ThirdPartService {
Long timestamp) {
KafkaConsumer kafkaConsumer = null;
try {
- kafkaConsumer = KafkaClientPool.borrowKafkaConsumerClient(clusterDO);
+ kafkaConsumer = kafkaClientPool.borrowKafkaConsumerClient(clusterDO);
if (ValidateUtils.isNull(kafkaConsumer)) {
return null;
}
diff --git a/kafka-manager-task/pom.xml b/kafka-manager-task/pom.xml
index 8927ef8e..dce8d3c8 100644
--- a/kafka-manager-task/pom.xml
+++ b/kafka-manager-task/pom.xml
@@ -24,7 +24,6 @@
1.8
UTF-8
UTF-8
- 5.1.3.RELEASE
@@ -52,7 +51,6 @@
org.springframework
spring-context
- ${spring-version}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java
index 7eddb926..bfd6da5d 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/AbstractScheduledTask.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.task.component;
import com.google.common.collect.Lists;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.NetUtils;
@@ -29,7 +28,7 @@ import java.util.concurrent.*;
* @date 20/8/10
*/
public abstract class AbstractScheduledTask implements SchedulingConfigurer {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class);
@Autowired
private HeartbeatDao heartbeatDao;
@@ -73,18 +72,16 @@ public abstract class AbstractScheduledTask implements Sch
LOGGER.info("init custom scheduled finished, scheduledName:{} scheduledCron:{}.", scheduledName, scheduledCron);
}
- private boolean checkAndModifyCron(String scheduledName, String scheduledCron, boolean existIfIllegal) {
+ private boolean checkAndModifyCron(String scheduledName, String scheduledCron, boolean isInit) {
if (scheduledCron.matches(ScheduledTaskConstant.CRON_REG_EX)) {
this.scheduledCron = scheduledCron;
- LOGGER.info("modify scheduledCron success, scheduledName:{} scheduledCron:{}."
- , scheduledName, scheduledCron);
+ LOGGER.info("{} scheduledCron success, scheduledName:{} scheduledCron:{}.", isInit? "init": "modify", scheduledName, scheduledCron);
return true;
}
- LOGGER.error("modify scheduledCron failed, format invalid, scheduledName:{} scheduledCron:{}."
- , scheduledName, scheduledCron);
- if (existIfIllegal) {
- System.exit(0);
+ LOGGER.error("modify scheduledCron failed, format invalid, scheduledName:{} scheduledCron:{}.", scheduledName, scheduledCron);
+ if (isInit) {
+ throw new UnsupportedOperationException(String.format("scheduledName:%s scheduledCron:%s format invalid", scheduledName, scheduledCron));
}
return false;
}
@@ -130,7 +127,8 @@ public abstract class AbstractScheduledTask implements Sch
LOGGER.info("customScheduled task finished, empty selected task, scheduledName:{}.", scheduledName);
return;
}
- LOGGER.info("customScheduled task running, selected tasks, IP:{} selectedTasks:{}.",
+
+ LOGGER.debug("customScheduled task running, selected tasks, IP:{} selectedTasks:{}.",
NetUtils.localIp(), JsonUtils.toJSONString(selectTasks)
);
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java
index 37a36238..b4cfdd47 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/BaseBizTask.java
@@ -1,6 +1,5 @@
package com.xiaojukeji.kafka.manager.task.component;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -9,11 +8,11 @@ import org.slf4j.LoggerFactory;
* @date 20/8/10
*/
public class BaseBizTask implements Runnable {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class);
- private E task;
+ private final E task;
- private AbstractScheduledTask scheduledTask;
+ private final AbstractScheduledTask scheduledTask;
public BaseBizTask(E task, AbstractScheduledTask scheduledTask) {
this.task = task;
@@ -30,6 +29,7 @@ public class BaseBizTask implements Runnable {
} catch (Throwable t) {
LOGGER.error("scheduled task scheduleName:{} execute failed, task:{}", scheduledTask.getScheduledName(), task, t);
}
+
LOGGER.info("scheduled task scheduleName:{} finished, cost-time:{}ms.", scheduledTask.getScheduledName(), System.currentTimeMillis() - startTime);
}
}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/CustomScheduled.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/CustomScheduled.java
index 473d4541..fcc88489 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/CustomScheduled.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/CustomScheduled.java
@@ -18,4 +18,6 @@ public @interface CustomScheduled {
String cron();
int threadNum() default 1;
+
+ String description() default "";
}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/Heartbeat.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/Heartbeat.java
index d00c0ad0..73f1ecb5 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/Heartbeat.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/component/Heartbeat.java
@@ -1,6 +1,5 @@
package com.xiaojukeji.kafka.manager.task.component;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.utils.NetUtils;
import com.xiaojukeji.kafka.manager.dao.HeartbeatDao;
import com.xiaojukeji.kafka.manager.common.entity.pojo.HeartbeatDO;
@@ -18,11 +17,14 @@ import java.util.Date;
*/
@Component
public class Heartbeat {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(Heartbeat.class);
@Autowired
private HeartbeatDao heartbeatDao;
+ /**
+ * 定时获取管控平台所在机器IP等信息到DB
+ */
@Scheduled(cron = ScheduledTaskConstant.HEARTBEAT_CRON)
public void ipFlush() {
try {
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalKafkaTopicBill.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalKafkaTopicBill.java
index 93b0a274..ee9797c6 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalKafkaTopicBill.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalKafkaTopicBill.java
@@ -1,8 +1,6 @@
package com.xiaojukeji.kafka.manager.task.dispatch.biz;
-
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.task.config.TopicBillConfig;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.common.utils.DateUtils;
@@ -24,13 +22,12 @@ import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;
/**
- * 计算账单
* @author zengqiao
* @date 20/5/11
*/
-@CustomScheduled(name = "calKafkaBill", cron = "0 0 1 * * *", threadNum = 1)
+@CustomScheduled(name = "calKafkaBill", cron = "0 0 1 * * ?", threadNum = 1, description = "计算账单")
public class CalKafkaTopicBill extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(CalKafkaTopicBill.class);
@Autowired
private AppService appService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalRegionCapacity.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalRegionCapacity.java
index 973d7888..813626e0 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalRegionCapacity.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalRegionCapacity.java
@@ -19,7 +19,7 @@ import java.util.*;
* @author zengqiao
* @date 20/6/30
*/
-@CustomScheduled(name = "calRegionCapacity", cron = "0 0 0/12 * * ?", threadNum = 1)
+@CustomScheduled(name = "calRegionCapacity", cron = "0 0 0/12 * * ?", threadNum = 1, description = "计算Region容量")
public class CalRegionCapacity extends AbstractScheduledTask {
@Autowired
private RegionService regionService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalTopicStatistics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalTopicStatistics.java
index 7d809417..dd725311 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalTopicStatistics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/CalTopicStatistics.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.task.dispatch.biz;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetPosEnum;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicStatisticsDO;
import com.xiaojukeji.kafka.manager.common.utils.DateUtils;
@@ -28,9 +27,9 @@ import java.util.Map;
* @author zengqiao
* @date 20/3/29
*/
-@CustomScheduled(name = "calTopicStatistics", cron = "0 0 0/4 * * ?", threadNum = 5)
+@CustomScheduled(name = "calTopicStatistics", cron = "0 0 0/4 * * ?", threadNum = 5, description = "定时计算Topic统计数据")
public class CalTopicStatistics extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(CalTopicStatistics.class);
@Autowired
private ClusterService clusterService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushBrokerTable.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushBrokerTable.java
index 533f321f..2612b9bd 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushBrokerTable.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushBrokerTable.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.task.dispatch.biz;
import com.xiaojukeji.kafka.manager.common.bizenum.DBStatusEnum;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerDO;
@@ -25,9 +24,9 @@ import java.util.*;
* @author zengqiao
* @date 20/6/2
*/
-@CustomScheduled(name = "flushBrokerTable", cron = "0 0 0/1 * * ?", threadNum = 1)
+@CustomScheduled(name = "flushBrokerTable", cron = "0 0 0/1 * * ?", threadNum = 1, description = "定时刷新BrokerTable数据")
public class FlushBrokerTable extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushBrokerTable.class);
@Autowired
private BrokerService brokerService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushExpiredTopic.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushExpiredTopic.java
index 1759ea0e..14662173 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushExpiredTopic.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/FlushExpiredTopic.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.task.dispatch.biz;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicExpiredDO;
@@ -30,9 +29,9 @@ import java.util.Map;
* @author zengqiao
* @date 20/4/1
*/
-@CustomScheduled(name = "flushExpiredTopic", cron = "0 0 0/5 * * ?", threadNum = 1)
+@CustomScheduled(name = "flushExpiredTopic", cron = "0 0 0/5 * * ?", threadNum = 1, description = "定期更新过期Topic")
public class FlushExpiredTopic extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushExpiredTopic.class);
@Autowired
private TopicExpiredDao topicExpiredDao;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/SyncClusterTaskState.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/SyncClusterTaskState.java
index 9edddb28..1f9fee22 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/SyncClusterTaskState.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/biz/SyncClusterTaskState.java
@@ -1,6 +1,5 @@
package com.xiaojukeji.kafka.manager.task.dispatch.biz;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterTaskDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.kcm.ClusterTaskService;
@@ -17,13 +16,14 @@ import java.util.Arrays;
import java.util.List;
/**
+ * 同步更新集群任务状态
* @author zengqiao
* @date 20/9/7
*/
-@CustomScheduled(name = "syncClusterTaskState", cron = "0 0/1 * * * ?", threadNum = 1)
+@CustomScheduled(name = "syncClusterTaskState", cron = "0 0/1 * * * ?", threadNum = 1, description = "同步更新集群任务状态")
@ConditionalOnProperty(prefix = "kcm", name = "enabled", havingValue = "true", matchIfMissing = true)
public class SyncClusterTaskState extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SyncClusterTaskState.class);
@Autowired
private ClusterTaskService clusterTaskService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishBrokerMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishBrokerMetrics.java
new file mode 100644
index 00000000..47aa60d4
--- /dev/null
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishBrokerMetrics.java
@@ -0,0 +1,93 @@
+package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
+
+import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
+import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
+import com.xiaojukeji.kafka.manager.service.service.ClusterService;
+import com.xiaojukeji.kafka.manager.service.service.JmxService;
+import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
+import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
+import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Broker指标信息收集
+ * @author zengqiao
+ * @date 20/5/7
+ */
+@CustomScheduled(name = "collectAndPublishBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
+@ConditionalOnProperty(prefix = "task.metrics.collect", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
+public class CollectAndPublishBrokerMetrics extends AbstractScheduledTask {
+ private static final Logger LOGGER = LoggerFactory.getLogger(CollectAndPublishBrokerMetrics.class);
+
+ @Autowired
+ private JmxService jmxService;
+
+ @Autowired
+ private ClusterService clusterService;
+
+ @Autowired
+ private AbstractHealthScoreStrategy healthScoreStrategy;
+
+ @Override
+ protected List listAllTasks() {
+ return clusterService.list();
+ }
+
+ @Override
+ public void processTask(ClusterDO clusterDO) {
+ long startTime = System.currentTimeMillis();
+
+ try {
+ SpringTool.publish(new BatchBrokerMetricsCollectedEvent(
+ this,
+ clusterDO.getId(),
+ startTime,
+ this.getBrokerMetrics(clusterDO.getId()))
+ );
+ } catch (Exception e) {
+ LOGGER.error("collect broker-metrics failed, physicalClusterId:{}.", clusterDO.getId(), e);
+ }
+
+ LOGGER.info("collect broker-metrics finished, physicalClusterId:{} costTime:{}", clusterDO.getId(), System.currentTimeMillis() - startTime);
+ }
+
+ private List getBrokerMetrics(Long clusterId) {
+ List metricsList = new ArrayList<>();
+ for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) {
+ BrokerMetrics metrics = jmxService.getBrokerMetrics(
+ clusterId,
+ brokerId,
+ KafkaMetricsCollections.BROKER_TO_DB_METRICS
+ );
+
+ if (ValidateUtils.isNull(metrics)) {
+ continue;
+ }
+
+ metrics.getMetricsMap().put(
+ JmxConstant.HEALTH_SCORE,
+ healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics)
+ );
+
+ metricsList.add(metrics);
+ }
+
+ if (ValidateUtils.isEmptyList(metricsList)) {
+ return new ArrayList<>();
+ }
+
+ return metricsList;
+ }
+}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java
index cc67428f..ceedff80 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCGData.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetPosEnum;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.metrics.ConsumerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
@@ -28,12 +27,13 @@ import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;
/**
+ * 收集并发布消费者指标数据
* @author zengqiao
* @date 20/9/14
*/
-@CustomScheduled(name = "newCollectAndPublishCGData", cron = "30 0/1 * * * *", threadNum = 10)
+@CustomScheduled(name = "newCollectAndPublishCGData", cron = "30 0/1 * * * ?", threadNum = 10, description = "收集并发布消费者指标数据")
public class CollectAndPublishCGData extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(CollectAndPublishCGData.class);
@Autowired
private TopicService topicService;
@@ -44,6 +44,9 @@ public class CollectAndPublishCGData extends AbstractScheduledTask {
@Autowired
private ConsumerService consumerService;
+ @Autowired
+ private ThreadPool threadPool;
+
@Override
protected List listAllTasks() {
return clusterService.list();
@@ -82,7 +85,7 @@ public class CollectAndPublishCGData extends AbstractScheduledTask {
return getTopicConsumerMetrics(clusterDO, topicName, startTimeUnitMs);
}
});
- ThreadPool.submitCollectMetricsTask(taskList[i]);
+ threadPool.submitCollectMetricsTask(clusterDO.getId(), taskList[i]);
}
List consumerMetricsList = new ArrayList<>();
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java
index a6757310..c6bfb003 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishCommunityTopicMetrics.java
@@ -20,7 +20,7 @@ import java.util.*;
* @author zengqiao
* @date 20/7/21
*/
-@CustomScheduled(name = "collectAndPublishCommunityTopicMetrics", cron = "31 0/1 * * * ?", threadNum = 5)
+@CustomScheduled(name = "collectAndPublishCommunityTopicMetrics", cron = "31 0/1 * * * ?", threadNum = 5, description = "Topic社区指标收集")
public class CollectAndPublishCommunityTopicMetrics extends AbstractScheduledTask {
@Autowired
private JmxService jmxService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishTopicThrottledMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishTopicThrottledMetrics.java
index 27cf68c1..a0783e25 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishTopicThrottledMetrics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/collect/CollectAndPublishTopicThrottledMetrics.java
@@ -16,10 +16,11 @@ import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;
/**
+ * 收集和发布Topic限流信息
* @author zengqiao
* @date 2019-05-10
*/
-@CustomScheduled(name = "collectAndPublishTopicThrottledMetrics", cron = "11 0/1 * * * ?", threadNum = 5)
+@CustomScheduled(name = "collectAndPublishTopicThrottledMetrics", cron = "11 0/1 * * * ?", threadNum = 5, description = "收集和发布Topic限流信息")
public class CollectAndPublishTopicThrottledMetrics extends AbstractScheduledTask {
@Autowired
private ClusterService clusterService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java
index b8632971..c1fad06b 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/delete/DeleteMetrics.java
@@ -1,15 +1,14 @@
package com.xiaojukeji.kafka.manager.task.dispatch.metrics.delete;
-import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
+import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils;
import com.xiaojukeji.kafka.manager.dao.*;
-import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
import com.xiaojukeji.kafka.manager.task.component.EmptyEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
import java.util.Arrays;
import java.util.Date;
@@ -20,12 +19,9 @@ import java.util.List;
* @author zengqiao
* @date 20/1/8
*/
-@CustomScheduled(name = "deleteMetrics", cron = "0 0/2 * * * ?", threadNum = 1)
+@CustomScheduled(name = "deleteMetrics", cron = "0 0/2 * * * ?", threadNum = 1, description = "定期删除Metrics信息")
public class DeleteMetrics extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
-
- @Autowired
- private ConfigUtils configUtils;
+ private static final Logger LOGGER = LoggerFactory.getLogger(DeleteMetrics.class);
@Autowired
private TopicMetricsDao topicMetricsDao;
@@ -45,6 +41,27 @@ public class DeleteMetrics extends AbstractScheduledTask {
@Autowired
private TopicThrottledMetricsDao topicThrottledMetricsDao;
+ @Value(value = "${task.metrics.delete.delete-limit-size:1000}")
+ private Integer deleteLimitSize;
+
+ @Value(value = "${task.metrics.delete.cluster-metrics-save-days:14}")
+ private Integer clusterMetricsSaveDays;
+
+ @Value(value = "${task.metrics.delete.broker-metrics-save-days:14}")
+ private Integer brokerMetricsSaveDays;
+
+ @Value(value = "${task.metrics.delete.topic-metrics-save-days:7}")
+ private Integer topicMetricsSaveDays;
+
+ @Value(value = "${task.metrics.delete.topic-request-time-metrics-save-days:7}")
+ private Integer topicRequestTimeMetricsSaveDays;
+
+ @Value(value = "${task.metrics.delete.topic-throttled-metrics-save-days:7}")
+ private Integer topicThrottledMetricsSaveDays;
+
+ @Value(value = "${task.metrics.delete.app-topic-metrics-save-days:7}")
+ private Integer appTopicMetricsSaveDays;
+
@Override
public List listAllTasks() {
EmptyEntry emptyEntry = new EmptyEntry();
@@ -54,78 +71,73 @@ public class DeleteMetrics extends AbstractScheduledTask {
@Override
public void processTask(EmptyEntry entryEntry) {
- if (Constant.INVALID_CODE.equals(configUtils.getMaxMetricsSaveDays())) {
- // 无需数据删除
- return;
- }
-
long startTime = System.currentTimeMillis();
LOGGER.info("start delete metrics");
- try {
- deleteTopicMetrics();
- } catch (Exception e) {
- LOGGER.error("delete topic metrics failed.", e);
+
+ // 数据量可能比较大,一次触发多删除几次
+ for (int i = 0; i < 10; ++i) {
+ try {
+ boolean needReDelete = this.deleteCommunityTopicMetrics();
+ if (!needReDelete) {
+ break;
+ }
+
+ // 暂停1000毫秒,避免删除太快导致DB出现问题
+ BackoffUtils.backoff(1000);
+ } catch (Exception e) {
+ LOGGER.error("delete community topic metrics failed.", e);
+ }
+ }
+
+ // 数据量可能比较大,一次触发多删除几次
+ for (int i = 0; i < 10; ++i) {
+ try {
+ boolean needReDelete = this.deleteDiDiTopicMetrics();
+ if (!needReDelete) {
+ break;
+ }
+
+ // 暂停1000毫秒,避免删除太快导致DB出现问题
+ BackoffUtils.backoff(1000);
+ } catch (Exception e) {
+ LOGGER.error("delete didi topic metrics failed.", e);
+ }
}
try {
- deleteTopicAppMetrics();
+ this.deleteClusterBrokerMetrics();
} catch (Exception e) {
- LOGGER.error("delete topic app metrics failed.", e);
+ LOGGER.error("delete cluster and broker metrics failed.", e);
}
- try {
- deleteTopicRequestMetrics();
- } catch (Exception e) {
- LOGGER.error("delete topic request metrics failed.", e);
- }
-
- try {
- deleteThrottledMetrics();
- } catch (Exception e) {
- LOGGER.error("delete topic throttled metrics failed.", e);
- }
-
- try {
- deleteBrokerMetrics();
- } catch (Exception e) {
- LOGGER.error("delete broker metrics failed.", e);
- }
-
- try {
- deleteClusterMetrics();
- } catch (Exception e) {
- LOGGER.error("delete cluster metrics failed.", e);
- }
LOGGER.info("finish delete metrics, costTime:{}ms.", System.currentTimeMillis() - startTime);
}
- private void deleteTopicMetrics() {
- Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
- topicMetricsDao.deleteBeforeTime(endTime);
+ private boolean deleteCommunityTopicMetrics() {
+ return topicMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.topicMetricsSaveDays * 24L * 60L* 60L * 1000L), this.deleteLimitSize) >= this.deleteLimitSize;
}
- private void deleteTopicAppMetrics() {
- Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
- topicAppMetricsDao.deleteBeforeTime(endTime);
+ private boolean deleteDiDiTopicMetrics() {
+ boolean needReDelete = false;
+
+ if (topicAppMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.appTopicMetricsSaveDays * 24L * 60L* 60L * 1000L), this.deleteLimitSize) >= this.deleteLimitSize) {
+ needReDelete = true;
+ }
+
+ if (topicRequestMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.topicRequestTimeMetricsSaveDays * 24L * 60L* 60L * 1000L), this.deleteLimitSize) >= this.deleteLimitSize) {
+ needReDelete = true;
+ }
+
+ if (topicThrottledMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.topicThrottledMetricsSaveDays * 24L * 60L* 60L * 1000L), this.deleteLimitSize) >= this.deleteLimitSize) {
+ needReDelete = true;
+ }
+
+ return needReDelete;
}
- private void deleteTopicRequestMetrics() {
- Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
- topicRequestMetricsDao.deleteBeforeTime(endTime);
- }
+ private void deleteClusterBrokerMetrics() {
+ brokerMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.brokerMetricsSaveDays * 24L * 60L* 60L * 1000L), this.deleteLimitSize);
- private void deleteThrottledMetrics() {
- Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
- topicThrottledMetricsDao.deleteBeforeTime(endTime);
- }
-
- private void deleteBrokerMetrics() {
- Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
- brokerMetricsDao.deleteBeforeTime(endTime);
- }
-
- private void deleteClusterMetrics() {
- Date endTime = new Date(System.currentTimeMillis() - configUtils.getMaxMetricsSaveDays() * 24 * 60 * 60 * 1000);
- clusterMetricsDao.deleteBeforeTime(endTime);
+ clusterMetricsDao.deleteBeforeTime(new Date(System.currentTimeMillis() - this.clusterMetricsSaveDays * 24L * 60L * 60L * 1000L), this.deleteLimitSize);
}
}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreBrokerMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreBrokerMetrics.java
deleted file mode 100644
index 50f5f633..00000000
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreBrokerMetrics.java
+++ /dev/null
@@ -1,136 +0,0 @@
-package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
-
-import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
-import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
-import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
-import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
-import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
-import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
-import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
-import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
-import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
-import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
-import com.xiaojukeji.kafka.manager.service.service.ClusterService;
-import com.xiaojukeji.kafka.manager.service.service.JmxService;
-import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
-import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
-import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
-import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Broker指标信息存DB, Broker流量, 集群流量
- * @author zengqiao
- * @date 20/5/7
- */
-@CustomScheduled(name = "storeBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
-@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
-public class StoreBrokerMetrics extends AbstractScheduledTask {
- private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
-
- @Autowired
- private JmxService jmxService;
-
- @Autowired
- private ClusterService clusterService;
-
- @Autowired
- private BrokerMetricsDao brokerMetricsDao;
-
- @Autowired
- private ClusterMetricsDao clusterMetricsDao;
-
- @Autowired
- private AbstractHealthScoreStrategy healthScoreStrategy;
-
- private static final Integer INSERT_BATCH_SIZE = 100;
-
- @Override
- protected List listAllTasks() {
- return clusterService.list();
- }
-
- @Override
- public void processTask(ClusterDO clusterDO) {
- long startTime = System.currentTimeMillis();
- List clusterMetricsList = new ArrayList<>();
-
- try {
- List brokerMetricsList = getAndBatchAddMetrics(startTime, clusterDO.getId());
- clusterMetricsList.add(supplyAndConvert2ClusterMetrics(
- clusterDO.getId(),
- MetricsConvertUtils.merge2BaseMetricsByAdd(brokerMetricsList))
- );
- } catch (Throwable t) {
- LOGGER.error("collect failed, clusterId:{}.", clusterDO.getId(), t);
- }
- long endTime = System.currentTimeMillis();
- LOGGER.info("collect finish, clusterId:{} costTime:{}", clusterDO.getId(), endTime - startTime);
-
- List doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
- startTime,
- clusterMetricsList
- );
- clusterMetricsDao.batchAdd(doList);
- }
-
- private List getAndBatchAddMetrics(Long startTime, Long clusterId) {
- List metricsList = new ArrayList<>();
- for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) {
- BrokerMetrics metrics = jmxService.getBrokerMetrics(
- clusterId,
- brokerId,
- KafkaMetricsCollections.BROKER_TO_DB_METRICS
- );
- if (ValidateUtils.isNull(metrics)) {
- continue;
- }
- metrics.getMetricsMap().put(
- JmxConstant.HEALTH_SCORE,
- healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics)
- );
- metricsList.add(metrics);
- }
- if (ValidateUtils.isEmptyList(metricsList)) {
- return new ArrayList<>();
- }
-
- List doList =
- MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(startTime, metricsList);
- int i = 0;
- do {
- brokerMetricsDao.batchAdd(doList.subList(i, Math.min(i + INSERT_BATCH_SIZE, doList.size())));
- i += INSERT_BATCH_SIZE;
- } while (i < doList.size());
- return metricsList;
- }
-
- private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
- ClusterMetrics metrics = new ClusterMetrics(clusterId);
- Map metricsMap = metrics.getMetricsMap();
- metricsMap.putAll(baseMetrics.getMetricsMap());
- metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
- metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
- Integer partitionNum = 0;
- for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
- TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
- if (ValidateUtils.isNull(topicMetaData)) {
- continue;
- }
- partitionNum += topicMetaData.getPartitionNum();
- }
- metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
- return metrics;
- }
-}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java
index ede6525d..e04cda15 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiAppTopicMetrics.java
@@ -2,7 +2,6 @@ package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.TopicAppMetricsDao;
@@ -17,18 +16,18 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-import org.springframework.stereotype.Component;
import java.util.*;
/**
+ * JMX中获取appId维度的流量信息存DB
* @author zengqiao
* @date 20/7/21
*/
-@CustomScheduled(name = "storeDiDiAppTopicMetrics", cron = "41 0/1 * * * ?", threadNum = 5)
+@CustomScheduled(name = "storeDiDiAppTopicMetrics", cron = "41 0/1 * * * ?", threadNum = 5, description = "JMX中获取appId维度的流量信息存DB")
@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "app-topic-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(StoreDiDiAppTopicMetrics.class);
@Autowired
private JmxService jmxService;
@@ -50,7 +49,7 @@ public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask {
try {
getAndBatchAddTopicAppMetrics(startTime, clusterDO.getId());
- } catch (Throwable t) {
+ } catch (Exception t) {
LOGGER.error("save topic metrics failed, clusterId:{}.", clusterDO.getId(), t);
}
}
@@ -65,7 +64,12 @@ public class StoreDiDiAppTopicMetrics extends AbstractScheduledTask {
MetricsConvertUtils.convertAndUpdateCreateTime2TopicMetricsDOList(startTime, metricsList);
int i = 0;
do {
- topicAppMetricsDao.batchAdd(doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size())));
+ List subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
+ if (ValidateUtils.isEmptyList(subDOList)) {
+ return;
+ }
+
+ topicAppMetricsDao.batchAdd(subDOList);
i += Constant.BATCH_INSERT_SIZE;
} while (i < doList.size());
}
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java
index c4caa229..5885d800 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/metrics/store/StoreDiDiTopicRequestTimeMetrics.java
@@ -2,7 +2,6 @@ package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.TopicRequestMetricsDao;
@@ -21,13 +20,14 @@ import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import java.util.*;
/**
+ * JMX中获取的TopicRequestTimeMetrics信息存DB
* @author zengqiao
* @date 20/7/21
*/
-@CustomScheduled(name = "storeDiDiTopicRequestTimeMetrics", cron = "51 0/1 * * * ?", threadNum = 5)
+@CustomScheduled(name = "storeDiDiTopicRequestTimeMetrics", cron = "51 0/1 * * * ?", threadNum = 5, description = "JMX中获取的TopicRequestTimeMetrics信息存DB")
@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-request-time-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class StoreDiDiTopicRequestTimeMetrics extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(StoreDiDiTopicRequestTimeMetrics.class);
@Autowired
private JmxService jmxService;
@@ -51,7 +51,7 @@ public class StoreDiDiTopicRequestTimeMetrics extends AbstractScheduledTask subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
+ if (ValidateUtils.isEmptyList(subDOList)) {
+ return;
+ }
+
+ topicRequestMetricsDao.batchAdd(subDOList);
i += Constant.BATCH_INSERT_SIZE;
} while (i < doList.size());
}
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutoHandleTopicOrder.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutoHandleTopicOrder.java
index 46158b60..198bd1c0 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutoHandleTopicOrder.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutoHandleTopicOrder.java
@@ -4,7 +4,6 @@ import com.xiaojukeji.kafka.manager.bpm.OrderService;
import com.xiaojukeji.kafka.manager.bpm.common.OrderStatusEnum;
import com.xiaojukeji.kafka.manager.bpm.common.OrderTypeEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.constant.SystemCodeConstant;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
@@ -31,14 +30,15 @@ import java.util.List;
import java.util.Properties;
/**
+ * 定时自动处理Topic相关工单
* @author zengqiao
* @date 20/7/28
*/
@Component
-@CustomScheduled(name = "autoHandleTopicOrder", cron = "0 0/1 * * * ?", threadNum = 1)
+@CustomScheduled(name = "autoHandleTopicOrder", cron = "0 0/1 * * * ?", threadNum = 1, description = "定时自动处理Topic相关工单")
@ConditionalOnProperty(prefix = "task.op.order-auto-exec", name = "topic-enabled", havingValue = "true", matchIfMissing = false)
public class AutoHandleTopicOrder extends AbstractScheduledTask {
- private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(AutoHandleTopicOrder.class);
@Autowired
private ConfigService configService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutomatedHandleOrder.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutomatedHandleOrder.java
index e9cb1cb1..d428a1cd 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutomatedHandleOrder.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/AutomatedHandleOrder.java
@@ -4,7 +4,6 @@ import com.xiaojukeji.kafka.manager.bpm.OrderService;
import com.xiaojukeji.kafka.manager.bpm.common.OrderStatusEnum;
import com.xiaojukeji.kafka.manager.bpm.common.OrderTypeEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.common.utils.DateUtils;
@@ -31,10 +30,10 @@ import java.util.*;
* @date 2020/6/12
*/
@Component
-@CustomScheduled(name = "automatedHandleOrder", cron = "0 0/1 * * * ?", threadNum = 1)
+@CustomScheduled(name = "automatedHandleOrder", cron = "0 0/1 * * * ?", threadNum = 1, description = "工单自动化审批")
@ConditionalOnProperty(prefix = "task.op.order-auto-exec", name = "app-enabled", havingValue = "true", matchIfMissing = false)
public class AutomatedHandleOrder extends AbstractScheduledTask {
- private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(AutomatedHandleOrder.class);
@Autowired
private OrderService orderService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/FlushReassignment.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/FlushReassignment.java
index cfd11bfa..7006158b 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/FlushReassignment.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/FlushReassignment.java
@@ -3,7 +3,6 @@ package com.xiaojukeji.kafka.manager.task.dispatch.op;
import com.xiaojukeji.kafka.manager.common.bizenum.TaskStatusEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.TaskStatusReassignEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -34,9 +33,9 @@ import java.util.*;
* @date 19/12/29
*/
@Component
-@CustomScheduled(name = "flushReassignment", cron = "0 0/1 * * * ?", threadNum = 1)
+@CustomScheduled(name = "flushReassignment", cron = "0 0/1 * * * ?", threadNum = 1, description = "定时处理分区迁移任务")
public class FlushReassignment extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushReassignment.class);
@Autowired
private ClusterService clusterService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/SyncTopic2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/SyncTopic2DB.java
index bb069aa8..c41ff634 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/SyncTopic2DB.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/dispatch/op/SyncTopic2DB.java
@@ -2,7 +2,6 @@ package com.xiaojukeji.kafka.manager.task.dispatch.op;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
@@ -36,10 +35,10 @@ import java.util.stream.Collectors;
* @date 19/12/29
*/
@Component
-@CustomScheduled(name = "syncTopic2DB", cron = "0 0/2 * * * ?", threadNum = 1)
+@CustomScheduled(name = "syncTopic2DB", cron = "0 0/2 * * * ?", threadNum = 1, description = "定期将未落盘的Topic刷新到DB中")
@ConditionalOnProperty(prefix = "task.op", name = "sync-topic-enabled", havingValue = "true", matchIfMissing = false)
public class SyncTopic2DB extends AbstractScheduledTask {
- private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SyncTopic2DB.class);
private static final String SYNC_TOPIC_2_DB_CONFIG_KEY = "SYNC_TOPIC_2_DB_CONFIG_KEY";
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/biz/RegionCreatedListener.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/biz/RegionCreatedListener.java
new file mode 100644
index 00000000..5daa0e9e
--- /dev/null
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/biz/RegionCreatedListener.java
@@ -0,0 +1,38 @@
+package com.xiaojukeji.kafka.manager.task.listener.biz;
+
+import com.xiaojukeji.kafka.manager.common.events.RegionCreatedEvent;
+import com.xiaojukeji.kafka.manager.task.dispatch.biz.CalRegionCapacity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.ApplicationListener;
+import org.springframework.scheduling.annotation.Async;
+import org.springframework.stereotype.Component;
+
+/**
+ * Region创建监听器,
+ * TODO 后续需要将其移动到core模块
+ * @author zengqiao
+ * @date 22/01/11
+ */
+@Component
+public class RegionCreatedListener implements ApplicationListener {
+ private static final Logger logger = LoggerFactory.getLogger(RegionCreatedListener.class);
+
+ @Autowired
+ private CalRegionCapacity calRegionCapacity;
+
+ @Async
+ @Override
+ public void onApplicationEvent(RegionCreatedEvent event) {
+ try {
+ logger.info("cal region capacity started when region created, regionDO:{}.", event.getRegionDO());
+
+ calRegionCapacity.processTask(event.getRegionDO());
+
+ logger.info("cal region capacity finished when region created, regionDO:{}.", event.getRegionDO());
+ } catch (Exception e) {
+ logger.error("cal region capacity failed when region created, regionDO:{}.", event.getRegionDO(), e);
+ }
+ }
+}
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkBrokerMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkBrokerMetrics2DB.java
new file mode 100644
index 00000000..923d26b6
--- /dev/null
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkBrokerMetrics2DB.java
@@ -0,0 +1,55 @@
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
+
+import com.xiaojukeji.kafka.manager.common.constant.Constant;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
+import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+
+import java.util.List;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+@Component
+@ConditionalOnProperty(prefix = "task.metrics.sink.broker-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
+public class SinkBrokerMetrics2DB implements ApplicationListener {
+ private static final Logger logger = LoggerFactory.getLogger(SinkBrokerMetrics2DB.class);
+
+ @Autowired
+ private BrokerMetricsDao metricsDao;
+
+ @Override
+ public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
+ logger.debug("sink broker-metrics to db start, event:{}.", event);
+
+ List metricsList = event.getMetricsList();
+ if (ValidateUtils.isEmptyList(metricsList)) {
+ logger.warn("sink broker-metrics to db finished, without need sink, event:{}.", event);
+ return;
+ }
+
+ List doList = MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(event.getCollectTime(), metricsList);
+ int i = 0;
+ while (i < doList.size()) {
+ List subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
+ if (ValidateUtils.isEmptyList(subDOList)) {
+ break;
+ }
+
+ metricsDao.batchAdd(subDOList);
+ i += Constant.BATCH_INSERT_SIZE;
+ }
+
+ logger.debug("sink broker-metrics to db finished, event:{}.", event);
+ }
+}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java
new file mode 100644
index 00000000..a1aab09c
--- /dev/null
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/SinkClusterMetrics2DB.java
@@ -0,0 +1,80 @@
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
+
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
+import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
+import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
+import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
+import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+@Component
+@ConditionalOnProperty(prefix = "task.metrics.sink.cluster-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
+public class SinkClusterMetrics2DB implements ApplicationListener {
+ private static final Logger logger = LoggerFactory.getLogger(SinkClusterMetrics2DB.class);
+
+ @Autowired
+ private ClusterMetricsDao clusterMetricsDao;
+
+ @Override
+ public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
+ logger.debug("sink cluster-metrics to db start, event:{}.", event);
+
+ List metricsList = event.getMetricsList();
+ if (ValidateUtils.isEmptyList(metricsList)) {
+ logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
+ return;
+ }
+
+ List doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
+ event.getCollectTime(),
+ // 合并broker-metrics为cluster-metrics
+ Arrays.asList(supplyAndConvert2ClusterMetrics(event.getPhysicalClusterId(), MetricsConvertUtils.merge2BaseMetricsByAdd(event.getMetricsList())))
+ );
+
+ if (ValidateUtils.isEmptyList(doList)) {
+ logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
+ return;
+ }
+
+ clusterMetricsDao.batchAdd(doList);
+
+ logger.debug("sink cluster-metrics to db finished, event:{}.", event);
+ }
+
+ private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
+ ClusterMetrics metrics = new ClusterMetrics(clusterId);
+ Map metricsMap = metrics.getMetricsMap();
+ metricsMap.putAll(baseMetrics.getMetricsMap());
+ metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
+ metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
+ Integer partitionNum = 0;
+ for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
+ TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
+ if (ValidateUtils.isNull(topicMetaData)) {
+ continue;
+ }
+ partitionNum += topicMetaData.getPartitionNum();
+ }
+ metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
+ return metrics;
+ }
+}
\ No newline at end of file
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreCommunityTopicMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreCommunityTopicMetrics2DB.java
similarity index 76%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreCommunityTopicMetrics2DB.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreCommunityTopicMetrics2DB.java
index 0c0714f7..46966d5e 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreCommunityTopicMetrics2DB.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreCommunityTopicMetrics2DB.java
@@ -1,7 +1,6 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicMetricsDO;
import com.xiaojukeji.kafka.manager.common.events.TopicMetricsCollectedEvent;
@@ -25,7 +24,7 @@ import java.util.List;
@Component("storeCommunityTopicMetrics2DB")
@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "topic-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class StoreCommunityTopicMetrics2DB implements ApplicationListener {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(StoreCommunityTopicMetrics2DB.class);
@Autowired
private TopicMetricsDao topicMetricsDao;
@@ -40,17 +39,21 @@ public class StoreCommunityTopicMetrics2DB implements ApplicationListener metricsList) throws Exception {
- List doList =
- MetricsConvertUtils.convertAndUpdateCreateTime2TopicMetricsDOList(startTime, metricsList);
+ private void store2DB(Long startTime, List metricsList) {
+ List doList = MetricsConvertUtils.convertAndUpdateCreateTime2TopicMetricsDOList(startTime, metricsList);
int i = 0;
do {
- topicMetricsDao.batchAdd(doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size())));
+ List subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
+ if (ValidateUtils.isEmptyList(subDOList)) {
+ return;
+ }
+
+ topicMetricsDao.batchAdd(subDOList);
i += Constant.BATCH_INSERT_SIZE;
} while (i < doList.size());
}
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreTopicThrottledMetrics2DB.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreTopicThrottledMetrics2DB.java
similarity index 92%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreTopicThrottledMetrics2DB.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreTopicThrottledMetrics2DB.java
index 4e34e732..fd0f6517 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/StoreTopicThrottledMetrics2DB.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/db/StoreTopicThrottledMetrics2DB.java
@@ -1,8 +1,7 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicThrottledMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicThrottledMetricsDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -22,9 +21,9 @@ import java.util.*;
* @date 20/9/24
*/
@Component("storeTopicThrottledMetrics2DB")
-@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-throttled-metrics", havingValue = "true", matchIfMissing = true)
+@ConditionalOnProperty(prefix = "custom.store-metrics-task.didi", name = "topic-throttled-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class StoreTopicThrottledMetrics2DB implements ApplicationListener {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(StoreTopicThrottledMetrics2DB.class);
@Autowired
private ThrottleService throttleService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Kafka.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkCommunityTopicMetrics2Kafka.java
similarity index 94%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Kafka.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkCommunityTopicMetrics2Kafka.java
index ad80ceb2..3b8e6413 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Kafka.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkCommunityTopicMetrics2Kafka.java
@@ -1,7 +1,6 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ao.config.TopicNameConfig;
import com.xiaojukeji.kafka.manager.common.entity.ao.remote.KafkaTopicMetrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
@@ -27,7 +26,7 @@ import java.util.List;
*/
@Component("sinkCommunityTopicMetrics2Kafka")
public class SinkCommunityTopicMetrics2Kafka implements ApplicationListener {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SinkCommunityTopicMetrics2Kafka.class);
@Autowired
private ConfigService configService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Kafka.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkConsumerMetrics2Kafka.java
similarity index 94%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Kafka.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkConsumerMetrics2Kafka.java
index 7070dae1..c2d69dbe 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Kafka.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/kafka/SinkConsumerMetrics2Kafka.java
@@ -1,7 +1,6 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ao.config.TopicNameConfig;
import com.xiaojukeji.kafka.manager.common.entity.ao.remote.KafkaConsumerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.ao.remote.KafkaConsumerMetricsElem;
@@ -27,7 +26,7 @@ import java.util.Map;
*/
@Component("produceConsumerMetrics")
public class SinkConsumerMetrics2Kafka implements ApplicationListener {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SinkConsumerMetrics2Kafka.class);
@Autowired
private ConfigService configService;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Monitor.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkCommunityTopicMetrics2Monitor.java
similarity index 94%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Monitor.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkCommunityTopicMetrics2Monitor.java
index e2ac74a9..4adcf915 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkCommunityTopicMetrics2Monitor.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkCommunityTopicMetrics2Monitor.java
@@ -1,7 +1,6 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.monitor.common.MonitorSinkConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.monitor.common.entry.sink.MonitorTopicSinkTag;
@@ -26,13 +25,14 @@ import java.util.Arrays;
import java.util.List;
/**
+ * 定时上报Topic监控指标
* @author zengqiao
* @date 20/8/10
*/
@ConditionalOnProperty(prefix = "monitor", name = "enabled", havingValue = "true", matchIfMissing = true)
-@CustomScheduled(name = "sinkCommunityTopicMetrics2Monitor", cron = "1 0/1 * * * ?", threadNum = 5)
+@CustomScheduled(name = "sinkCommunityTopicMetrics2Monitor", cron = "1 0/1 * * * ?", threadNum = 5, description = "定时上报Topic监控指标")
public class SinkCommunityTopicMetrics2Monitor extends AbstractScheduledTask {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SinkCommunityTopicMetrics2Monitor.class);
@Autowired
private AbstractMonitorService abstractMonitor;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Monitor.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkConsumerMetrics2Monitor.java
similarity index 97%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Monitor.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkConsumerMetrics2Monitor.java
index 4ca276f9..d12b0dfb 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkConsumerMetrics2Monitor.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkConsumerMetrics2Monitor.java
@@ -1,7 +1,6 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.monitor.common.MonitorSinkConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.ConsumerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
@@ -32,7 +31,7 @@ import java.util.*;
@Component("sinkConsumerMetrics2Monitor")
@ConditionalOnProperty(prefix = "monitor", name = "enabled", havingValue = "true", matchIfMissing = true)
public class SinkConsumerMetrics2Monitor implements ApplicationListener {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SinkConsumerMetrics2Monitor.class);
@Autowired
private LogicalClusterMetadataManager logicalClusterMetadataManager;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkTopicThrottledMetrics2Monitor.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkTopicThrottledMetrics2Monitor.java
similarity index 98%
rename from kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkTopicThrottledMetrics2Monitor.java
rename to kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkTopicThrottledMetrics2Monitor.java
index fb95947c..ff1cb823 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/SinkTopicThrottledMetrics2Monitor.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/listener/sink/monitor/SinkTopicThrottledMetrics2Monitor.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.monitor.common.MonitorSinkConstant;
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/FlushTopicMetrics.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/FlushTopicMetrics.java
index affb03e4..0323de24 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/FlushTopicMetrics.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/FlushTopicMetrics.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.task.schedule;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
@@ -22,7 +21,7 @@ import java.util.*;
*/
@Component
public class FlushTopicMetrics {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushTopicMetrics.class);
@Autowired
private JmxService jmxService;
@@ -30,6 +29,9 @@ public class FlushTopicMetrics {
@Autowired
private ClusterService clusterService;
+ /**
+ * 定时刷新topic指标到缓存中
+ */
@Scheduled(cron="5 0/1 * * * ?")
public void flushTopicMetrics() {
long startTime = System.currentTimeMillis();
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushBKConsumerGroupMetadata.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushBKConsumerGroupMetadata.java
index 239c3ed0..0a32df75 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushBKConsumerGroupMetadata.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushBKConsumerGroupMetadata.java
@@ -1,6 +1,5 @@
package com.xiaojukeji.kafka.manager.task.schedule.metadata;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetadata;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
@@ -25,11 +24,14 @@ import java.util.*;
*/
@Component
public class FlushBKConsumerGroupMetadata {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushBKConsumerGroupMetadata.class);
@Autowired
private ClusterService clusterService;
+ /**
+ * 定时刷新broker上消费组信息到缓存中
+ */
@Scheduled(cron="15 0/1 * * * ?")
public void schedule() {
List doList = clusterService.list();
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushClusterMetadata.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushClusterMetadata.java
index e88ad696..416b392d 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushClusterMetadata.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushClusterMetadata.java
@@ -25,6 +25,9 @@ public class FlushClusterMetadata {
@Autowired
private PhysicalClusterMetadataManager physicalClusterMetadataManager;
+ /**
+ * 定时刷新物理集群元信息到缓存中
+ */
@Scheduled(cron="0/30 * * * * ?")
public void flush() {
Map dbClusterMap = clusterService.list().stream().collect(Collectors.toMap(ClusterDO::getId, Function.identity(), (key1, key2) -> key2));
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushTopicProperties.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushTopicProperties.java
index 41a8bde4..82358baa 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushTopicProperties.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushTopicProperties.java
@@ -1,6 +1,5 @@
package com.xiaojukeji.kafka.manager.task.schedule.metadata;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.service.utils.KafkaZookeeperUtils;
@@ -22,11 +21,14 @@ import java.util.Properties;
*/
@Component
public class FlushTopicProperties {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushTopicProperties.class);
@Autowired
private ClusterService clusterService;
+ /**
+ * 定时刷新物理集群配置到缓存中
+ */
@Scheduled(cron="25 0/1 * * * ?")
public void flush() {
List doList = clusterService.list();
diff --git a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java
index a7d196af..bd7f1d74 100644
--- a/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java
+++ b/kafka-manager-task/src/main/java/com/xiaojukeji/kafka/manager/task/schedule/metadata/FlushZKConsumerGroupMetadata.java
@@ -1,6 +1,5 @@
package com.xiaojukeji.kafka.manager.task.schedule.metadata;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetadata;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
@@ -27,11 +26,17 @@ import java.util.stream.Collectors;
*/
@Component
public class FlushZKConsumerGroupMetadata {
- private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FlushZKConsumerGroupMetadata.class);
@Autowired
private ClusterService clusterService;
+ @Autowired
+ private ThreadPool threadPool;
+
+ /**
+ * 定时刷新zk上的消费组信息到缓存中
+ */
@Scheduled(cron="35 0/1 * * * ?")
public void schedule() {
List doList = clusterService.list();
@@ -95,7 +100,7 @@ public class FlushZKConsumerGroupMetadata {
return new ArrayList<>();
}
});
- ThreadPool.submitCollectMetricsTask(taskList[i]);
+ threadPool.submitCollectMetricsTask(clusterId, taskList[i]);
}
Map> topicNameConsumerGroupMap = new HashMap<>();
diff --git a/kafka-manager-web/pom.xml b/kafka-manager-web/pom.xml
index f6808f04..ec979f2a 100644
--- a/kafka-manager-web/pom.xml
+++ b/kafka-manager-web/pom.xml
@@ -16,10 +16,9 @@
1.8
1.8
- 2.1.1.RELEASE
- 5.1.3.RELEASE
false
8.5.72
+ 2.16.0
@@ -72,22 +71,22 @@
org.springframework.boot
spring-boot-starter-web
- ${springframework.boot.version}
+ ${spring.boot.version}
org.springframework.boot
spring-boot-starter-aop
- ${springframework.boot.version}
+ ${spring.boot.version}
org.springframework.boot
spring-boot-starter-logging
- ${springframework.boot.version}
+ ${spring.boot.version}
org.springframework.boot
spring-boot-starter-thymeleaf
- ${springframework.boot.version}
+ ${spring.boot.version}
junit
@@ -104,7 +103,6 @@
org.springframework
spring-context-support
- ${spring-version}
org.springframework.boot
@@ -116,11 +114,11 @@
kafka-manager
-
+
org.springframework.boot
spring-boot-maven-plugin
- ${springframework.boot.version}
+ ${spring.boot.version}
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/MainApplication.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/MainApplication.java
index 106d15f5..c5522a4f 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/MainApplication.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/MainApplication.java
@@ -3,7 +3,6 @@ package com.xiaojukeji.kafka.manager.web;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.scheduling.annotation.EnableAsync;
@@ -17,7 +16,6 @@ import org.springframework.scheduling.annotation.EnableScheduling;
@EnableAsync
@EnableScheduling
@ServletComponentScan
-@EnableAutoConfiguration
@SpringBootApplication(scanBasePackages = {"com.xiaojukeji.kafka.manager"})
public class MainApplication {
private static final Logger LOGGER = LoggerFactory.getLogger(MainApplication.class);
@@ -28,7 +26,8 @@ public class MainApplication {
sa.run(args);
LOGGER.info("MainApplication started");
} catch (Exception e) {
- e.printStackTrace();
+ LOGGER.error("start failed and application exit", e);
+ System.exit(1);
}
}
}
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java
index 790b85be..8469afec 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java
@@ -32,7 +32,7 @@ import java.util.stream.Collectors;
*/
@Api(tags = "开放接口-Broker相关接口(REST)")
@RestController
-@RequestMapping(ApiPrefix.API_V1_THIRD_PART_OP_PREFIX)
+@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX)
public class ThirdPartBrokerController {
@Autowired
private BrokerService brokerService;
@@ -44,7 +44,7 @@ public class ThirdPartBrokerController {
private ClusterService clusterService;
@ApiOperation(value = "Broker信息概览", notes = "")
- @RequestMapping(value = "{clusterId}/brokers/{brokerId}/overview", method = RequestMethod.GET)
+ @GetMapping(value = "{clusterId}/brokers/{brokerId}/overview")
@ResponseBody
public Result getBrokerOverview(@PathVariable Long clusterId,
@PathVariable Integer brokerId) {
@@ -70,7 +70,7 @@ public class ThirdPartBrokerController {
}
@ApiOperation(value = "BrokerRegion信息", notes = "所有集群的")
- @RequestMapping(value = "broker-regions", method = RequestMethod.GET)
+ @GetMapping(value = "broker-regions")
@ResponseBody
public Result> getBrokerRegions() {
List clusterDOList = clusterService.list();
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java
index 91d0080c..f8406cfe 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/config/SwaggerConfig.java
@@ -1,5 +1,7 @@
package com.xiaojukeji.kafka.manager.web.config;
+import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
+import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.*;
@@ -20,6 +22,9 @@ import springfox.documentation.swagger2.annotations.EnableSwagger2;
@EnableWebMvc
@EnableSwagger2
public class SwaggerConfig implements WebMvcConfigurer {
+ @Autowired
+ private ConfigUtils configUtils;
+
@Override
public void addResourceHandlers(ResourceHandlerRegistry registry) {
registry.addResourceHandler("swagger-ui.html").addResourceLocations("classpath:/META-INF/resources/");
@@ -39,10 +44,9 @@ public class SwaggerConfig implements WebMvcConfigurer {
private ApiInfo apiInfo() {
return new ApiInfoBuilder()
- .title("Logi-KafkaManager 接口文档")
- .description("欢迎使用滴滴Logi-KafkaManager")
- .contact("huangyiminghappy@163.com")
- .version("2.2.0")
+ .title("LogiKM接口文档")
+ .description("欢迎使用滴滴LogiKM")
+ .version(configUtils.getApplicationVersion())
.build();
}
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/AccountConverter.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/AccountConverter.java
index a7eebff4..d1ce32c2 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/AccountConverter.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/AccountConverter.java
@@ -13,11 +13,19 @@ import java.util.List;
* @date 19/5/3
*/
public class AccountConverter {
+ private AccountConverter() {
+ }
+
public static AccountDO convert2AccountDO(AccountDTO dto) {
AccountDO accountDO = new AccountDO();
accountDO.setUsername(dto.getUsername());
accountDO.setPassword(dto.getPassword());
accountDO.setRole(dto.getRole());
+
+ // 兼容前端未传这些信息的情况
+ accountDO.setDepartment(dto.getDepartment() == null? "": dto.getDepartment());
+ accountDO.setMail(dto.getMail() == null? "": dto.getMail());
+ accountDO.setDisplayName(dto.getDisplayName() == null? "": dto.getDisplayName());
return accountDO;
}
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java
index 747fbb8b..06de3ad9 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/converters/ReassignModelConverter.java
@@ -95,12 +95,21 @@ public class ReassignModelConverter {
vo.setBeginTime(0L);
vo.setEndTime(0L);
+ StringBuilder clusterAndTopicName = new StringBuilder();
+
Integer completedTopicNum = 0;
Set statusSet = new HashSet<>();
for (ReassignTaskDO elem: doList) {
vo.setGmtCreate(elem.getGmtCreate().getTime());
vo.setOperator(elem.getOperator());
vo.setDescription(elem.getDescription());
+
+ if (clusterAndTopicName.length() == 0) {
+ clusterAndTopicName.append("-").append(elem.getClusterId()).append("-").append(elem.getTopicName());
+ } else {
+ clusterAndTopicName.append("等");
+ }
+
if (TaskStatusReassignEnum.isFinished(elem.getStatus())) {
completedTopicNum += 1;
statusSet.add(elem.getStatus());
@@ -114,6 +123,9 @@ public class ReassignModelConverter {
vo.setBeginTime(elem.getBeginTime().getTime());
}
+ // 任务名称上,增加展示集群ID和Topic名称,多个时,仅展示第一个. PR from Hongten
+ vo.setTaskName(String.format("%s 数据迁移任务%s", DateUtils.getFormattedDate(taskId), clusterAndTopicName.toString()));
+
// 任务整体状态
if (statusSet.contains(TaskStatusReassignEnum.RUNNING.getCode())) {
vo.setStatus(TaskStatusReassignEnum.RUNNING.getCode());
diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/metrics/MetricsRegistry.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/metrics/MetricsRegistry.java
index 0847df6b..ccda754f 100644
--- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/metrics/MetricsRegistry.java
+++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/metrics/MetricsRegistry.java
@@ -1,7 +1,6 @@
package com.xiaojukeji.kafka.manager.web.metrics;
import com.codahale.metrics.*;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -21,7 +20,7 @@ import java.util.concurrent.TimeUnit;
*/
@Component
public class MetricsRegistry {
- private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.API_METRICS_LOGGER);
+ private static final Logger LOGGER = LoggerFactory.getLogger(MetricsRegistry.class);
private static final DecimalFormat DECIMAL_FORMAT = new DecimalFormat("#");
diff --git a/kafka-manager-web/src/main/resources/application.yml b/kafka-manager-web/src/main/resources/application.yml
index c08ac897..fefa89f8 100644
--- a/kafka-manager-web/src/main/resources/application.yml
+++ b/kafka-manager-web/src/main/resources/application.yml
@@ -9,6 +9,7 @@ server:
spring:
application:
name: kafkamanager
+ version: 2.6.0
profiles:
active: dev
datasource:
@@ -30,27 +31,57 @@ logging:
custom:
idc: cn
- jmx:
- max-conn: 10 # 2.3版本配置不在这个地方生效
store-metrics-task:
community:
- broker-metrics-enabled: true
topic-metrics-enabled: true
- didi:
+ didi: # 滴滴Kafka特有的指标
app-topic-metrics-enabled: false
topic-request-time-metrics-enabled: false
- topic-throttled-metrics: false
- save-days: 7
+ topic-throttled-metrics-enabled: false
-# 任务相关的开关
+# 任务相关的配置
task:
op:
- sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
- order-auto-exec: # 工单自动化审批线程的开关
- topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启
- app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启
+ sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
+ order-auto-exec: # 工单自动化审批线程的开关
+ topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启
+ app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启
+ metrics:
+ collect: # 收集指标
+ broker-metrics-enabled: true # 收集Broker指标
+ sink: # 上报指标
+ cluster-metrics: # 上报cluster指标
+ sink-db-enabled: true # 上报到db
+ broker-metrics: # 上报broker指标
+ sink-db-enabled: true # 上报到db
+ delete: # 删除指标
+ delete-limit-size: 1000 # 单次删除的批大小
+ cluster-metrics-save-days: 14 # 集群指标保存天数
+ broker-metrics-save-days: 14 # Broker指标保存天数
+ topic-metrics-save-days: 7 # Topic指标保存天数
+ topic-request-time-metrics-save-days: 7 # Topic请求耗时指标保存天数
+ topic-throttled-metrics-save-days: 7 # Topic限流指标保存天数
+ app-topic-metrics-save-days: 7 # App+Topic指标保存天数
+
+thread-pool:
+ collect-metrics:
+ thread-num: 256 # 收集指标线程池大小
+ queue-size: 5000 # 收集指标线程池的queue大小
+ api-call:
+ thread-num: 16 # api服务线程池大小
+ queue-size: 5000 # api服务线程池的queue大小
+
+client-pool:
+ kafka-consumer:
+ min-idle-client-num: 24 # 最小空闲客户端数
+ max-idle-client-num: 24 # 最大空闲客户端数
+ max-total-client-num: 24 # 最大客户端数
+ borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒
account:
+ jump-login:
+ gateway-api: false # 网关接口
+ third-part-api: false # 第三方接口
ldap:
enabled: false
url: ldap://127.0.0.1:389/
@@ -64,19 +95,20 @@ account:
auth-user-registration: true
auth-user-registration-role: normal
-kcm:
- enabled: false
- s3:
+kcm: # 集群安装部署,仅安装broker
+ enabled: false # 是否开启
+ s3: # s3 存储服务
endpoint: s3.didiyunapi.com
access-key: 1234567890
secret-key: 0987654321
bucket: logi-kafka
- n9e:
- base-url: http://127.0.0.1:8004
- user-token: 12345678
- timeout: 300
- account: root
- script-file: kcm_script.sh
+ n9e: # 夜莺
+ base-url: http://127.0.0.1:8004 # 夜莺job服务地址
+ user-token: 12345678 # 用户的token
+ timeout: 300 # 当台操作的超时时间
+ account: root # 操作时使用的账号
+ script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改
+ logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态
monitor:
enabled: false
diff --git a/kafka-manager-web/src/main/resources/logback-spring.xml b/kafka-manager-web/src/main/resources/logback-spring.xml
index c1c16136..83273633 100644
--- a/kafka-manager-web/src/main/resources/logback-spring.xml
+++ b/kafka-manager-web/src/main/resources/logback-spring.xml
@@ -131,15 +131,15 @@
-
-
- ${log.path}/metrics/collector_metrics.log
+
+
+ ${log.path}/log_task.log
%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n
UTF-8
- ${log.path}/metrics/collector_metrics_%d{yyyy-MM-dd}.%i.log
+ ${log.path}/log_task_%d{yyyy-MM-dd}.%i.log
100MB
@@ -147,15 +147,15 @@
-
+
- ${log.path}/metrics/api_metrics.log
+ ${log.path}/api_metrics.log
%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n
UTF-8
- ${log.path}/metrics/api_metrics_%d{yyyy-MM-dd}.%i.log
+ ${log.path}/api_metrics_%d{yyyy-MM-dd}.%i.log
100MB
@@ -163,31 +163,13 @@
-
-
- ${log.path}/metrics/scheduled_tasks.log
-
- %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n
- UTF-8
-
-
- ${log.path}/metrics/scheduled_tasks_%d{yyyy-MM-dd}.%i.log
-
- 100MB
-
- 5
-
-
+
+
+
-
-
-
-
+
-
-
-
@@ -199,17 +181,6 @@
-
+
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 3aecddc6..8b8db3a2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -11,14 +11,15 @@
org.springframework.boot
spring-boot-starter-parent
- 2.1.1.RELEASE
+ 2.1.18.RELEASE
- 2.5
- 2.7.0
- 1.5.13
+ 2.6.0
+ 2.1.18.RELEASE
+ 2.9.2
+ 1.5.21
true
true
@@ -27,8 +28,8 @@
UTF-8
UTF-8
8.5.72
+ 2.16.0
3.0.0
-
@@ -65,6 +66,11 @@
swagger-annotations
${swagger.version}
+
+ io.swagger
+ swagger-models
+ ${swagger.version}
+
@@ -232,6 +238,13 @@
minio
7.1.0
+
+
+ org.projectlombok
+ lombok
+ 1.18.2
+ provided
+