mirror of https://github.com/didi/KnowStreaming.git
init
service/pom.xml | 79 lines (new file)
@@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.xiaojukeji.kafka</groupId>
    <artifactId>kafka-manager-service</artifactId>
    <version>1.0.0-SNAPSHOT</version>
    <packaging>jar</packaging>

    <parent>
        <artifactId>kafka-manager</artifactId>
        <groupId>com.xiaojukeji.kafka</groupId>
        <version>1.0.0-SNAPSHOT</version>
    </parent>

    <properties>
        <!-- maven properties -->
        <maven.test.skip>true</maven.test.skip>
        <downloadSources>true</downloadSources>

        <!-- compiler settings properties -->
        <java_source_version>1.8</java_source_version>
        <java_target_version>1.8</java_target_version>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <file_encoding>UTF-8</file_encoding>
        <spring-version>5.1.3.RELEASE</spring-version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.xiaojukeji.kafka</groupId>
            <artifactId>kafka-manager-dao</artifactId>
            <version>1.0.0-SNAPSHOT</version>
        </dependency>

        <!-- spring -->
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-web</artifactId>
            <version>${spring-version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-test</artifactId>
            <version>${spring-version}</version>
        </dependency>
        <dependency>
            <groupId>commons-lang</groupId>
            <artifactId>commons-lang</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.10</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
        </dependency>
        <dependency>
            <groupId>javax.servlet</groupId>
            <artifactId>javax.servlet-api</artifactId>
            <version>4.0.1</version>
        </dependency>
        <dependency>
            <groupId>javax.annotation</groupId>
            <artifactId>javax.annotation-api</artifactId>
            <version>1.3.2</version>
        </dependency>
    </dependencies>
</project>
service/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ClusterMetadataManager.java | 385 lines (new file)
@@ -0,0 +1,385 @@
|
||||
package com.xiaojukeji.kafka.manager.service.cache;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ControllerDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.ControllerData;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionMap;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.StateChangeListener;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkConfigImpl;
|
||||
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkPathUtil;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.dao.DuplicateKeyException;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* Cluster metadata cache: per-cluster brokers, topics, controller data and ZK/JMX handles.
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
@Service
|
||||
public class ClusterMetadataManager {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(ClusterMetadataManager.class);
|
||||
|
||||
@Autowired
|
||||
private ClusterService clusterService;
|
||||
|
||||
@Autowired
|
||||
private ControllerDao controllerDao;
|
||||
|
||||
private final static Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
private final static Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
private final static Map<Long, ZkConfigImpl> ZK_CONFIG_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
private final static Map<Long, Map<String, TopicMetadata>> TOPIC_METADATA_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
private final static Map<Long, Map<Integer, BrokerMetadata>> BROKER_METADATA_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
private final static Map<Long, Map<Integer, JmxConnectorWrap>> JMX_CONNECTOR_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
LOGGER.info("ClusterMetadataManager, init start ...");
|
||||
|
||||
List<ClusterDO> clusterDOList = clusterService.listAll();
|
||||
for (ClusterDO clusterDO : clusterDOList) {
|
||||
addNew(clusterDO);
|
||||
}
|
||||
LOGGER.info("ClusterMetadataManager, init finish ...");
|
||||
}
|
||||
|
||||
public boolean reload(ClusterDO clusterDO) {
|
||||
remove(clusterDO.getId());
|
||||
return addNew(clusterDO);
|
||||
}
|
||||
|
||||
public boolean addNew(ClusterDO clusterDO) {
|
||||
try {
|
||||
CLUSTER_MAP.put(clusterDO.getId(), clusterDO);
|
||||
TOPIC_METADATA_MAP.put(clusterDO.getId(), new ConcurrentHashMap<>());
|
||||
BROKER_METADATA_MAP.put(clusterDO.getId(), new ConcurrentHashMap<>());
|
||||
JMX_CONNECTOR_MAP.put(clusterDO.getId(), new ConcurrentHashMap<>());
|
||||
|
||||
ZkConfigImpl zkConfig = new ZkConfigImpl(clusterDO.getZookeeper());
|
||||
ZK_CONFIG_MAP.put(clusterDO.getId(), zkConfig);
|
||||
|
||||
// register the broker watcher
|
||||
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, new BrokerStateListener(clusterDO.getId(), zkConfig));
|
||||
|
||||
// register the topic watcher
|
||||
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, new TopicStateListener(clusterDO.getId(), zkConfig));
|
||||
|
||||
// register the controller watcher
|
||||
zkConfig.watch(ZkPathUtil.CONTROLLER_ROOT_NODE, new ControllerStateListener(clusterDO.getId(), zkConfig));
|
||||
} catch (ConfigException e) {
|
||||
LOGGER.error("addNew failed, clusterId:{}.", clusterDO.getId(), e);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private void remove(Long clusterId) {
|
||||
try {
|
||||
ZkConfigImpl zkConfig = ZK_CONFIG_MAP.remove(clusterId);
|
||||
if (zkConfig != null) {
|
||||
zkConfig.cancelWatchChildren(ZkPathUtil.BROKER_IDS_ROOT);
|
||||
zkConfig.cancelWatchChildren(ZkPathUtil.BROKER_TOPICS_ROOT);
|
||||
zkConfig.cancelWatchChildren(ZkPathUtil.CONTROLLER_ROOT_NODE);
|
||||
zkConfig.close();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("remove failed, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
CLUSTER_MAP.remove(clusterId);
|
||||
CONTROLLER_DATA_MAP.remove(clusterId);
|
||||
BROKER_METADATA_MAP.remove(clusterId);
|
||||
TOPIC_METADATA_MAP.remove(clusterId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Topic change listener.
|
||||
*/
|
||||
class TopicStateListener implements StateChangeListener {
|
||||
private Long clusterId;
|
||||
|
||||
private ZkConfigImpl zkConfig;
|
||||
|
||||
private TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) {
|
||||
this.clusterId = clusterId;
|
||||
this.zkConfig = zkConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onChange(State state, String topicPath) {
|
||||
try {
|
||||
String topicName = ZkPathUtil.parseLastPartFromZkPath(topicPath);
|
||||
switch (state) {
|
||||
case CHILD_ADDED:
|
||||
case CHILD_UPDATED:
|
||||
processTopicAdded(topicName);
|
||||
break;
|
||||
case CHILD_DELETED:
|
||||
processTopicDelete(topicName);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("topic state {} for path {} process failed", state, topicPath, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void processTopicDelete(String topicName) {
|
||||
LOGGER.warn("delete topic, clusterId:{} topicName:{}", clusterId, topicName);
|
||||
TOPIC_METADATA_MAP.get(clusterId).remove(topicName);
|
||||
}
|
||||
|
||||
private void processTopicAdded(String topicName) {
|
||||
LOGGER.info("create topic, clusterId:{} topicName:{}", clusterId, topicName);
|
||||
try {
|
||||
TopicMetadata topicMetadata = TOPIC_METADATA_MAP.get(clusterId).getOrDefault(topicName, new TopicMetadata());
|
||||
topicMetadata.setTopic(topicName);
|
||||
|
||||
// read the version, the partition assignment, and the broker ids hosting the replicas
|
||||
String topicNodePath = ZkPathUtil.getBrokerTopicRoot(topicName);
|
||||
|
||||
Stat stat = zkConfig.getNodeStat(topicNodePath);
|
||||
topicMetadata.setCreateTime(stat.getCtime());
|
||||
topicMetadata.setModifyTime(stat.getMtime());
|
||||
|
||||
PartitionMap partitionMap = zkConfig.get(topicNodePath, PartitionMap.class);
|
||||
topicMetadata.setPartitionMap(partitionMap);
|
||||
topicMetadata.setReplicaNum(partitionMap.getPartitions().values().iterator().next().size());
|
||||
topicMetadata.setPartitionNum(partitionMap.getPartitions().size());
|
||||
|
||||
Set<Integer> brokerIdSet = new HashSet<>();
|
||||
Map<Integer, List<Integer>> topicBrokers = partitionMap.getPartitions();
|
||||
for (Map.Entry<Integer, List<Integer>> entry : topicBrokers.entrySet()) {
|
||||
brokerIdSet.addAll(entry.getValue());
|
||||
}
|
||||
topicMetadata.setBrokerIdSet(brokerIdSet);
|
||||
TOPIC_METADATA_MAP.get(clusterId).put(topicName, topicMetadata);
|
||||
} catch (ConfigException e) {
|
||||
LOGGER.error("create topic, add cache failed, clusterId:{} topicName:{}.", clusterId, topicName, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Broker change listener.
|
||||
*/
|
||||
private static class BrokerStateListener implements StateChangeListener {
|
||||
private Long clusterId;
|
||||
|
||||
private ZkConfigImpl zkConfig;
|
||||
|
||||
private BrokerStateListener(Long clusterId, ZkConfigImpl zkConfig) {
|
||||
this.clusterId = clusterId;
|
||||
this.zkConfig = zkConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onChange(State state, String brokerPath) {
|
||||
try {
|
||||
String brokerId = ZkPathUtil.parseLastPartFromZkPath(brokerPath);
|
||||
switch (state) {
|
||||
case CHILD_ADDED:
|
||||
case CHILD_UPDATED:
|
||||
processBrokerAdded(Integer.valueOf(brokerId));
|
||||
break;
|
||||
case CHILD_DELETED:
|
||||
processBrokerDelete(Integer.valueOf(brokerId));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("BROKER state {} for path {} process failed", state, brokerPath, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void processBrokerDelete(Integer brokerId) {
|
||||
BROKER_METADATA_MAP.get(clusterId).remove(brokerId);
|
||||
JmxConnectorWrap jmxConnectorWrap = JMX_CONNECTOR_MAP.get(clusterId).remove(brokerId);
|
||||
if (jmxConnectorWrap == null) {
|
||||
return;
|
||||
}
|
||||
jmxConnectorWrap.close();
|
||||
}
|
||||
|
||||
private void processBrokerAdded(Integer brokerId) throws ConfigException {
|
||||
BrokerMetadata brokerMetadata = zkConfig.get(ZkPathUtil.getBrokerIdNodePath(brokerId), BrokerMetadata.class);
|
||||
if (!brokerMetadata.getEndpoints().isEmpty()) {
|
||||
String endpoint = brokerMetadata.getEndpoints().get(0);
|
||||
int idx = endpoint.indexOf("://");
|
||||
endpoint = endpoint.substring(idx + "://".length());
|
||||
idx = endpoint.indexOf(":");
|
||||
|
||||
brokerMetadata.setHost(endpoint.substring(0, idx));
|
||||
brokerMetadata.setPort(Integer.parseInt(endpoint.substring(idx + 1)));
|
||||
}
|
||||
brokerMetadata.setBrokerId(brokerId);
|
||||
BROKER_METADATA_MAP.get(clusterId).put(brokerId, brokerMetadata);
|
||||
JMX_CONNECTOR_MAP.get(clusterId).put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort()));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Controller change listener.
|
||||
*/
|
||||
class ControllerStateListener implements StateChangeListener {
|
||||
private Long clusterId;
|
||||
|
||||
private ZkConfigImpl zkConfig;
|
||||
|
||||
private ControllerStateListener(Long clusterId, ZkConfigImpl zkConfig) {
|
||||
this.clusterId = clusterId;
|
||||
this.zkConfig = zkConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onChange(State state, String controller) {
|
||||
try {
|
||||
switch (state) {
|
||||
case NODE_DATA_CHANGED:
|
||||
processControllerChange(zkConfig);
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("controller state {} for path {} process failed", state, ZkPathUtil.getControllerRootNode(), e);
|
||||
}
|
||||
}
|
||||
|
||||
private void processControllerChange(ZkConfigImpl zkConfig) throws ConfigException{
|
||||
String controllerRootNode = ZkPathUtil.getControllerRootNode();
|
||||
ControllerData controllerData = zkConfig.get(controllerRootNode, ControllerData.class);
|
||||
if (controllerData == null) {
|
||||
return;
|
||||
}
|
||||
CONTROLLER_DATA_MAP.put(clusterId, controllerData);
|
||||
|
||||
BrokerMetadata brokerMetadata = BROKER_METADATA_MAP.get(clusterId).get(controllerData.getBrokerid());
|
||||
ControllerDO controllerDO = ControllerDO.newInstance(clusterId,
|
||||
controllerData.getBrokerid(),
|
||||
brokerMetadata != null? brokerMetadata.getHost(): "",
|
||||
controllerData.getTimestamp(),
|
||||
controllerData.getVersion()
|
||||
);
|
||||
try {
|
||||
controllerDao.insert(controllerDO);
|
||||
} catch (DuplicateKeyException e) {
|
||||
LOGGER.info("processControllerChange@ClusterMetadataManager, ignore controller change, controller:{}.", controllerDO);
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("processControllerChange@ClusterMetadataManager, insert controller change failed, controller:{}.", controllerDO, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cached cluster.
|
||||
*/
|
||||
public static ClusterDO getClusterFromCache(Long clusterId) {
|
||||
return CLUSTER_MAP.get(clusterId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cached controller data of a cluster.
|
||||
*/
|
||||
public static ControllerData getControllerData(Long clusterId) {
|
||||
return CONTROLLER_DATA_MAP.get(clusterId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cached JMX connector of a broker.
|
||||
*/
|
||||
public static JmxConnectorWrap getJmxConnectorWrap(Long clusterId, Integer brokerId) {
|
||||
Map<Integer, JmxConnectorWrap> jmxConnectorWrapMap = JMX_CONNECTOR_MAP.get(clusterId);
|
||||
if (jmxConnectorWrapMap == null) {
|
||||
return null;
|
||||
}
|
||||
return jmxConnectorWrapMap.get(brokerId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the metadata of a topic.
|
||||
*/
|
||||
public static TopicMetadata getTopicMetaData(Long clusterId, String topicName) {
|
||||
if (!TOPIC_METADATA_MAP.containsKey(clusterId)) {
|
||||
return null;
|
||||
}
|
||||
if (!TOPIC_METADATA_MAP.get(clusterId).containsKey(topicName)) {
|
||||
return null;
|
||||
}
|
||||
return TOPIC_METADATA_MAP.get(clusterId).get(topicName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the topic names of a cluster.
|
||||
*/
|
||||
public static List<String> getTopicNameList(Long clusterId) {
|
||||
if (clusterId == null || !TOPIC_METADATA_MAP.containsKey(clusterId)) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return new ArrayList<>(TOPIC_METADATA_MAP.get(clusterId).keySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether a topic exists in the cluster.
|
||||
*/
|
||||
public static boolean isTopicExist(Long clusterId, String topicName) {
|
||||
if (!TOPIC_METADATA_MAP.containsKey(clusterId)) {
|
||||
return false;
|
||||
}
|
||||
if (!TOPIC_METADATA_MAP.get(clusterId).containsKey(topicName)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the metadata of a broker.
|
||||
*/
|
||||
public static BrokerMetadata getBrokerMetadata(Long clusterId, Integer brokerId) {
|
||||
if (!BROKER_METADATA_MAP.containsKey(clusterId)) {
|
||||
return null;
|
||||
}
|
||||
if (!BROKER_METADATA_MAP.get(clusterId).containsKey(brokerId)) {
|
||||
return null;
|
||||
}
|
||||
return (BrokerMetadata) BROKER_METADATA_MAP.get(clusterId).get(brokerId).clone();
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取BrokerId列表
|
||||
*/
|
||||
public static List<Integer> getBrokerIdList(Long clusterId) {
|
||||
if (clusterId == null || !BROKER_METADATA_MAP.containsKey(clusterId)) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return new ArrayList<>(BROKER_METADATA_MAP.get(clusterId).keySet());
|
||||
}
|
||||
|
||||
public static ZkConfigImpl getZKConfig(Long clusterId) {
|
||||
if (!ZK_CONFIG_MAP.containsKey(clusterId)) {
|
||||
return null;
|
||||
}
|
||||
return ZK_CONFIG_MAP.get(clusterId);
|
||||
}
|
||||
}
|
||||
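ClusterMetadataManager above exposes the cached metadata through static getters, so other services in this module read it without touching ZooKeeper. A minimal, illustrative read sketch (the cluster id and topic name are made up, not part of this commit):

// Hypothetical read path, assuming the cluster was loaded at startup by init().
ClusterDO cluster = ClusterMetadataManager.getClusterFromCache(1L);
List<Integer> brokerIds = ClusterMetadataManager.getBrokerIdList(1L);
boolean exists = ClusterMetadataManager.isTopicExist(1L, "example-topic");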
service/src/main/java/com/xiaojukeji/kafka/manager/service/cache/ConsumerMetadataCache.java | 95 lines (new file)
@@ -0,0 +1,95 @@
|
||||
package com.xiaojukeji.kafka.manager.service.cache;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.OffsetStoreLocation;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetadata;
|
||||
import kafka.admin.AdminClient;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* Caches consumer-group metadata refreshed by the scheduled collect tasks.
|
||||
* @author zengqiao
|
||||
* @date 2019/05/01
|
||||
*/
|
||||
public class ConsumerMetadataCache {
|
||||
private static final Map<Long, ConsumerMetadata> ConsumerGroupMetadataInZKMap = new ConcurrentHashMap<>();
|
||||
|
||||
private static final Map<Long, ConsumerMetadata> ConsumerGroupMetadataInBKMap = new ConcurrentHashMap<>();
|
||||
|
||||
public static void putConsumerMetadataInZK(Long clusterId, ConsumerMetadata consumerMetadata) {
|
||||
if (clusterId == null || consumerMetadata == null) {
|
||||
return;
|
||||
}
|
||||
ConsumerGroupMetadataInZKMap.put(clusterId, consumerMetadata);
|
||||
}
|
||||
|
||||
public static void putConsumerMetadataInBK(Long clusterId, ConsumerMetadata consumerMetadata) {
|
||||
if (clusterId == null || consumerMetadata == null) {
|
||||
return;
|
||||
}
|
||||
ConsumerGroupMetadataInBKMap.put(clusterId, consumerMetadata);
|
||||
}
|
||||
|
||||
public static Set<String> getGroupInZkMap(Long clusterId) {
|
||||
ConsumerMetadata consumerMetadata = ConsumerGroupMetadataInZKMap.get(clusterId);
|
||||
if (consumerMetadata == null) {
|
||||
return new HashSet<>();
|
||||
}
|
||||
return consumerMetadata.getConsumerGroupSet();
|
||||
}
|
||||
|
||||
public static Set<String> getGroupInBrokerMap(Long clusterId) {
|
||||
ConsumerMetadata consumerMetadata = ConsumerGroupMetadataInBKMap.get(clusterId);
|
||||
if (consumerMetadata == null) {
|
||||
return new HashSet<>();
|
||||
}
|
||||
return consumerMetadata.getConsumerGroupSet();
|
||||
}
|
||||
|
||||
public static AdminClient.ConsumerGroupSummary getConsumerGroupSummary(Long clusterId, String consumerGroup) {
|
||||
ConsumerMetadata consumerMetadata = ConsumerGroupMetadataInBKMap.get(clusterId);
|
||||
if (consumerMetadata == null) {
|
||||
return null;
|
||||
}
|
||||
return consumerMetadata.getConsumerGroupSummaryMap().get(consumerGroup);
|
||||
}
|
||||
|
||||
public static List<String> getConsumerGroupConsumedTopicList(Long clusterId,
|
||||
String location,
|
||||
String consumerGroup) {
|
||||
ConsumerMetadata consumerMetadata = null;
|
||||
if(OffsetStoreLocation.ZOOKEEPER.getLocation().equals(location)){
|
||||
consumerMetadata = ConsumerGroupMetadataInZKMap.get(clusterId);
|
||||
} else if (OffsetStoreLocation.BROKER.getLocation().equals(location)) {
|
||||
consumerMetadata = ConsumerGroupMetadataInBKMap.get(clusterId);
|
||||
}
|
||||
if (consumerMetadata == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
List<String> topicNameList = new ArrayList<>();
|
||||
for(Map.Entry<String, Set<String>> entry: consumerMetadata.getTopicNameConsumerGroupMap().entrySet()){
|
||||
if(entry.getValue().contains(consumerGroup)){
|
||||
topicNameList.add(entry.getKey());
|
||||
}
|
||||
}
|
||||
return topicNameList;
|
||||
}
|
||||
|
||||
public static Set<String> getTopicConsumerGroupInZk(Long clusterId, String topicName) {
|
||||
ConsumerMetadata consumerMetadata = ConsumerGroupMetadataInZKMap.get(clusterId);
|
||||
if(consumerMetadata == null){
|
||||
return new HashSet<>();
|
||||
}
|
||||
return consumerMetadata.getTopicNameConsumerGroupMap().getOrDefault(topicName, new HashSet<>());
|
||||
}
|
||||
|
||||
public static Set<String> getTopicConsumerGroupInBroker(Long clusterId, String topicName) {
|
||||
ConsumerMetadata consumerMetadata = ConsumerGroupMetadataInBKMap.get(clusterId);
|
||||
if(consumerMetadata == null){
|
||||
return new HashSet<>();
|
||||
}
|
||||
return consumerMetadata.getTopicNameConsumerGroupMap().getOrDefault(topicName, new HashSet<>());
|
||||
}
|
||||
}
|
||||
service/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaClientCache.java | 213 lines (new file)
@@ -0,0 +1,213 @@
|
||||
package com.xiaojukeji.kafka.manager.service.cache;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import kafka.admin.AdminClient;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
/**
|
||||
* Caches Kafka clients (admin, consumer, producer) per cluster.
|
||||
* @author zengqiao
|
||||
* @date 19/12/24
|
||||
*/
|
||||
public class KafkaClientCache {
|
||||
private final static Logger logger = LoggerFactory.getLogger(KafkaClientCache.class);
|
||||
|
||||
/**
|
||||
* AdminClient
|
||||
*/
|
||||
private static Map<Long, AdminClient> AdminClientMap = new ConcurrentHashMap<>();
|
||||
|
||||
/**
|
||||
* Consumer clients used by the API layer.
|
||||
*/
|
||||
private static Map<Long, KafkaConsumer> ApiKafkaConsumerClientMap = new ConcurrentHashMap<>();
|
||||
|
||||
/**
|
||||
* Consumer clients used by common/background code.
|
||||
*/
|
||||
private static Map<Long, KafkaConsumer> CommonKafkaConsumerClientMap = new ConcurrentHashMap<>();
|
||||
|
||||
/**
|
||||
* Shared producer, at most one per cluster.
|
||||
*/
|
||||
private static Map<Long, KafkaProducer<String, String>> KafkaProducerMap = new ConcurrentHashMap<>();
|
||||
|
||||
private static ReentrantLock lock = new ReentrantLock();
|
||||
|
||||
public static KafkaProducer<String, String> getKafkaProducerClient(Long clusterId) {
|
||||
KafkaProducer<String, String> kafkaProducer = KafkaProducerMap.get(clusterId);
|
||||
if (kafkaProducer != null) {
|
||||
return kafkaProducer;
|
||||
}
|
||||
ClusterDO clusterDO = ClusterMetadataManager.getClusterFromCache(clusterId);
|
||||
if (clusterDO == null) {
|
||||
return null;
|
||||
}
|
||||
Properties properties = createProperties(clusterDO, true);
|
||||
properties.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
|
||||
properties.setProperty(ProducerConfig.LINGER_MS_CONFIG, "10");
|
||||
properties.setProperty(ProducerConfig.RETRIES_CONFIG, "3");
|
||||
lock.lock();
|
||||
try {
|
||||
kafkaProducer = KafkaProducerMap.get(clusterId);
|
||||
if (kafkaProducer != null) {
|
||||
return kafkaProducer;
|
||||
}
|
||||
KafkaProducerMap.put(clusterId, new KafkaProducer<String, String>(properties));
|
||||
} catch (Exception e) {
|
||||
logger.error("create kafka producer client failed, clusterId:{}.", clusterId, e);
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
return KafkaProducerMap.get(clusterId);
|
||||
}
|
||||
|
||||
public static KafkaConsumer getApiKafkaConsumerClient(ClusterDO clusterDO) {
|
||||
if (clusterDO == null) {
|
||||
return null;
|
||||
}
|
||||
KafkaConsumer kafkaConsumer = ApiKafkaConsumerClientMap.get(clusterDO.getId());
|
||||
if (kafkaConsumer != null) {
|
||||
return kafkaConsumer;
|
||||
}
|
||||
Properties properties = createProperties(clusterDO, false);
|
||||
properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
|
||||
properties.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "20000");
|
||||
properties.put("enable.auto.commit", "false");
|
||||
lock.lock();
|
||||
try {
|
||||
kafkaConsumer = ApiKafkaConsumerClientMap.get(clusterDO.getId());
|
||||
if (kafkaConsumer != null) {
|
||||
return kafkaConsumer;
|
||||
}
|
||||
ApiKafkaConsumerClientMap.put(clusterDO.getId(), new KafkaConsumer(properties));
|
||||
} catch (Exception e) {
|
||||
logger.error("create kafka consumer client failed, clusterId:{}.", clusterDO.getId(), e);
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
return ApiKafkaConsumerClientMap.get(clusterDO.getId());
|
||||
}
|
||||
|
||||
public static KafkaConsumer getCommonKafkaConsumerClient(Long clusterId) {
|
||||
KafkaConsumer kafkaConsumer = CommonKafkaConsumerClientMap.get(clusterId);
|
||||
if (kafkaConsumer != null) {
|
||||
return kafkaConsumer;
|
||||
}
|
||||
ClusterDO clusterDO = ClusterMetadataManager.getClusterFromCache(clusterId);
|
||||
if (clusterDO == null) {
|
||||
return null;
|
||||
}
|
||||
Properties properties = createProperties(clusterDO, false);
|
||||
properties.put("enable.auto.commit", "false");
|
||||
lock.lock();
|
||||
try {
|
||||
kafkaConsumer = CommonKafkaConsumerClientMap.get(clusterId);
|
||||
if (kafkaConsumer != null) {
|
||||
return kafkaConsumer;
|
||||
}
|
||||
CommonKafkaConsumerClientMap.put(clusterId, new KafkaConsumer(properties));
|
||||
} catch (Exception e) {
|
||||
logger.error("create kafka consumer client failed, clusterId:{}.", clusterId, e);
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
return CommonKafkaConsumerClientMap.get(clusterId);
|
||||
}
|
||||
|
||||
public static synchronized void closeApiKafkaConsumerClient(Long clusterId) {
|
||||
KafkaConsumer kafkaConsumer = ApiKafkaConsumerClientMap.remove(clusterId);
|
||||
if (kafkaConsumer == null) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
kafkaConsumer.close();
|
||||
} catch (Exception e) {
|
||||
logger.error("close kafka consumer client error, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
}
|
||||
|
||||
public static synchronized void closeCommonKafkaConsumerClient(Long clusterId) {
|
||||
KafkaConsumer kafkaConsumer = CommonKafkaConsumerClientMap.remove(clusterId);
|
||||
if (kafkaConsumer == null) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
kafkaConsumer.close();
|
||||
} catch (Exception e) {
|
||||
logger.error("close kafka consumer client error, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
}
|
||||
|
||||
public static AdminClient getAdminClient(Long clusterId) {
|
||||
AdminClient adminClient = AdminClientMap.get(clusterId);
|
||||
if (adminClient != null) {
|
||||
return adminClient;
|
||||
}
|
||||
ClusterDO clusterDO = ClusterMetadataManager.getClusterFromCache(clusterId);
|
||||
if (clusterDO == null) {
|
||||
return null;
|
||||
}
|
||||
Properties properties = createProperties(clusterDO, false);
|
||||
lock.lock();
|
||||
try {
|
||||
adminClient = AdminClientMap.get(clusterId);
|
||||
if (adminClient != null) {
|
||||
return adminClient;
|
||||
}
|
||||
AdminClientMap.put(clusterId, AdminClient.create(properties));
|
||||
} catch (Exception e) {
|
||||
logger.error("create kafka admin client failed, clusterId:{}.", clusterId, e);
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
return AdminClientMap.get(clusterId);
|
||||
}
|
||||
|
||||
public static void closeAdminClient(ClusterDO cluster) {
|
||||
if (AdminClientMap.containsKey(cluster.getId())) {
|
||||
AdminClientMap.get(cluster.getId()).close();
|
||||
}
|
||||
}
|
||||
|
||||
public static Properties createProperties(ClusterDO clusterDO, Boolean serialize) {
|
||||
Properties properties = new Properties();
|
||||
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterDO.getBootstrapServers());
|
||||
if (serialize) {
|
||||
properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
|
||||
properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
|
||||
} else {
|
||||
properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
|
||||
properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
|
||||
}
|
||||
if (!StringUtils.isEmpty(clusterDO.getSecurityProtocol())) {
|
||||
properties.setProperty (CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, clusterDO.getSecurityProtocol());
|
||||
}
|
||||
if (!StringUtils.isEmpty(clusterDO.getSaslMechanism())) {
|
||||
properties.setProperty ("sasl.mechanism", clusterDO.getSaslMechanism());
|
||||
}
|
||||
if (!StringUtils.isEmpty(clusterDO.getSaslJaasConfig())) {
|
||||
properties.put("sasl.jaas.config", clusterDO.getSaslJaasConfig());
|
||||
}
|
||||
return properties;
|
||||
}
|
||||
|
||||
private static String key(Long clusterId, String topicName) {
|
||||
if (StringUtils.isEmpty(topicName)) {
|
||||
return String.valueOf(clusterId);
|
||||
}
|
||||
return String.valueOf(clusterId) + '_' + topicName;
|
||||
}
|
||||
}
|
||||
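A hedged usage sketch of the client cache above; the cluster id and topic are illustrative, and it assumes the cluster is already registered in ClusterMetadataManager:

KafkaProducer<String, String> producer = KafkaClientCache.getKafkaProducerClient(1L);
if (producer != null) {
    // ProducerRecord/send() are the standard kafka-clients API; send() is asynchronous.
    producer.send(new org.apache.kafka.clients.producer.ProducerRecord<>("example-topic", "key", "value"));
}

The get-or-create methods use a check / lock / re-check pattern around the ReentrantLock, so at most one client per cluster should be created even with concurrent callers.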
service/src/main/java/com/xiaojukeji/kafka/manager/service/cache/KafkaMetricsCache.java | 87 lines (new file)
@@ -0,0 +1,87 @@
|
||||
package com.xiaojukeji.kafka.manager.service.cache;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* Caches the collected metrics data.
|
||||
* @author zengqiao
|
||||
* @date 2019-04-30
|
||||
*/
|
||||
public class KafkaMetricsCache {
|
||||
/**
|
||||
* <clusterId, <updateTimestamp, Metrics List>>
|
||||
*/
|
||||
private static Map<Long, Map.Entry<Long, List<TopicMetrics>>> TopicMetricsMap = new ConcurrentHashMap<>();
|
||||
|
||||
/**
|
||||
* <clusterId, <updateTimestamp, Metrics List>>
|
||||
*/
|
||||
private static Map<Long, Map.Entry<Long, List<BrokerMetrics>>> BrokerMetricsMap = new ConcurrentHashMap<>();
|
||||
|
||||
/**
|
||||
* <clusterId, <updateTimestamp, Metrics List>>
|
||||
*/
|
||||
private static Map<Long, Map.Entry<Long, List<ConsumerMetrics>>> ConsumerMetricsMap = new ConcurrentHashMap<>();
|
||||
|
||||
public static void putTopicMetricsToCache(Long clusterId, List<TopicMetrics> metricsList) {
|
||||
if (clusterId == null || metricsList == null) {
|
||||
return;
|
||||
}
|
||||
TopicMetricsMap.put(clusterId, new AbstractMap.SimpleEntry<>(System.currentTimeMillis(), metricsList));
|
||||
}
|
||||
|
||||
public static List<TopicMetrics> getTopicMetricsFromCache(Long clusterId) {
|
||||
Map.Entry<Long, List<TopicMetrics>> entry = TopicMetricsMap.get(clusterId);
|
||||
if (entry == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return entry.getValue();
|
||||
}
|
||||
|
||||
public static Set<Long> getTopicMetricsClusterIdSet() {
|
||||
return TopicMetricsMap.keySet();
|
||||
}
|
||||
|
||||
public static void putBrokerMetricsToCache(Long clusterId, List<BrokerMetrics> metricsList) {
|
||||
if (clusterId == null || metricsList == null) {
|
||||
return;
|
||||
}
|
||||
BrokerMetricsMap.put(clusterId, new AbstractMap.SimpleEntry<>(System.currentTimeMillis(), metricsList));
|
||||
}
|
||||
|
||||
public static List<BrokerMetrics> getBrokerMetricsFromCache(Long clusterId) {
|
||||
Map.Entry<Long, List<BrokerMetrics>> entry = BrokerMetricsMap.get(clusterId);
|
||||
if (entry == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return entry.getValue();
|
||||
}
|
||||
|
||||
public static Set<Long> getBrokerMetricsClusterIdSet() {
|
||||
return BrokerMetricsMap.keySet();
|
||||
}
|
||||
|
||||
public static void putConsumerMetricsToCache(Long clusterId, List<ConsumerMetrics> metricsList) {
|
||||
if (clusterId == null || metricsList == null) {
|
||||
return;
|
||||
}
|
||||
ConsumerMetricsMap.put(clusterId, new AbstractMap.SimpleEntry<>(System.currentTimeMillis(), metricsList));
|
||||
}
|
||||
|
||||
public static List<ConsumerMetrics> getConsumerMetricsFromCache(Long clusterId) {
|
||||
Map.Entry<Long, List<ConsumerMetrics>> entry = ConsumerMetricsMap.get(clusterId);
|
||||
if (entry == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return entry.getValue();
|
||||
}
|
||||
|
||||
public static Set<Long> getConsumerMetricsClusterIdSet() {
|
||||
return ConsumerMetricsMap.keySet();
|
||||
}
|
||||
}
|
||||
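KafkaMetricsCache pairs each metrics list with the timestamp of the collection run that produced it. A minimal write/read sketch (the cluster id is illustrative; the list is normally produced by the collect tasks added later in this commit):

List<TopicMetrics> collected = new ArrayList<>();            // filled by CollectTopicMetricsTask in practice
KafkaMetricsCache.putTopicMetricsToCache(1L, collected);     // stored with System.currentTimeMillis()
List<TopicMetrics> latest = KafkaMetricsCache.getTopicMetricsFromCache(1L);  // empty list when nothing is cached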
@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.service.collector;

import org.slf4j.Logger;

/**
 * @author zengqiao
 * @date 19/12/25
 */
public abstract class BaseCollectTask implements Runnable {
    protected Logger logger;

    protected Long clusterId;

    public BaseCollectTask(Logger logger, Long clusterId) {
        this.logger = logger;
        this.clusterId = clusterId;
    }

    @Override
    public void run() {
        long startTime = System.currentTimeMillis();
        try {
            collect();
        } catch (Throwable t) {
            logger.error("collect failed, clusterId:{}.", clusterId, t);
            return;
        }
        long endTime = System.currentTimeMillis();
        logger.info("collect finish, clusterId:{} costTime:{}", clusterId, endTime - startTime);
    }

    public abstract void collect();
}
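BaseCollectTask is the template for the collectors added later in this commit: run() only wraps collect() with timing and error logging, so each task can be scheduled as a plain Runnable. A hypothetical wiring sketch; the executor, the 60-second period, and the jmxService variable are assumptions for illustration, not part of this commit:

// requires java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}
ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
// clusterId 1L is illustrative; jmxService would be an injected JmxService bean.
scheduler.scheduleAtFixedRate(new CollectBrokerMetricsTask(1L, jmxService), 0, 60, TimeUnit.SECONDS);
scheduler.scheduleAtFixedRate(new CollectConsumerGroupZKMetadataTask(1L), 0, 60, TimeUnit.SECONDS);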
@@ -0,0 +1,49 @@
package com.xiaojukeji.kafka.manager.service.collector;

import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
import com.xiaojukeji.kafka.manager.service.cache.KafkaMetricsCache;
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

/**
 * Scheduled task that collects broker metrics via JMX.
 * @author limeng
 * @date 2018/5/6.
 */
public class CollectBrokerMetricsTask extends BaseCollectTask {
    private final static Logger logger = LoggerFactory.getLogger(Constant.COLLECTOR_METRICS_LOGGER);

    private JmxService jmxService;

    public CollectBrokerMetricsTask(Long clusterId, JmxService jmxService) {
        super(logger, clusterId);
        this.clusterId = clusterId;
        this.jmxService = jmxService;
    }

    @Override
    public void collect() {
        ClusterDO clusterDO = ClusterMetadataManager.getClusterFromCache(clusterId);
        if (clusterDO == null) {
            return;
        }
        List<BrokerMetrics> brokerMetricsList = new ArrayList<>();
        List<Integer> brokerIdList = ClusterMetadataManager.getBrokerIdList(clusterId);
        for (Integer brokerId: brokerIdList) {
            BrokerMetrics brokerMetrics = jmxService.getSpecifiedBrokerMetricsFromJmx(clusterId, brokerId, BrokerMetrics.getFieldNameList(MetricsType.BROKER_TO_DB_METRICS), true);
            if (brokerMetrics == null) {
                continue;
            }
            brokerMetricsList.add(brokerMetrics);
        }
        KafkaMetricsCache.putBrokerMetricsToCache(clusterId, brokerMetricsList);
    }
}
@@ -0,0 +1,123 @@
|
||||
package com.xiaojukeji.kafka.manager.service.collector;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ConsumerMetadataCache;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaClientCache;
|
||||
import kafka.admin.AdminClient;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.protocol.types.SchemaException;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import scala.collection.JavaConversions;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/25
|
||||
*/
|
||||
public class CollectConsumerGroupBKMetadataTask extends BaseCollectTask {
|
||||
private final static Logger logger = LoggerFactory.getLogger(Constant.COLLECTOR_METRICS_LOGGER);
|
||||
|
||||
public CollectConsumerGroupBKMetadataTask(Long clusterId) {
|
||||
super(logger, clusterId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect() {
|
||||
// fetch the consumer group list
|
||||
Set<String> consumerGroupSet = collectConsumerGroup();
|
||||
|
||||
// fetch each consumer group's summary
|
||||
Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
|
||||
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummary = collectConsumerGroupSummary(consumerGroupSet, topicNameConsumerGroupMap);
|
||||
|
||||
// map each topic to the consumer groups consuming it
|
||||
topicNameConsumerGroupMap = collectTopicAndConsumerGroupMap(consumerGroupSet, topicNameConsumerGroupMap);
|
||||
ConsumerMetadataCache.putConsumerMetadataInBK(clusterId, new ConsumerMetadata(consumerGroupSet, topicNameConsumerGroupMap, consumerGroupSummary));
|
||||
}
|
||||
|
||||
private Set<String> collectConsumerGroup() {
|
||||
try {
|
||||
AdminClient adminClient = KafkaClientCache.getAdminClient(clusterId);
|
||||
Set<String> consumerGroupSet = new HashSet<>();
|
||||
scala.collection.immutable.Map<org.apache.kafka.common.Node, scala.collection.immutable.List<kafka.coordinator.GroupOverview>> brokerGroupMap = adminClient.listAllGroups();
|
||||
for (scala.collection.immutable.List<kafka.coordinator.GroupOverview> brokerGroup : JavaConversions.asJavaMap(brokerGroupMap).values()) {
|
||||
List<kafka.coordinator.GroupOverview> lists = JavaConversions.asJavaList(brokerGroup);
|
||||
for (kafka.coordinator.GroupOverview groupOverview : lists) {
|
||||
String consumerGroup = groupOverview.groupId();
|
||||
if (consumerGroup != null && consumerGroup.contains("#")) {
|
||||
consumerGroup = consumerGroup.split("#", 2)[1];
|
||||
}
|
||||
consumerGroupSet.add(consumerGroup);
|
||||
}
|
||||
}
|
||||
return consumerGroupSet;
|
||||
} catch (Exception e) {
|
||||
logger.error("collect consumerGroup failed, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
return new HashSet<>();
|
||||
}
|
||||
|
||||
private Map<String, AdminClient.ConsumerGroupSummary> collectConsumerGroupSummary(Set<String> consumerGroupSet, Map<String, Set<String>> topicNameConsumerGroupMap) {
|
||||
if (consumerGroupSet == null || consumerGroupSet.isEmpty()) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
AdminClient adminClient = KafkaClientCache.getAdminClient(clusterId);
|
||||
|
||||
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
|
||||
for (String consumerGroup : consumerGroupSet) {
|
||||
try {
|
||||
AdminClient.ConsumerGroupSummary consumerGroupSummary = adminClient.describeConsumerGroup(consumerGroup);
|
||||
if (consumerGroupSummary == null) {
|
||||
continue;
|
||||
}
|
||||
consumerGroupSummaryMap.put(consumerGroup, consumerGroupSummary);
|
||||
|
||||
java.util.Iterator<scala.collection.immutable.List<AdminClient.ConsumerSummary>> it = JavaConversions.asJavaIterator(consumerGroupSummary.consumers().iterator());
|
||||
while (it.hasNext()) {
|
||||
List<AdminClient.ConsumerSummary> consumerSummaryList = JavaConversions.asJavaList(it.next());
|
||||
for (AdminClient.ConsumerSummary consumerSummary: consumerSummaryList) {
|
||||
List<TopicPartition> topicPartitionList = JavaConversions.asJavaList(consumerSummary.assignment());
|
||||
if (topicPartitionList == null) {
|
||||
continue;
|
||||
}
|
||||
for (TopicPartition topicPartition: topicPartitionList) {
|
||||
Set<String> groupSet = topicNameConsumerGroupMap.getOrDefault(topicPartition.topic(), new HashSet<>());
|
||||
groupSet.add(consumerGroup);
|
||||
topicNameConsumerGroupMap.put(topicPartition.topic(), groupSet);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (SchemaException e) {
|
||||
logger.error("schemaException exception, clusterId:{} consumerGroup:{}.", clusterId, consumerGroup, e);
|
||||
} catch (Exception e) {
|
||||
logger.error("collect consumerGroupSummary failed, clusterId:{} consumerGroup:{}.", clusterId, consumerGroup, e);
|
||||
}
|
||||
}
|
||||
return consumerGroupSummaryMap;
|
||||
}
|
||||
|
||||
private Map<String, Set<String>> collectTopicAndConsumerGroupMap(Set<String> consumerGroupSet, Map<String, Set<String>> topicNameConsumerGroupMap) {
|
||||
if (consumerGroupSet == null || consumerGroupSet.isEmpty()) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
AdminClient adminClient = KafkaClientCache.getAdminClient(clusterId);
|
||||
|
||||
for (String consumerGroup: consumerGroupSet) {
|
||||
try {
|
||||
Map<TopicPartition, Object> topicPartitionAndOffsetMap = JavaConversions.asJavaMap(adminClient.listGroupOffsets(consumerGroup));
|
||||
for (Map.Entry<TopicPartition, Object> entry : topicPartitionAndOffsetMap.entrySet()) {
|
||||
TopicPartition tp = entry.getKey();
|
||||
Set<String> subConsumerGroupSet = topicNameConsumerGroupMap.getOrDefault(tp.topic(), new HashSet<>());
|
||||
subConsumerGroupSet.add(consumerGroup);
|
||||
topicNameConsumerGroupMap.put(tp.topic(), subConsumerGroupSet);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("collectTopicAndConsumerGroupMap@ConsumerMetaDataManager, update consumer group failed, clusterId:{} consumerGroup:{}.", clusterId, consumerGroup, e);
|
||||
}
|
||||
}
|
||||
return topicNameConsumerGroupMap;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,67 @@
|
||||
package com.xiaojukeji.kafka.manager.service.collector;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkConfigImpl;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ConsumerMetadataCache;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkPathUtil;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/25
|
||||
*/
|
||||
public class CollectConsumerGroupZKMetadataTask extends BaseCollectTask {
|
||||
private final static Logger logger = LoggerFactory.getLogger(Constant.COLLECTOR_METRICS_LOGGER);
|
||||
|
||||
public CollectConsumerGroupZKMetadataTask(Long clusterId) {
|
||||
super(logger, clusterId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect() {
|
||||
Set<String> consumerGroupSet = collectConsumerGroup();
|
||||
Map<String, Set<String>> topicNameConsumerGroupMap = collectTopicAndConsumerGroupMap(consumerGroupSet);
|
||||
ConsumerMetadataCache.putConsumerMetadataInZK(clusterId, new ConsumerMetadata(consumerGroupSet, topicNameConsumerGroupMap, new HashMap<>()));
|
||||
}
|
||||
|
||||
private Set<String> collectConsumerGroup() {
|
||||
try {
|
||||
ZkConfigImpl zkConfigImpl = ClusterMetadataManager.getZKConfig(clusterId);
|
||||
List<String> consumerGroupList = zkConfigImpl.getChildren(ZkPathUtil.CONSUMER_ROOT_NODE);
|
||||
if (consumerGroupList == null) {
|
||||
return new HashSet<>();
|
||||
}
|
||||
return new HashSet<>(consumerGroupList);
|
||||
} catch (Exception e) {
|
||||
logger.error("collect consumerGroup failed, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
return new HashSet<>();
|
||||
}
|
||||
|
||||
private Map<String, Set<String>> collectTopicAndConsumerGroupMap(Set<String> consumerGroupSet) {
|
||||
ZkConfigImpl zkConfigImpl = ClusterMetadataManager.getZKConfig(clusterId);
|
||||
Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
|
||||
for (String consumerGroup: consumerGroupSet) {
|
||||
try {
|
||||
List<String> topicNameList = zkConfigImpl.getChildren(ZkPathUtil.getConsumerGroupOffsetRoot(consumerGroup));
|
||||
if (topicNameList == null || topicNameList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (String topicName: topicNameList) {
|
||||
Set<String> subConsumerGroupSet = topicNameConsumerGroupMap.getOrDefault(topicName, new HashSet<>());
|
||||
subConsumerGroupSet.add(consumerGroup);
|
||||
topicNameConsumerGroupMap.put(topicName, subConsumerGroupSet);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("collect topicName and consumerGroup failed, clusterId:{} consumerGroup:{}.", clusterId, consumerGroup, e);
|
||||
}
|
||||
}
|
||||
return topicNameConsumerGroupMap;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package com.xiaojukeji.kafka.manager.service.collector;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumerDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaMetricsCache;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* Collects consumer lag metrics.
|
||||
* @author zengqiao
|
||||
* @date 19/4/17
|
||||
*/
|
||||
public class CollectConsumerMetricsTask extends BaseCollectTask {
|
||||
private final static Logger logger = LoggerFactory.getLogger(Constant.COLLECTOR_METRICS_LOGGER);
|
||||
|
||||
private ConsumerService consumerService;
|
||||
|
||||
public CollectConsumerMetricsTask(Long clusterId, ConsumerService consumerService) {
|
||||
super(logger, clusterId);
|
||||
this.consumerService = consumerService;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect() {
|
||||
ClusterDO clusterDO = ClusterMetadataManager.getClusterFromCache(clusterId);
|
||||
if (clusterDO == null) {
|
||||
return;
|
||||
}
|
||||
Map<String, List<PartitionState>> topicNamePartitionStateListMap = new HashMap<>();
|
||||
List<ConsumerDTO> consumerDTOList = consumerService.getMonitoredConsumerList(clusterDO, topicNamePartitionStateListMap);
|
||||
|
||||
List<ConsumerMetrics> consumerMetricsList = convert2ConsumerMetrics(consumerDTOList);
|
||||
KafkaMetricsCache.putConsumerMetricsToCache(clusterId, consumerMetricsList);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert to the ConsumerMetrics structure.
|
||||
*/
|
||||
private List<ConsumerMetrics> convert2ConsumerMetrics(List<ConsumerDTO> consumerDTOList) {
|
||||
List<ConsumerMetrics> consumerMetricsList = new ArrayList<>();
|
||||
for (ConsumerDTO consumerDTO : consumerDTOList) {
|
||||
Map<String, List<PartitionState>> topicNamePartitionStateListMap = consumerDTO.getTopicPartitionMap();
|
||||
for(Map.Entry<String, List<PartitionState>> entry : topicNamePartitionStateListMap.entrySet()){
|
||||
String topicName = entry.getKey();
|
||||
List<PartitionState> partitionStateList = entry.getValue();
|
||||
ConsumerMetrics consumerMetrics = new ConsumerMetrics();
|
||||
consumerMetrics.setClusterId(clusterId);
|
||||
consumerMetrics.setConsumerGroup(consumerDTO.getConsumerGroup());
|
||||
consumerMetrics.setLocation(consumerDTO.getLocation());
|
||||
consumerMetrics.setTopicName(topicName);
|
||||
long sumLag = 0;
|
||||
for (PartitionState partitionState : partitionStateList) {
|
||||
Map.Entry<Long, Long> offsetEntry = new AbstractMap.SimpleEntry<>(partitionState.getOffset(), partitionState.getConsumeOffset());
|
||||
sumLag += (offsetEntry.getKey() - offsetEntry.getValue() > 0 ? offsetEntry.getKey() - offsetEntry.getValue(): 0);
|
||||
}
|
||||
consumerMetrics.setSumLag(sumLag);
|
||||
consumerMetricsList.add(consumerMetrics);
|
||||
}
|
||||
}
|
||||
return consumerMetricsList;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,44 @@
package com.xiaojukeji.kafka.manager.service.collector;

import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.service.cache.KafkaMetricsCache;
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

/**
 * Collects topic metrics via JMX.
 * @author zengqiao
 * @date 19/4/17
 */
public class CollectTopicMetricsTask extends BaseCollectTask {
    private final static Logger logger = LoggerFactory.getLogger(Constant.COLLECTOR_METRICS_LOGGER);

    private JmxService jmxService;

    public CollectTopicMetricsTask(Long clusterId, JmxService jmxService) {
        super(logger, clusterId);
        this.clusterId = clusterId;
        this.jmxService = jmxService;
    }

    @Override
    public void collect() {
        List<String> specifiedFieldList = TopicMetrics.getFieldNameList(MetricsType.TOPIC_METRICS_TO_DB);
        List<String> topicNameList = ClusterMetadataManager.getTopicNameList(clusterId);
        List<TopicMetrics> topicMetricsList = new ArrayList<>();
        for (String topicName: topicNameList) {
            TopicMetrics topicMetrics = jmxService.getSpecifiedTopicMetricsFromJmx(clusterId, topicName, specifiedFieldList, true);
            if (topicMetrics == null) {
                continue;
            }
            topicMetricsList.add(topicMetrics);
        }
        KafkaMetricsCache.putTopicMetricsToCache(clusterId, topicMetricsList);
    }
}
@@ -0,0 +1,26 @@
package com.xiaojukeji.kafka.manager.service.monitor;

import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMatchStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;

import java.util.List;

/**
 * Alarm condition checking.
 * @author zengqiao
 * @date 20/3/18
 */
public abstract class AbstractMonitorMatchService<T> {

    public abstract MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, List<T> dataList);

    protected MonitorMatchStatus condition(Double v1, Double v2, String cond) {
        switch (cond) {
            case ">": return v1.compareTo(v2) > 0 ? MonitorMatchStatus.YES: MonitorMatchStatus.NO;
            case "<": return v1.compareTo(v2) < 0 ? MonitorMatchStatus.YES: MonitorMatchStatus.NO;
            case "=": return v1.equals(v2) ? MonitorMatchStatus.YES: MonitorMatchStatus.NO;
            case "!=": return !v1.equals(v2) ? MonitorMatchStatus.YES: MonitorMatchStatus.NO;
            default: return MonitorMatchStatus.UNKNOWN;
        }
    }
}
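A hedged sketch of a concrete matcher built on the condition() helper above. The class name and the fixed 100.0 threshold are illustrative only (a real matcher would read the threshold and operator from the AlarmRuleDTO strategy expression), and imports are assumed to match the file above:

// Illustrative subclass: reports YES when any observed value exceeds a ">" threshold of 100.
public class ExampleMatchService extends AbstractMonitorMatchService<Double> {
    @Override
    public MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, List<Double> dataList) {
        for (Double value : dataList) {
            if (condition(value, 100.0, ">") == MonitorMatchStatus.YES) {
                return MonitorMatchStatus.YES;
            }
        }
        return MonitorMatchStatus.NO;
    }
}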
@@ -0,0 +1,52 @@
package com.xiaojukeji.kafka.manager.service.monitor;

import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmNotifyDTO;
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyActionDTO;
import com.xiaojukeji.kafka.manager.service.notify.KafkaNotifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;

/**
 * Alarm notification.
 * @author zengqiao
 * @date 20/3/18
 */
@Component
public class AlarmNotifyService {
    private static final Logger LOGGER = LoggerFactory.getLogger(AlarmNotifyService.class);

    @Value("${kafka-monitor.notify-kafka.cluster-id:}")
    private Long clusterId;

    @Value("${kafka-monitor.notify-kafka.topic-name:}")
    private String topicName;

    @Autowired
    private KafkaNotifier kafkaNotifier;

    public void send(AlarmRuleDTO alarmRuleDTO) {
        if (clusterId == null || StringUtils.isEmpty(topicName)) {
            LOGGER.error("application.yml monitor config illegal");
            return;
        }
        // TODO: Kafka is currently the only notification channel, so no per-channel dispatch is done here.
        kafkaNotifier.produce(clusterId, topicName, JSON.toJSONString(convert2AlarmNotifyDTO(alarmRuleDTO)));
    }

    private AlarmNotifyDTO convert2AlarmNotifyDTO(AlarmRuleDTO alarmRuleDTO) {
        AlarmNotifyDTO alarmNotifyDTO = new AlarmNotifyDTO();
        alarmNotifyDTO.setAlarmRuleId(alarmRuleDTO.getId());
        AlarmStrategyActionDTO alarmStrategyActionDTO = alarmRuleDTO.getStrategyActionMap().get("KAFKA");
        if (alarmStrategyActionDTO == null) {
            alarmNotifyDTO.setActionTag("");
        }
        alarmNotifyDTO.setMessage(JSON.toJSONString(alarmRuleDTO));
        return alarmNotifyDTO;
    }
}
@@ -0,0 +1,145 @@
|
||||
package com.xiaojukeji.kafka.manager.service.monitor;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyActionDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyExpressionDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyFilterDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.AlarmRuleDao;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
/**
|
||||
* Manages and caches the monitoring/alarm rules.
|
||||
* @author zengqiao
|
||||
* @date 2019-05-05
|
||||
*/
|
||||
@Service
|
||||
public class AlarmRuleManager {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(AlarmRuleManager.class);
|
||||
|
||||
@Autowired
|
||||
private AlarmRuleDao alarmRuleDao;
|
||||
|
||||
private static long activeLastGmtModifyTime = 0L;
|
||||
|
||||
private static volatile Map<Long, AlarmRuleDTO> activeAlarmRuleMap = new ConcurrentHashMap<>();
|
||||
|
||||
@Scheduled(cron="0 0/1 * * * ?")
|
||||
public void flushAlarmRuleCache() {
|
||||
long startTime = System.currentTimeMillis();
|
||||
LOGGER.info("alarm rule flush, start.");
|
||||
try {
|
||||
flush();
|
||||
} catch (Throwable t) {
|
||||
LOGGER.error("alarm rule flush, throw exception.", t);
|
||||
}
|
||||
LOGGER.info("alarm rule flush, finished, costTime:{}ms.", System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
private void flush() {
|
||||
List<AlarmRuleDO> dbAlarmRuleDOList = alarmRuleDao.listAll();
|
||||
if(dbAlarmRuleDOList == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
long maxGmtModifyTime = activeLastGmtModifyTime;
|
||||
Set<Long> dbAlarmRuleIdSet = new HashSet<>();
|
||||
for (AlarmRuleDO alarmRuleDO: dbAlarmRuleDOList) {
|
||||
dbAlarmRuleIdSet.add(alarmRuleDO.getId());
|
||||
if (alarmRuleDO.getGmtModify().getTime() > maxGmtModifyTime) {
|
||||
maxGmtModifyTime = alarmRuleDO.getGmtModify().getTime();
|
||||
}
|
||||
|
||||
if (!DBStatusEnum.PASSED.getStatus().equals(alarmRuleDO.getStatus())) {
|
||||
// remove alarm rules that are not currently active
|
||||
activeAlarmRuleMap.remove(alarmRuleDO.getId());
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!activeAlarmRuleMap.containsKey(alarmRuleDO.getId())
|
||||
|| activeLastGmtModifyTime < alarmRuleDO.getGmtModify().getTime()) {
|
||||
// newly added or modified alarm rules
|
||||
AlarmRuleDTO alarmRuleDTO = convert2AlarmRuleDTO(alarmRuleDO);
|
||||
if (alarmRuleDTO == null) {
|
||||
LOGGER.error("alarm rule flush, convert 2 dto failed.");
|
||||
continue;
|
||||
}
|
||||
activeAlarmRuleMap.put(alarmRuleDO.getId(), alarmRuleDTO);
|
||||
}
|
||||
}
|
||||
|
||||
// remove alarm rules that have been deleted from the DB
|
||||
Set<Long> activeAlarmRuleIdSet = new HashSet<>(activeAlarmRuleMap.keySet());
|
||||
activeAlarmRuleIdSet.removeAll(dbAlarmRuleIdSet);
|
||||
for (Long ruleId: activeAlarmRuleIdSet) {
|
||||
activeAlarmRuleMap.remove(ruleId);
|
||||
}
|
||||
|
||||
// record the latest modification time seen so far
|
||||
activeLastGmtModifyTime = maxGmtModifyTime;
|
||||
}
|
||||
|
||||
public static Map<Long, AlarmRuleDTO> getActiveAlarmRuleMap() {
|
||||
return activeAlarmRuleMap;
|
||||
}
|
||||
|
||||
private AlarmRuleDTO convert2AlarmRuleDTO(AlarmRuleDO alarmRuleDO) {
|
||||
if (alarmRuleDO == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
AlarmRuleDTO alarmRuleDTO = new AlarmRuleDTO();
|
||||
alarmRuleDTO.setId(alarmRuleDO.getId());
|
||||
alarmRuleDTO.setName(alarmRuleDO.getAlarmName());
|
||||
alarmRuleDTO.setDuration(0);
|
||||
alarmRuleDTO.setClusterId(null);
|
||||
alarmRuleDTO.setStrategyExpression(null);
|
||||
alarmRuleDTO.setStrategyFilterMap(new HashMap<>());
|
||||
alarmRuleDTO.setStrategyActionMap(new HashMap<>());
|
||||
alarmRuleDTO.setGmtModify(alarmRuleDO.getGmtModify().getTime());
|
||||
try {
|
||||
List<AlarmStrategyExpressionDTO> alarmStrategyExpressionDTOList = JSON.parseArray(alarmRuleDO.getStrategyExpressions(), AlarmStrategyExpressionDTO.class);
|
||||
if (alarmStrategyExpressionDTOList == null || alarmStrategyExpressionDTOList.size() != 1) {
|
||||
// the strategy expression is invalid: exactly one expression is expected
|
||||
return null;
|
||||
}
|
||||
alarmRuleDTO.setStrategyExpression(alarmStrategyExpressionDTOList.get(0));
|
||||
|
||||
List<AlarmStrategyFilterDTO> alarmStrategyFilterDTOList = JSON.parseArray(alarmRuleDO.getStrategyFilters(), AlarmStrategyFilterDTO.class);
|
||||
if (alarmStrategyFilterDTOList == null || alarmStrategyFilterDTOList.isEmpty()) {
|
||||
// no filter strategy configured
|
||||
return null;
|
||||
}
|
||||
for (AlarmStrategyFilterDTO alarmStrategyFilterDTO: alarmStrategyFilterDTOList) {
|
||||
if ("clusterId".equals(alarmStrategyFilterDTO.getKey())) {
|
||||
alarmRuleDTO.setClusterId(Long.valueOf(alarmStrategyFilterDTO.getValue()));
|
||||
continue;
|
||||
}
|
||||
alarmRuleDTO.getStrategyFilterMap().put(alarmStrategyFilterDTO.getKey(), alarmStrategyFilterDTO.getValue());
|
||||
}
|
||||
|
||||
List<AlarmStrategyActionDTO> alarmStrategyActionDTOList = JSON.parseArray(alarmRuleDO.getStrategyActions(), AlarmStrategyActionDTO.class);
|
||||
if (alarmStrategyActionDTOList == null || alarmStrategyActionDTOList.isEmpty()) {
|
||||
// no notify action configured
|
||||
return null;
|
||||
}
|
||||
for (AlarmStrategyActionDTO alarmStrategyActionDTO: alarmStrategyActionDTOList) {
|
||||
alarmRuleDTO.getStrategyActionMap().put(alarmStrategyActionDTO.getActionWay(), alarmStrategyActionDTO);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("alarm rule flush, convert 2 object failed, alarmRuleDO:{}.", alarmRuleDO, e);
|
||||
return null;
|
||||
}
|
||||
|
||||
return alarmRuleDTO;
|
||||
}
|
||||
}
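For reference, the strategyFilters column parsed above is a JSON array of key/value pairs in which a clusterId entry is treated specially. A hypothetical example value, with field names inferred from the getters used in this class (the real column content may differ):

// hypothetical stored value of alarmRuleDO.getStrategyFilters()
String strategyFilters = "[{\"key\":\"clusterId\",\"value\":\"1\"},{\"key\":\"topicName\",\"value\":\"order_topic\"}]";
List<AlarmStrategyFilterDTO> filters = JSON.parseArray(strategyFilters, AlarmStrategyFilterDTO.class);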
|
||||
@@ -0,0 +1,131 @@
|
||||
package com.xiaojukeji.kafka.manager.service.monitor;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMatchStatus;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaMetricsCache;
|
||||
import com.xiaojukeji.kafka.manager.service.monitor.impl.BrokerMonitorMatchServiceImpl;
|
||||
import com.xiaojukeji.kafka.manager.service.monitor.impl.ConsumerGroupMonitorMatchServiceImpl;
|
||||
import com.xiaojukeji.kafka.manager.service.monitor.impl.TopicMonitorMatchServiceImpl;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/12/25
|
||||
*/
|
||||
@Component
|
||||
@ConditionalOnProperty(prefix = "kafka-monitor", name = "enabled", havingValue = "true")
|
||||
public class AlarmScheduleCheckTask {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(AlarmScheduleCheckTask.class);
|
||||
|
||||
@Autowired
|
||||
private TopicMonitorMatchServiceImpl topicMonitorMatchService;
|
||||
|
||||
@Autowired
|
||||
private BrokerMonitorMatchServiceImpl brokerMonitorMatchService;
|
||||
|
||||
@Autowired
|
||||
private ConsumerGroupMonitorMatchServiceImpl consumerGroupMonitorMatchService;
|
||||
|
||||
@Autowired
|
||||
private AlarmNotifyService alarmNotifyService;
|
||||
|
||||
@Scheduled(cron="0 0/1 * * * ?")
|
||||
public void checkIfMatch() {
LOGGER.info("alarm check, start.");
|
||||
long startTime = System.currentTimeMillis();
|
||||
|
||||
Map<Long, AlarmRuleDTO> alarmRuleDTOMap = AlarmRuleManager.getActiveAlarmRuleMap();
|
||||
for (Long alarmRuleId: alarmRuleDTOMap.keySet()) {
|
||||
AlarmRuleDTO alarmRuleDTO = alarmRuleDTOMap.get(alarmRuleId);
|
||||
if (alarmRuleDTO == null) {
|
||||
continue;
|
||||
}
|
||||
MonitorMatchStatus status = checkIfMatch(alarmRuleDTO);
|
||||
if (MonitorMatchStatus.NO.equals(status)) {
|
||||
alarmRuleDTO.setDuration(0);
|
||||
continue;
|
||||
} else if (MonitorMatchStatus.UNKNOWN.equals(status)) {
|
||||
continue;
|
||||
}
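// the rule matched this minute: count consecutive matching checks until the configured duration is reached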
|
||||
|
||||
alarmRuleDTO.setDuration(alarmRuleDTO.getDuration() + 1);
|
||||
if (alarmRuleDTO.getDuration() < alarmRuleDTO.getStrategyExpression().getDuration()) {
|
||||
continue;
|
||||
}
|
||||
// duration threshold reached, send the alarm and reset the counter
|
||||
alarmNotifyService.send(alarmRuleDTO);
|
||||
alarmRuleDTO.setDuration(0);
|
||||
}
|
||||
LOGGER.info("alarm check, finish, costTime:{}ms.", System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
private MonitorMatchStatus checkIfMatch(AlarmRuleDTO alarmRuleDTO) {
if (alarmRuleDTO.getStrategyFilterMap().containsKey("brokerId")) {
// Broker
return checkIfMatchBroker(alarmRuleDTO);
} else if (alarmRuleDTO.getStrategyFilterMap().containsKey("consumerGroup")) {
// ConsumerGroup Lag
return checkIfMatchConsumerGroup(alarmRuleDTO);
} else if (alarmRuleDTO.getStrategyFilterMap().containsKey("topicName")) {
// Topic
return checkIfMatchTopic(alarmRuleDTO);
}
// unknown
return MonitorMatchStatus.UNKNOWN;
}
|
||||
|
||||
private MonitorMatchStatus checkIfMatchTopic(AlarmRuleDTO alarmRuleDTO) {
|
||||
for (Long clusterId: KafkaMetricsCache.getTopicMetricsClusterIdSet()) {
|
||||
if (!clusterId.equals(alarmRuleDTO.getClusterId())) {
|
||||
continue;
|
||||
}
|
||||
|
||||
List<TopicMetrics> topicMetricsList = KafkaMetricsCache.getTopicMetricsFromCache(clusterId);
|
||||
if (topicMetricsList == null || topicMetricsList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
return topicMonitorMatchService.validate(alarmRuleDTO, topicMetricsList);
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
|
||||
private MonitorMatchStatus checkIfMatchBroker(AlarmRuleDTO alarmRuleDTO) {
|
||||
for (Long clusterId: KafkaMetricsCache.getBrokerMetricsClusterIdSet()) {
|
||||
if (!clusterId.equals(alarmRuleDTO.getClusterId())) {
|
||||
continue;
|
||||
}
|
||||
|
||||
List<BrokerMetrics> brokerMetricsList = KafkaMetricsCache.getBrokerMetricsFromCache(clusterId);
|
||||
if (brokerMetricsList == null || brokerMetricsList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
return brokerMonitorMatchService.validate(alarmRuleDTO, brokerMetricsList);
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
|
||||
private MonitorMatchStatus checkIfMatchConsumerGroup(AlarmRuleDTO alarmRuleDTO) {
|
||||
for (Long clusterId: KafkaMetricsCache.getConsumerMetricsClusterIdSet()) {
|
||||
if (!clusterId.equals(alarmRuleDTO.getClusterId())) {
|
||||
continue;
|
||||
}
|
||||
|
||||
List<ConsumerMetrics> consumerMetricsList = KafkaMetricsCache.getConsumerMetricsFromCache(clusterId);
|
||||
if (consumerMetricsList == null || consumerMetricsList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
return consumerGroupMonitorMatchService.validate(alarmRuleDTO, consumerMetricsList);
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
package com.xiaojukeji.kafka.manager.service.monitor.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMatchStatus;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyExpressionDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.service.monitor.AbstractMonitorMatchService;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/3/18
|
||||
*/
|
||||
@Service
|
||||
public class BrokerMonitorMatchServiceImpl extends AbstractMonitorMatchService<BrokerMetrics> {
|
||||
@Override
|
||||
public MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, List<BrokerMetrics> dataList) {
|
||||
if (dataList == null || dataList.isEmpty()) {
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
for (BrokerMetrics data: dataList) {
|
||||
MonitorMatchStatus status = validate(alarmRuleDTO, data);
|
||||
if (!MonitorMatchStatus.UNKNOWN.equals(status)) {
|
||||
return status;
|
||||
}
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
|
||||
private MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, BrokerMetrics data) {
|
||||
if (!data.getBrokerId().equals(Integer.valueOf(alarmRuleDTO.getStrategyFilterMap().get("brokerId")))
|
||||
|| !data.getClusterId().equals(alarmRuleDTO.getClusterId())) {
|
||||
// this broker metric does not belong to the rule's broker/cluster
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
AlarmStrategyExpressionDTO alarmStrategyExpressionDTO = alarmRuleDTO.getStrategyExpression();
|
||||
if (MonitorMetricsType.BYTES_IN.getName().equals(alarmStrategyExpressionDTO.getMetric())) {
|
||||
return condition(
|
||||
data.getBytesInPerSec(),
|
||||
Double.valueOf(alarmStrategyExpressionDTO.getThreshold()),
|
||||
alarmStrategyExpressionDTO.getOpt()
|
||||
);
|
||||
} else if (MonitorMetricsType.BYTES_OUT.getName().equals(alarmStrategyExpressionDTO.getMetric())){
|
||||
return condition(
|
||||
data.getBytesOutPerSec(),
|
||||
Double.valueOf(alarmStrategyExpressionDTO.getThreshold()),
|
||||
alarmStrategyExpressionDTO.getOpt()
|
||||
);
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
package com.xiaojukeji.kafka.manager.service.monitor.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMatchStatus;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.ConsumerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyExpressionDTO;
|
||||
import com.xiaojukeji.kafka.manager.service.monitor.AbstractMonitorMatchService;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/3/18
|
||||
*/
|
||||
@Service
|
||||
public class ConsumerGroupMonitorMatchServiceImpl extends AbstractMonitorMatchService<ConsumerMetrics> {
|
||||
@Override
|
||||
public MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, List<ConsumerMetrics> dataList) {
|
||||
if (dataList == null || dataList.isEmpty()) {
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
for (ConsumerMetrics data: dataList) {
|
||||
MonitorMatchStatus status = validate(alarmRuleDTO, data);
|
||||
if (!MonitorMatchStatus.UNKNOWN.equals(status)) {
|
||||
return status;
|
||||
}
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
|
||||
private MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, ConsumerMetrics data) {
|
||||
if (!data.getTopicName().equals(alarmRuleDTO.getStrategyFilterMap().get("topicName"))
|
||||
|| !data.getConsumerGroup().equals(alarmRuleDTO.getStrategyFilterMap().get("consumerGroup"))
|
||||
|| !data.getClusterId().equals(alarmRuleDTO.getClusterId())) {
|
||||
// this consumer metric does not belong to the rule's topic/consumer group/cluster
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
|
||||
AlarmStrategyExpressionDTO alarmStrategyExpressionDTO = alarmRuleDTO.getStrategyExpression();
|
||||
if (MonitorMetricsType.LAG.getName().equals(alarmStrategyExpressionDTO.getMetric())) {
|
||||
return condition(
|
||||
Double.valueOf(data.getSumLag()),
|
||||
Double.valueOf(alarmStrategyExpressionDTO.getThreshold()),
|
||||
alarmStrategyExpressionDTO.getOpt()
|
||||
);
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package com.xiaojukeji.kafka.manager.service.monitor.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.monitor.MonitorMatchStatus;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmRuleDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.alarm.AlarmStrategyExpressionDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.service.monitor.AbstractMonitorMatchService;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/3/18
|
||||
*/
|
||||
@Service
|
||||
public class TopicMonitorMatchServiceImpl extends AbstractMonitorMatchService<TopicMetrics> {
|
||||
|
||||
@Override
|
||||
public MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, List<TopicMetrics> dataList) {
|
||||
if (dataList == null || dataList.isEmpty()) {
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
for (TopicMetrics data: dataList) {
|
||||
MonitorMatchStatus status = validate(alarmRuleDTO, data);
|
||||
if (!MonitorMatchStatus.UNKNOWN.equals(status)) {
|
||||
return status;
|
||||
}
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
|
||||
private MonitorMatchStatus validate(AlarmRuleDTO alarmRuleDTO, TopicMetrics data) {
|
||||
if (!data.getTopicName().equals(alarmRuleDTO.getStrategyFilterMap().get("topicName"))
|
||||
|| !data.getClusterId().equals(alarmRuleDTO.getClusterId())) {
|
||||
// this topic metric does not belong to the rule's topic/cluster
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
AlarmStrategyExpressionDTO alarmStrategyExpressionDTO = alarmRuleDTO.getStrategyExpression();
|
||||
if (MonitorMetricsType.BYTES_IN.getName().equals(alarmStrategyExpressionDTO.getMetric())) {
|
||||
return condition(
|
||||
data.getBytesInPerSec(),
|
||||
Double.valueOf(alarmStrategyExpressionDTO.getThreshold()),
|
||||
alarmStrategyExpressionDTO.getOpt()
|
||||
);
|
||||
} else if (MonitorMetricsType.BYTES_OUT.getName().equals(alarmStrategyExpressionDTO.getMetric())){
|
||||
return condition(
|
||||
data.getBytesOutPerSec(),
|
||||
Double.valueOf(alarmStrategyExpressionDTO.getThreshold()),
|
||||
alarmStrategyExpressionDTO.getOpt()
|
||||
);
|
||||
}
|
||||
return MonitorMatchStatus.UNKNOWN;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
package com.xiaojukeji.kafka.manager.service.notify;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaClientCache;
|
||||
|
||||
import org.apache.kafka.clients.producer.Callback;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.clients.producer.RecordMetadata;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 20/3/18
|
||||
*/
|
||||
@Component
|
||||
public class KafkaNotifier {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaNotifier.class);
|
||||
|
||||
public void produce(Long clusterId, String topicName, String message) {
|
||||
KafkaProducer<String, String> kafkaProducer = KafkaClientCache.getKafkaProducerClient(clusterId);
|
||||
if (kafkaProducer == null) {
|
||||
LOGGER.error("param illegal, get kafka producer client failed, clusterId:{}.", clusterId);
|
||||
return;
|
||||
}
|
||||
|
||||
kafkaProducer.send(new ProducerRecord<String, String>(topicName, message), new Callback() {
|
||||
@Override
|
||||
public void onCompletion(RecordMetadata recordMetadata, Exception exception) {
|
||||
if (null != exception) {
|
||||
LOGGER.info("produce failed, topicName:{} recordMetadata:{} exception:{}.", topicName, recordMetadata, exception);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
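produce() above is fire-and-forget: delivery failures are only logged inside the callback. If a caller needed delivery confirmation, a blocking variant along these lines could sit next to it; this is only a sketch, the 5-second timeout is arbitrary and a java.util.concurrent.TimeUnit import is assumed.

public boolean produceSync(Long clusterId, String topicName, String message) {
    KafkaProducer<String, String> kafkaProducer = KafkaClientCache.getKafkaProducerClient(clusterId);
    if (kafkaProducer == null) {
        LOGGER.error("param illegal, get kafka producer client failed, clusterId:{}.", clusterId);
        return false;
    }
    try {
        // send() returns a Future<RecordMetadata>; get() blocks until the broker acks or the timeout expires
        kafkaProducer.send(new ProducerRecord<String, String>(topicName, message)).get(5, TimeUnit.SECONDS);
        return true;
    } catch (Exception e) {
        LOGGER.error("sync produce failed, topicName:{}.", topicName, e);
        return false;
    }
}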
|
||||
@@ -0,0 +1,108 @@
|
||||
package com.xiaojukeji.kafka.manager.service.schedule;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.service.collector.*;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.JmxService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
/**
 * Manages the scheduled metrics-collection tasks
 * @author zengqiao
 * @date 19/4/17
 */
|
||||
@Component
|
||||
public class ScheduleCollectDataManager {
|
||||
private final static Logger logger = LoggerFactory.getLogger(ScheduleCollectDataManager.class);
|
||||
|
||||
/**
 * Scheduler executor held per cluster
 */
|
||||
private static Map<Long, ScheduledExecutorService> clusterScheduleServiceMap = new HashMap<>();
|
||||
|
||||
|
||||
/**
 * Collection interval, in seconds
 */
|
||||
private Integer collectTaskTimeInterval = 60;
|
||||
|
||||
@Autowired
|
||||
private ConsumerService consumeService;
|
||||
|
||||
@Autowired
|
||||
private ClusterService clusterService;
|
||||
|
||||
@Autowired
|
||||
private JmxService jmxService;
|
||||
|
||||
|
||||
/**
 * 1. Create a scheduler for each cluster
 * 2. Start each scheduler
 */
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
List<ClusterDO> clusterDOList = clusterService.listAll();
|
||||
if(clusterDOList == null){
|
||||
return;
|
||||
}
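// note: the clusters discovered at startup share this single scheduler pool; start(cluster) below builds a dedicated pool for clusters added later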
|
||||
ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(4 * Runtime.getRuntime().availableProcessors(), new ThreadFactory() {
|
||||
@Override
|
||||
public Thread newThread(Runnable r) {
|
||||
return new Thread(r, "DataCollectorManager-ScheduleTask");
|
||||
}
|
||||
});
|
||||
for (ClusterDO clusterDO : clusterDOList) {
|
||||
clusterScheduleServiceMap.put(clusterDO.getId(), scheduler);
|
||||
scheduler.scheduleAtFixedRate(new CollectBrokerMetricsTask(clusterDO.getId(), jmxService), 1, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectTopicMetricsTask(clusterDO.getId(), jmxService), 2, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectConsumerMetricsTask(clusterDO.getId(), consumeService), 3, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectConsumerGroupZKMetadataTask(clusterDO.getId()), 4, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectConsumerGroupBKMetadataTask(clusterDO.getId()), 5, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Start the scheduled collection tasks of the given cluster
 */
|
||||
public void start(final ClusterDO cluster) {
|
||||
ScheduledExecutorService scheduler = clusterScheduleServiceMap.get(cluster.getId());
|
||||
if (scheduler == null || scheduler.isTerminated() || scheduler.isShutdown()) {
|
||||
scheduler = Executors.newScheduledThreadPool(20, new ThreadFactory() {
|
||||
@Override
|
||||
public Thread newThread(Runnable r) {
|
||||
return new Thread(r, "DataCollectorManager-ScheduleTask-ClusterId-" + cluster.getId());
|
||||
}
|
||||
});
|
||||
clusterScheduleServiceMap.put(cluster.getId(), scheduler);
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
scheduler.scheduleAtFixedRate(new CollectBrokerMetricsTask(cluster.getId(), jmxService), 30, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectTopicMetricsTask(cluster.getId(), jmxService), 60, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectConsumerMetricsTask(cluster.getId(), consumeService), 90, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectConsumerGroupZKMetadataTask(cluster.getId()), 90, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
scheduler.scheduleAtFixedRate(new CollectConsumerGroupBKMetadataTask(cluster.getId()), 90, collectTaskTimeInterval, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
/**
 * Stop the scheduled collection tasks of the given cluster
 */
|
||||
public void stop(ClusterDO clusterDO) {
|
||||
ScheduledExecutorService scheduler = clusterScheduleServiceMap.get(clusterDO.getId());
|
||||
if (scheduler != null) {
|
||||
scheduler.shutdown();
|
||||
clusterScheduleServiceMap.remove(clusterDO.getId());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,71 @@
|
||||
package com.xiaojukeji.kafka.manager.service.schedule;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.TopicMetricsDao;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
/**
 * Periodically deletes expired metrics from the DB
 * @author zengqiao
 * @date 20/1/8
 */
|
||||
@Component
|
||||
@ConditionalOnProperty(prefix = "cluster-metrics", name = "enabled", havingValue = "true")
|
||||
public class ScheduleDeleteMetrics {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(ScheduleDeleteMetrics.class);
|
||||
|
||||
@Autowired
|
||||
private TopicMetricsDao topicMetricsDao;
|
||||
|
||||
@Autowired
|
||||
private BrokerMetricsDao brokerMetricsDao;
|
||||
|
||||
@Autowired
|
||||
private ClusterMetricsDao clusterMetricsDao;
|
||||
|
||||
@Scheduled(cron="0/7 * * * * ?")
|
||||
public void deleteDBMetrics(){
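// metrics deletion is currently disabled by this early return; the helpers below would keep 1/5/10 days of topic/broker/cluster metrics respectively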
|
||||
return;
|
||||
// long startTime = System.currentTimeMillis();
|
||||
// LOGGER.info("start delete metrics");
|
||||
// try {
|
||||
// deleteTopicMetrics();
|
||||
// } catch (Exception e) {
|
||||
// LOGGER.error("delete topic metrics failed.", e);
|
||||
// }
|
||||
// try {
|
||||
// deleteBrokerMetrics();
|
||||
// } catch (Exception e) {
|
||||
// LOGGER.error("delete broker metrics failed.", e);
|
||||
// }
|
||||
// try {
|
||||
// deleteClusterMetrics();
|
||||
// } catch (Exception e) {
|
||||
// LOGGER.error("delete cluster metrics failed.", e);
|
||||
// }
|
||||
// LOGGER.info("finish delete metrics, costTime:{}ms.", System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
private void deleteTopicMetrics() {
|
||||
Date endTime = new Date(System.currentTimeMillis() - 1 * 24 * 60 * 60 * 1000);
|
||||
topicMetricsDao.deleteBeforeTime(endTime);
|
||||
}
|
||||
|
||||
private void deleteBrokerMetrics() {
|
||||
Date endTime = new Date(System.currentTimeMillis() - 5 * 24 * 60 * 60 * 1000);
|
||||
brokerMetricsDao.deleteBeforeTime(endTime);
|
||||
}
|
||||
|
||||
private void deleteClusterMetrics() {
|
||||
Date endTime = new Date(System.currentTimeMillis() - 10 * 24 * 60 * 60 * 1000);
|
||||
clusterMetricsDao.deleteBeforeTime(endTime);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,118 @@
|
||||
package com.xiaojukeji.kafka.manager.service.schedule;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterMetricsDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.TopicMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaMetricsCache;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.scheduling.annotation.SchedulingConfigurer;
|
||||
import org.springframework.scheduling.config.ScheduledTaskRegistrar;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Executors;
|
||||
|
||||
/**
 * Persists the cached metrics into the DB
 * @author zengqiao
 * @date 2019-05-10
 */
|
||||
@Component
|
||||
public class ScheduleStoreMetrics implements SchedulingConfigurer {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(ScheduleStoreMetrics.class);
|
||||
|
||||
@Autowired
|
||||
private TopicMetricsDao topicMetricsDao;
|
||||
|
||||
@Autowired
|
||||
private BrokerMetricsDao brokerMetricsDao;
|
||||
|
||||
@Autowired
|
||||
private ClusterMetricsDao clusterMetricsDao;
|
||||
|
||||
private static final Integer INSERT_BATCH_SIZE = 100;
|
||||
|
||||
@Override
|
||||
public void configureTasks(ScheduledTaskRegistrar scheduledTaskRegistrar) {
|
||||
scheduledTaskRegistrar.setScheduler(Executors.newScheduledThreadPool(3));
|
||||
}
|
||||
|
||||
@Scheduled(cron="0 0/1 * * * ?")
|
||||
public void storeTopicMetrics(){
|
||||
long startTime = System.currentTimeMillis();
|
||||
for (Long clusterId: KafkaMetricsCache.getTopicMetricsClusterIdSet()) {
|
||||
try {
|
||||
List<TopicMetrics> topicMetricsList = KafkaMetricsCache.getTopicMetricsFromCache(clusterId);
|
||||
if (topicMetricsList == null || topicMetricsList.isEmpty()) {
|
||||
continue;
|
||||
}
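// write in batches of INSERT_BATCH_SIZE so a single INSERT statement stays bounded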
|
||||
int i = 0;
|
||||
do {
|
||||
topicMetricsDao.batchAdd(topicMetricsList.subList(i, Math.min(i + INSERT_BATCH_SIZE, topicMetricsList.size())));
|
||||
i += INSERT_BATCH_SIZE;
|
||||
} while (i < topicMetricsList.size());
|
||||
} catch (Throwable t) {
|
||||
LOGGER.error("save topic metrics failed, clusterId:{}.", clusterId, t);
|
||||
}
|
||||
}
|
||||
LOGGER.info("save topic metrics finished, costTime:{}ms.", System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
@Scheduled(cron="0 0/1 * * * ?")
|
||||
public void storeBrokerMetrics(){
|
||||
long startTime = System.currentTimeMillis();
|
||||
for (Long clusterId: KafkaMetricsCache.getBrokerMetricsClusterIdSet()) {
|
||||
try {
|
||||
List<BrokerMetrics> brokerMetricsList = KafkaMetricsCache.getBrokerMetricsFromCache(clusterId);
|
||||
if (brokerMetricsList == null || brokerMetricsList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
int i = 0;
|
||||
do {
|
||||
brokerMetricsDao.batchAdd(brokerMetricsList.subList(i, Math.min(i + INSERT_BATCH_SIZE, brokerMetricsList.size())));
|
||||
i += INSERT_BATCH_SIZE;
|
||||
} while (i < brokerMetricsList.size());
|
||||
} catch (Throwable t) {
|
||||
LOGGER.error("save broker metrics failed, clusterId:{}.", clusterId, t);
|
||||
}
|
||||
}
|
||||
LOGGER.info("save broker metrics finished, costTime:{}ms.", System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
@Scheduled(cron="0 0/1 * * * ?")
|
||||
public void storeClusterMetrics(){
|
||||
long startTime = System.currentTimeMillis();
|
||||
for (Long clusterId: KafkaMetricsCache.getBrokerMetricsClusterIdSet()) {
|
||||
try {
|
||||
List<BrokerMetrics> brokerMetricsList = KafkaMetricsCache.getBrokerMetricsFromCache(clusterId);
|
||||
if (brokerMetricsList == null || brokerMetricsList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
ClusterMetricsDO clusterMetricsDO = new ClusterMetricsDO();
clusterMetricsDO.setClusterId(clusterId);
clusterMetricsDO.setTopicNum(0);
clusterMetricsDO.setPartitionNum(0);
clusterMetricsDO.setBrokerNum(0);
// initialize the traffic fields before the accumulation below, assuming they may otherwise default to null
clusterMetricsDO.setBytesInPerSec(0D);
clusterMetricsDO.setBytesOutPerSec(0D);
clusterMetricsDO.setBytesRejectedPerSec(0D);
clusterMetricsDO.setMessagesInPerSec(0D);
|
||||
for (BrokerMetrics brokerMetrics: brokerMetricsList) {
|
||||
clusterMetricsDO.setBytesInPerSec(clusterMetricsDO.getBytesInPerSec() + brokerMetrics.getBytesInPerSec());
|
||||
clusterMetricsDO.setBytesOutPerSec(clusterMetricsDO.getBytesOutPerSec() + brokerMetrics.getBytesOutPerSec());
|
||||
clusterMetricsDO.setBytesRejectedPerSec(clusterMetricsDO.getBytesRejectedPerSec() + brokerMetrics.getBytesRejectedPerSec());
|
||||
clusterMetricsDO.setMessagesInPerSec(clusterMetricsDO.getMessagesInPerSec() + brokerMetrics.getMessagesInPerSec());
|
||||
}
|
||||
List<ClusterMetricsDO> clusterMetricsDOList = new ArrayList<>();
|
||||
clusterMetricsDOList.add(clusterMetricsDO);
|
||||
clusterMetricsDao.batchAdd(clusterMetricsDOList);
|
||||
} catch (Throwable t) {
|
||||
LOGGER.error("save cluster metrics failed, clusterId:{}.", clusterId, t);
|
||||
}
|
||||
}
|
||||
LOGGER.info("save cluster metrics finished, costTime:{}ms.", System.currentTimeMillis() - startTime);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,72 @@
|
||||
package com.xiaojukeji.kafka.manager.service.schedule;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.ReassignmentStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.MigrationTaskDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.MigrationTaskDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.MigrationService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
 * Partition reassignment (migration) status refresh
 * @author zengqiao
 * @date 19/12/29
 */
|
||||
@Component
|
||||
public class ScheduleTriggerTopicReassignmentTask {
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(ScheduleTriggerTopicReassignmentTask.class);
|
||||
|
||||
@Autowired
|
||||
private MigrationService migrationService;
|
||||
|
||||
@Autowired
|
||||
private MigrationTaskDao migrationTaskDao;
|
||||
|
||||
@Scheduled(cron="0 0/1 * * * ?")
|
||||
public void reFlushTaskStatus(){
|
||||
List<MigrationTaskDO> migrationTaskDOList = migrationService.getByStatus(ReassignmentStatusEnum.RUNNING.getCode());
|
||||
if (migrationTaskDOList == null || migrationTaskDOList.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (MigrationTaskDO migrationTaskDO: migrationTaskDOList) {
|
||||
try {
|
||||
ClusterDO clusterDO = ClusterMetadataManager.getClusterFromCache(migrationTaskDO.getClusterId());
|
||||
if (clusterDO == null) {
|
||||
continue;
|
||||
}
|
||||
Map<Integer, Integer> statusMap = migrationService.getMigrationStatus(clusterDO, migrationTaskDO.getReassignmentJson());
|
||||
int running = 0;
|
||||
int failed = 0;
|
||||
for (Integer status: statusMap.values()) {
|
||||
if (ReassignmentStatusEnum.RUNNING.getCode().equals(status)) {
|
||||
running += 1;
|
||||
break;
|
||||
} else if (ReassignmentStatusEnum.SUCCESS.getCode().equals(status)) {
|
||||
// partition migration succeeded, nothing to count
|
||||
} else {
|
||||
failed += 1;
|
||||
}
|
||||
}
|
||||
if (running > 0) {
|
||||
continue;
|
||||
} else if (failed > 0) {
|
||||
migrationTaskDO.setStatus(ReassignmentStatusEnum.FAILED.getCode());
|
||||
} else {
|
||||
migrationTaskDO.setStatus(ReassignmentStatusEnum.SUCCESS.getCode());
|
||||
}
|
||||
migrationTaskDao.updateById(migrationTaskDO.getId(), migrationTaskDO.getStatus(), migrationTaskDO.getThrottle());
|
||||
} catch (Exception e) {
LOGGER.error("flush reassignment task status failed, taskId:{}.", migrationTaskDO.getId(), e);
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.PreferredReplicaElectEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
|
||||
public interface AdminPreferredReplicaElectService {
|
||||
PreferredReplicaElectEnum preferredReplicaElectionStatus(ClusterDO clusterDO);
|
||||
|
||||
PreferredReplicaElectEnum preferredReplicaElection(ClusterDO clusterDO, String operator);
|
||||
|
||||
PreferredReplicaElectEnum preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator);
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.AdminTopicStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/11/21
|
||||
*/
|
||||
public interface AdminTopicService {
|
||||
AdminTopicStatusEnum createTopic(ClusterDO clusterDO, TopicMetadata topicMetadata, TopicDO topicDO, Properties topicConfig, String operator);
|
||||
|
||||
AdminTopicStatusEnum deleteTopic(ClusterDO clusterDO, String topicName, String operator);
|
||||
|
||||
AdminTopicStatusEnum modifyTopic(ClusterDO clusterDO, TopicDO topicDO, Properties topicConfig, String operator);
|
||||
|
||||
AdminTopicStatusEnum expandTopic(ClusterDO clusterDO, TopicMetadata topicMetadata, String operator);
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
public interface AlarmRuleService {
|
||||
Result addAlarmRule(AlarmRuleDO alarmRuleDO);
|
||||
|
||||
Result deleteById(String operator, Long id);
|
||||
|
||||
Result updateById(String operator, AlarmRuleDO alarmRuleDO);
|
||||
|
||||
AlarmRuleDO getById(Long id);
|
||||
|
||||
List<AlarmRuleDO> listAll();
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.analysis.AnalysisBrokerDTO;
|
||||
|
||||
/**
|
||||
* @author huangyiminghappy@163.com, zengqiao_cn@163.com
|
||||
* @date 2019-06-14
|
||||
*/
|
||||
public interface AnalysisService {
|
||||
AnalysisBrokerDTO doAnalysisBroker(Long clusterId , Integer brokerId);
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.BrokerBasicDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.BrokerOverviewDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.BrokerOverallDTO;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Service interface for the Broker module
|
||||
* @author tukun, zengqiao
|
||||
* @date 2015/11/9
|
||||
*/
|
||||
public interface BrokerService {
|
||||
/**
|
||||
* Get the broker overview list
|
||||
*/
|
||||
List<BrokerOverviewDTO> getBrokerOverviewList(Long clusterId, List<String> specifiedFieldList, boolean simple);
|
||||
|
||||
/**
|
||||
* Get the broker overall info list
|
||||
*/
|
||||
List<BrokerOverallDTO> getBrokerOverallList(Long clusterId, List<String> specifiedFieldList);
|
||||
|
||||
/**
|
||||
* Get the specified BrokerMetrics fields from JMX
|
||||
*/
|
||||
Map<Integer, BrokerMetrics> getSpecifiedBrokerMetrics(Long clusterId, List<String> specifiedFieldList, boolean simple);
|
||||
|
||||
/**
|
||||
* Get the traffic metrics of a single broker
|
||||
*/
|
||||
BrokerMetrics getSpecifiedBrokerMetrics(Long clusterId, Integer brokerId, List<String> specifiedFieldList, Boolean simple);
|
||||
|
||||
/**
|
||||
* Get broker metrics within a time range
|
||||
*/
|
||||
List<BrokerMetrics> getBrokerMetricsByInterval(Long clusterId, Integer brokerId, Date startTime, Date endTime);
|
||||
|
||||
/**
|
||||
* Get broker details by cluster and brokerId
|
||||
*/
|
||||
BrokerBasicDTO getBrokerBasicDTO(Long clusterId, Integer brokerId, List<String> specifiedFieldList);
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterMetricsDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ControllerDO;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Cluster Service
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
public interface ClusterService {
|
||||
ClusterDO getById(Long clusterId);
|
||||
|
||||
Result addNewCluster(ClusterDO clusterDO, String operator);
|
||||
|
||||
Result updateCluster(ClusterDO newClusterDO, boolean reload, String operator);
|
||||
|
||||
List<ClusterDO> listAll();
|
||||
|
||||
List<ClusterMetricsDO> getClusterMetricsByInterval(long clusterId, Date startTime, Date endTime);
|
||||
|
||||
List<ControllerDO> getKafkaControllerHistory(Long clusterId);
|
||||
}
|
||||
@@ -0,0 +1,76 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumerDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumerGroupDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.PartitionOffsetDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumeDetailDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Consumer-related service interface
|
||||
* @author tukun
|
||||
* @date 2015/11/12.
|
||||
*/
|
||||
public interface ConsumerService {
|
||||
/**
|
||||
* 获取消费组列表
|
||||
* @param clusterId 集群Id
|
||||
* @author zengqiao
|
||||
* @date 19/5/14
|
||||
* @return java.util.List<com.didichuxing.datachannel.kafka.manager.common.entity.dto.ConsumerGroupDTO>
|
||||
*/
|
||||
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId);
|
||||
|
||||
/**
|
||||
* 查询消费Topic的消费组
|
||||
* @param clusterId 集群Id
|
||||
* @param topicName Topic名称
|
||||
* @author zengqiao
|
||||
* @date 19/5/14
|
||||
* @return java.util.List<com.didichuxing.datachannel.kafka.manager.common.entity.dto.ConsumerGroupDTO>
|
||||
*/
|
||||
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName);
|
||||
|
||||
/**
|
||||
* 查询消费详情
|
||||
*/
|
||||
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO);
|
||||
|
||||
/**
|
||||
* 获取消费组消费的Topic列表
|
||||
* @param clusterDO 集群
|
||||
* @param consumerGroupDTO 消费组
|
||||
* @author zengqiao
|
||||
* @date 19/5/14
|
||||
* @return java.util.List<java.lang.String>
|
||||
*/
|
||||
List<String> getConsumerGroupConsumedTopicList(ClusterDO clusterDO, ConsumerGroupDTO consumerGroupDTO);
|
||||
|
||||
/**
|
||||
* 获取监控的消费者列表
|
||||
* @param clusterDO
|
||||
* @return
|
||||
*/
|
||||
List<ConsumerDTO> getMonitoredConsumerList(ClusterDO clusterDO,
|
||||
Map<String, List<PartitionState>> topicNamePartitionStateListMap);
|
||||
|
||||
/**
|
||||
* 重置offset
|
||||
* @param clusterDO 集群信息
|
||||
* @param topicName topic名称
|
||||
* @param consumerGroupDTO 消费组
|
||||
* @param partitionOffsetDTOList 设置的offset
|
||||
* @return List<Result>
|
||||
*/
|
||||
List<Result> resetConsumerOffset(ClusterDO clusterDO,
|
||||
String topicName,
|
||||
ConsumerGroupDTO consumerGroupDTO,
|
||||
List<PartitionOffsetDTO> partitionOffsetDTOList);
|
||||
|
||||
Map<Long, Integer> getConsumerGroupNumMap(List<ClusterDO> clusterDOList);
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Service interface for fetching metrics via JMX
|
||||
*
|
||||
* @author tukun, zengqiao
|
||||
* @date 2015/11/11.
|
||||
*/
|
||||
public interface JmxService {
|
||||
/**
|
||||
* Fetch the specified broker metrics fields via JMX
|
||||
*/
|
||||
BrokerMetrics getSpecifiedBrokerMetricsFromJmx(Long clusterId, Integer brokerId, List<String> specifiedFieldList, Boolean simple);
|
||||
|
||||
TopicMetrics getSpecifiedTopicMetricsFromJmx(Long clusterId, String topicName, List<String> specifiedFieldList, Boolean simple);
|
||||
|
||||
TopicMetrics getSpecifiedBrokerTopicMetricsFromJmx(Long clusterId, Integer brokerId, String topicName, List<String> specifiedFieldList, Boolean simple);
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.AccountRoleEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.AccountDO;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019-04-26
|
||||
*/
|
||||
public interface LoginService {
|
||||
/**
|
||||
* 登陆
|
||||
* @param request request
|
||||
* @param username username
|
||||
* @param password password
|
||||
* @return Result
|
||||
*/
|
||||
Result<AccountRoleEnum> login(HttpServletRequest request, String username, String password);
|
||||
|
||||
/**
|
||||
* 登出
|
||||
* @param request request
|
||||
* @param username username
|
||||
* @return Result
|
||||
*/
|
||||
Result logoff(HttpServletRequest request, String username);
|
||||
|
||||
/**
|
||||
* 添加新账号
|
||||
* @param accountDO accountDO
|
||||
* @return Result
|
||||
*/
|
||||
Result addNewAccount(AccountDO accountDO);
|
||||
|
||||
/**
|
||||
* 删除用户
|
||||
* @param username username
|
||||
* @return boolean
|
||||
*/
|
||||
boolean deleteByName(String username);
|
||||
|
||||
/**
|
||||
* 更新账户
|
||||
* @param accountDO accountDO
|
||||
* @param oldPassword 老密码
|
||||
* @return Result
|
||||
*/
|
||||
Result updateAccount(AccountDO accountDO, String oldPassword);
|
||||
|
||||
/**
|
||||
* 用户列表
|
||||
* @return List<AccountDO>
|
||||
*/
|
||||
List<AccountDO> list();
|
||||
|
||||
/**
|
||||
* 是否登陆了
|
||||
* @param username username
|
||||
* @return boolean
|
||||
*/
|
||||
boolean isLogin(String username);
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.MigrationTaskDO;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* migrate topic service
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/16
|
||||
*/
|
||||
public interface MigrationService {
|
||||
/**
|
||||
* 创建迁移任务
|
||||
*/
|
||||
Result<MigrationTaskDO> createMigrationTask(Long clusterId, String topicName, List<Integer> partitionList, Long throttle, List<Integer> brokerIdList, String description);
|
||||
|
||||
/**
|
||||
* 获取迁移任务基本信息
|
||||
* @param taskId task id
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/16
|
||||
* @return MigrationTaskDO
|
||||
*/
|
||||
MigrationTaskDO getMigrationTask(Long taskId);
|
||||
|
||||
/**
|
||||
* 查看迁移进度
|
||||
* @param cluster 集群
|
||||
* @param reassignmentJson 迁移JSON
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/16
|
||||
* @return Map<partitionId, MigrationStatus>
|
||||
*/
|
||||
Map<Integer, Integer> getMigrationStatus(ClusterDO cluster, String reassignmentJson);
|
||||
|
||||
/**
|
||||
* 执行迁移任务
|
||||
* @param taskId 任务ID
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/16
|
||||
* @return Result
|
||||
*/
|
||||
Result executeMigrationTask(Long taskId);
|
||||
|
||||
Result modifyMigrationTask(Long taskId, Long throttle);
|
||||
|
||||
/**
|
||||
* 列出所有的迁移任务
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/16
|
||||
* @return List<MigrationTaskDO>
|
||||
*/
|
||||
List<MigrationTaskDO> getMigrationTaskList();
|
||||
|
||||
List<MigrationTaskDO> getByStatus(Integer status);
|
||||
|
||||
/**
|
||||
* 删除迁移任务
|
||||
* @param taskId 任务ID
|
||||
* @author zengqiao_cn@163.com
|
||||
* @date 19/4/16
|
||||
* @return Result
|
||||
*/
|
||||
Result deleteMigrationTask(Long taskId);
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.OrderTypeEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.OrderPartitionDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.OrderTopicDO;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2017/7/30.
|
||||
*/
|
||||
public interface OrderService {
|
||||
/**
|
||||
* 创建Topic申请工单
|
||||
* @param orderTopicDO 工单信息
|
||||
* @return java.lang.Boolean
|
||||
*/
|
||||
Boolean createOrderTopic(OrderTopicDO orderTopicDO);
|
||||
|
||||
/**
|
||||
* 取消工单
|
||||
* @param orderId 工单Id
|
||||
* @param operator 操作人
|
||||
* @param orderTypeEnum 工单类型
|
||||
* @date 19/6/23
|
||||
* @return Result
|
||||
*/
|
||||
Result cancelOrder(Long orderId, String operator, OrderTypeEnum orderTypeEnum);
|
||||
|
||||
/**
|
||||
* 修改Topic工单
|
||||
* @param orderTopicDO 工单
|
||||
* @param operator 操作人
|
||||
* @date 19/6/23
|
||||
* @return Result
|
||||
*/
|
||||
Result modifyOrderTopic(OrderTopicDO orderTopicDO, String operator, boolean admin);
|
||||
|
||||
/**
|
||||
* 修改Partition工单
|
||||
* @param orderPartitionDO 工单
|
||||
* @param operator 操作人
|
||||
* @date 19/6/23
|
||||
* @return Result
|
||||
*/
|
||||
Result modifyOrderPartition(OrderPartitionDO orderPartitionDO, String operator);
|
||||
|
||||
/**
|
||||
* 查询Topic工单
|
||||
* @param username 用户名
|
||||
* @return List<OrderTopicDO>
|
||||
*/
|
||||
List<OrderTopicDO> getOrderTopics(String username);
|
||||
|
||||
/**
|
||||
* 查询Topic工单
|
||||
* @param orderId 工单ID
|
||||
* @return OrderTopicDO
|
||||
*/
|
||||
OrderTopicDO getOrderTopicById(Long orderId);
|
||||
|
||||
/**
|
||||
* 创建partition申请工单
|
||||
* @param orderPartitionDO 工单信息
|
||||
* @return java.lang.Boolean
|
||||
*/
|
||||
Boolean createOrderPartition(OrderPartitionDO orderPartitionDO);
|
||||
|
||||
/**
|
||||
* 查询partition工单
|
||||
* @param username 用户名
|
||||
* @param orderId 工单Id
|
||||
* @return List<OrderPartitionDO>
|
||||
*/
|
||||
List<OrderPartitionDO> getOrderPartitions(String username, Long orderId);
|
||||
|
||||
/**
|
||||
* 查询Partition工单
|
||||
* @param orderId 工单ID
|
||||
* @return OrderPartitionDO
|
||||
*/
|
||||
OrderPartitionDO getOrderPartitionById(Long orderId);
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.RegionDO;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/21
|
||||
*/
|
||||
public interface RegionService {
|
||||
/**
|
||||
* 创建Region
|
||||
*/
|
||||
Result createRegion(RegionDO regionDO);
|
||||
|
||||
/**
|
||||
* 删除Region
|
||||
*/
|
||||
Boolean deleteById(Long regionId);
|
||||
|
||||
/**
|
||||
* 修改Region信息
|
||||
*/
|
||||
Result updateRegion(RegionDO regionDO);
|
||||
|
||||
/**
|
||||
* 查询Region详情
|
||||
*/
|
||||
List<RegionDO> getByClusterId(Long clusterId);
|
||||
|
||||
/**
|
||||
* 获取集群有几个Region
|
||||
*/
|
||||
Map<Long, Long> getRegionNum();
|
||||
|
||||
/**
|
||||
* 获取Topic所属Region
|
||||
*/
|
||||
List<RegionDO> getRegionByTopicName(Long clusterId, String topicName);
|
||||
|
||||
/**
|
||||
* 合并regionIdList和brokerIdList中的brokerId
|
||||
*/
|
||||
List<Integer> getFullBrokerId(Long clusterId, List<Long> regionIdList, List<Integer> brokerIdList);
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.TopicFavoriteDO;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2017/7/20.
|
||||
*/
|
||||
public interface TopicManagerService {
|
||||
List<TopicDO> getByClusterId(Long clusterId);
|
||||
|
||||
TopicDO getByTopicName(Long clusterId, String topicName);
|
||||
|
||||
// Properties getTopicProperties(Long clusterId, String topicName);
|
||||
|
||||
/**
|
||||
* 收藏Topic
|
||||
*/
|
||||
Boolean addFavorite(List<TopicFavoriteDO> topicFavoriteDOList);
|
||||
|
||||
/**
|
||||
* 取消收藏Topic
|
||||
*/
|
||||
Boolean delFavorite(List<TopicFavoriteDO> topicFavoriteDOList);
|
||||
|
||||
/**
|
||||
* 获取收藏的Topic列表
|
||||
*/
|
||||
List<TopicFavoriteDO> getFavorite(String username);
|
||||
|
||||
/**
|
||||
* 获取收藏的Topic列表
|
||||
*/
|
||||
List<TopicFavoriteDO> getFavorite(String username, Long clusterId);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,72 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.PartitionOffsetDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.TopicBasicDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.TopicOverviewDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.TopicPartitionDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Topic-related service interface
|
||||
* @author tukun
|
||||
* @date 2015/11/11.
|
||||
*/
|
||||
public interface TopicService {
|
||||
Long calTopicMaxAvgBytesIn(List<TopicMetrics> topicMetricsList, Integer maxAvgBytesInDuration);
|
||||
|
||||
/**
|
||||
* 根据时间区间获取Topic监控数据
|
||||
*/
|
||||
List<TopicMetrics> getTopicMetricsByInterval(Long clusterId, String topic, Date startTime, Date endTime);
|
||||
|
||||
/**
|
||||
* 获取brokerId下所有的Topic及其对应的PartitionId
|
||||
*/
|
||||
Map<String, List<Integer>> getTopicPartitionIdMap(Long clusterId, Integer brokerId);
|
||||
|
||||
/**
|
||||
* 获取 Topic 的 basic-info 信息
|
||||
*/
|
||||
TopicBasicDTO getTopicBasicDTO(Long clusterId, String topicName);
|
||||
|
||||
/**
|
||||
* 获取Topic的PartitionState信息
|
||||
*/
|
||||
List<TopicPartitionDTO> getTopicPartitionDTO(ClusterDO cluster, String topicName, Boolean needOffsets);
|
||||
|
||||
/**
|
||||
* 得到topic流量信息
|
||||
*/
|
||||
TopicMetrics getTopicMetrics(Long clusterId, String topicName, List<String> specifiedFieldList);
|
||||
|
||||
/**
|
||||
* 获取Topic的分区的offset
|
||||
*/
|
||||
Map<TopicPartition, Long> getTopicPartitionOffset(ClusterDO cluster, String topicName);
|
||||
|
||||
/**
|
||||
* 获取Topic概览信息
|
||||
*/
|
||||
List<TopicOverviewDTO> getTopicOverviewDTOList(Long clusterId,
|
||||
Integer filterBrokerId,
|
||||
List<String> filterTopicNameList);
|
||||
|
||||
/**
|
||||
* 获取指定时间的offset信息
|
||||
*/
|
||||
List<PartitionOffsetDTO> getPartitionOffsetList(ClusterDO cluster, String topicName, Long timestamp);
|
||||
|
||||
Map<String, List<PartitionState>> getTopicPartitionState(Long clusterId, Integer filterBrokerId);
|
||||
|
||||
/**
|
||||
* 数据采样
|
||||
*/
|
||||
List<String> fetchTopicData(ClusterDO cluster, List<TopicPartition> topicPartitionList, int timeout, int maxMsgNum, long offset, boolean truncate);
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
|
||||
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
|
||||
public interface ZookeeperService {
|
||||
/**
|
||||
* Get the topic-level config Properties
|
||||
*/
|
||||
Properties getTopicProperties(Long clusterId, String topicName) throws ConfigException;
|
||||
|
||||
/**
|
||||
* Get the PartitionState list of a topic
|
||||
*/
|
||||
List<PartitionState> getTopicPartitionState(Long clusterId, String topicName) throws ConfigException;
|
||||
|
||||
Long getRetentionTime(Long clusterId, String topicName);
|
||||
}
|
||||
@@ -0,0 +1,127 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.PreferredReplicaElectEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.AdminPreferredReplicaElectService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.TopicService;
|
||||
import kafka.admin.AdminOperationException;
|
||||
import kafka.admin.PreferredReplicaLeaderElectionCommand;
|
||||
import kafka.utils.ZkUtils;
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
 * Preferred replica election
 * @author zengqiao
 * @date 2019/11/26.
 */
|
||||
@Service("adminPreferredReplicaElectService")
|
||||
public class AdminPreferredReplicaElectServiceImpl implements AdminPreferredReplicaElectService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(AdminPreferredReplicaElectServiceImpl.class);
|
||||
|
||||
private static final int DEFAULT_SESSION_TIMEOUT = 90000;
|
||||
|
||||
@Autowired
|
||||
private TopicService topicService;
|
||||
|
||||
@Override
|
||||
public PreferredReplicaElectEnum preferredReplicaElectionStatus(ClusterDO clusterDO) {
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(),
|
||||
DEFAULT_SESSION_TIMEOUT,
|
||||
DEFAULT_SESSION_TIMEOUT,
|
||||
JaasUtils.isZkSecurityEnabled()
|
||||
);
|
||||
if (zkUtils.pathExists(ZkUtils.PreferredReplicaLeaderElectionPath())) {
|
||||
return PreferredReplicaElectEnum.RUNNING;
|
||||
}
|
||||
return PreferredReplicaElectEnum.SUCCESS;
|
||||
} catch (Exception e) {
|
||||
return PreferredReplicaElectEnum.UNKNOWN;
|
||||
} finally {
|
||||
if (null != zkUtils) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public PreferredReplicaElectEnum preferredReplicaElection(ClusterDO clusterDO, String operator) {
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(),
|
||||
DEFAULT_SESSION_TIMEOUT,
|
||||
DEFAULT_SESSION_TIMEOUT,
|
||||
JaasUtils.isZkSecurityEnabled()
|
||||
);
|
||||
PreferredReplicaLeaderElectionCommand preferredReplicaElectionCommand = new PreferredReplicaLeaderElectionCommand(zkUtils, zkUtils.getAllPartitions());
|
||||
preferredReplicaElectionCommand.moveLeaderToPreferredReplica();
|
||||
} catch (AdminOperationException e) {
|
||||
|
||||
} catch (Throwable t) {
|
||||
|
||||
} finally {
|
||||
if (null != zkUtils) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
return PreferredReplicaElectEnum.SUCCESS;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PreferredReplicaElectEnum preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator) {
|
||||
BrokerMetadata brokerMetadata = ClusterMetadataManager.getBrokerMetadata(clusterDO.getId(), brokerId);
|
||||
if (null == brokerMetadata) {
|
||||
return PreferredReplicaElectEnum.PARAM_ILLEGAL;
|
||||
}
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
|
||||
if (partitionMap == null || partitionMap.isEmpty()) {
|
||||
return PreferredReplicaElectEnum.SUCCESS;
|
||||
}
|
||||
String preferredReplicaElectString = convert2preferredReplicaElectString(partitionMap);
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(),
|
||||
DEFAULT_SESSION_TIMEOUT,
|
||||
DEFAULT_SESSION_TIMEOUT,
|
||||
JaasUtils.isZkSecurityEnabled()
|
||||
);
|
||||
PreferredReplicaLeaderElectionCommand preferredReplicaElectionCommand = new PreferredReplicaLeaderElectionCommand(zkUtils, PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(preferredReplicaElectString));
|
||||
preferredReplicaElectionCommand.moveLeaderToPreferredReplica();
|
||||
} catch (Exception e) {
|
||||
return PreferredReplicaElectEnum.UNKNOWN;
|
||||
} finally {
|
||||
if (zkUtils != null) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
return PreferredReplicaElectEnum.SUCCESS;
|
||||
}
|
||||
|
||||
private String convert2preferredReplicaElectString(Map<String, List<Integer>> topicNamePartitionIdMap) {
|
||||
List<Map<String, Object>> metaList = new ArrayList<>();
|
||||
for(Map.Entry<String, List<Integer>> entry : topicNamePartitionIdMap.entrySet()){
|
||||
if (entry.getValue() == null || entry.getValue().isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
for (Integer partitionId: entry.getValue()) {
|
||||
Map<String, Object> params = new HashMap<>();
|
||||
params.put("topic", entry.getKey());
|
||||
params.put("partition", partitionId);
|
||||
metaList.add(params);
|
||||
}
|
||||
}
|
||||
Map<String, Object> result = new HashMap<>();
|
||||
result.put("partitions", metaList);
|
||||
return JSON.toJSONString(result);
|
||||
}
|
||||
}
|
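For reference, a standalone sketch (editorial addition) of the JSON payload that convert2preferredReplicaElectString builds above: a "partitions" array of {topic, partition} entries, serialized with fastjson. The topic name used here is a placeholder.

import com.alibaba.fastjson.JSON;

import java.util.*;

public class PreferredReplicaElectPayloadSketch {
    public static void main(String[] args) {
        Map<String, Object> p0 = new HashMap<>();
        p0.put("topic", "test-topic");
        p0.put("partition", 0);

        Map<String, Object> result = new HashMap<>();
        result.put("partitions", Collections.singletonList(p0));

        // Prints a payload of the form {"partitions":[{"partition":0,"topic":"test-topic"}]}
        // (fastjson decides the key order; the structure is what matters).
        System.out.println(JSON.toJSONString(result));
    }
}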
||||
@@ -0,0 +1,191 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.AdminTopicStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.OperationEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.OperationHistoryDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.dao.OperationHistoryDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.TopicDao;
|
||||
import com.xiaojukeji.kafka.manager.service.service.AdminTopicService;
|
||||
import com.xiaojukeji.kafka.manager.service.utils.BrokerMetadataUtil;
|
||||
import kafka.admin.AdminOperationException;
|
||||
import kafka.admin.AdminUtils;
|
||||
import kafka.admin.BrokerMetadata;
|
||||
import kafka.common.TopicAndPartition;
|
||||
import kafka.utils.ZkUtils;
|
||||
import org.I0Itec.zkclient.exception.ZkNodeExistsException;
|
||||
import org.apache.kafka.common.errors.*;
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import scala.collection.JavaConversions;
|
||||
import scala.collection.Seq;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2017/7/21
|
||||
*/
|
||||
@Service("adminTopicService")
|
||||
public class AdminTopicServiceImpl implements AdminTopicService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(AdminTopicServiceImpl.class);
|
||||
|
||||
private static final int DEFAULT_SESSION_TIMEOUT = 30000;
|
||||
|
||||
@Autowired
|
||||
private TopicDao topicDao;
|
||||
|
||||
@Autowired
|
||||
private OperationHistoryDao operationHistoryDao;
|
||||
|
||||
@Override
|
||||
public AdminTopicStatusEnum createTopic(ClusterDO clusterDO, TopicMetadata topicMetadata, TopicDO topicDO, Properties topicConfig, String operator) {
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(), DEFAULT_SESSION_TIMEOUT, DEFAULT_SESSION_TIMEOUT, JaasUtils.isZkSecurityEnabled());
|
||||
|
||||
scala.collection.Seq<Object> brokerListSeq = JavaConversions.asScalaBuffer(new ArrayList<Object>(topicMetadata.getBrokerIdSet())).toSeq();
|
||||
scala.collection.Seq<kafka.admin.BrokerMetadata> brokerMetadataListSeq = BrokerMetadataUtil.convert2BrokerMetadata(brokerListSeq);
|
||||
scala.collection.Map<Object, scala.collection.Seq<Object>> replicaAssignment = AdminUtils.assignReplicasToBrokers(brokerMetadataListSeq, topicMetadata.getPartitionNum(), topicMetadata.getReplicaNum(), -1, -1);
|
||||
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topicMetadata.getTopic(), replicaAssignment, topicConfig, false);
|
||||
} catch (NullPointerException e) {
|
||||
return AdminTopicStatusEnum.PARAM_NULL_POINTER;
|
||||
} catch (InvalidPartitionsException e) {
|
||||
return AdminTopicStatusEnum.PARTITION_NUM_ILLEGAL;
|
||||
} catch (InvalidReplicationFactorException e) {
|
||||
return AdminTopicStatusEnum.BROKER_NUM_NOT_ENOUGH;
|
||||
} catch (TopicExistsException | ZkNodeExistsException e) {
|
||||
return AdminTopicStatusEnum.TOPIC_EXISTED;
|
||||
} catch (InvalidTopicException e) {
|
||||
return AdminTopicStatusEnum.TOPIC_NAME_ILLEGAL;
|
||||
} catch (Throwable t) {
|
||||
return AdminTopicStatusEnum.UNKNOWN_ERROR;
|
||||
} finally {
|
||||
if (zkUtils != null) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Topic info and the operation record to the DB
|
||||
try {
|
||||
OperationHistoryDO operationHistoryDO = OperationHistoryDO.newInstance(topicDO.getClusterId(), topicDO.getTopicName(), operator, OperationEnum.CREATE_TOPIC.message);
|
||||
operationHistoryDao.insert(operationHistoryDO);
|
||||
topicDao.replace(topicDO);
|
||||
} catch (Exception e) {
|
||||
return AdminTopicStatusEnum.REPLACE_DB_FAILED;
|
||||
}
|
||||
return AdminTopicStatusEnum.SUCCESS;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AdminTopicStatusEnum deleteTopic(ClusterDO clusterDO, String topicName, String operator) {
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(), DEFAULT_SESSION_TIMEOUT, DEFAULT_SESSION_TIMEOUT, JaasUtils.isZkSecurityEnabled());
|
||||
AdminUtils.deleteTopic(zkUtils, topicName);
|
||||
} catch (UnknownTopicOrPartitionException e) {
|
||||
return AdminTopicStatusEnum.UNKNOWN_TOPIC_PARTITION;
|
||||
} catch (ZkNodeExistsException e) {
|
||||
return AdminTopicStatusEnum.TOPIC_IN_DELETING;
|
||||
} catch (Throwable t) {
|
||||
return AdminTopicStatusEnum.UNKNOWN_ERROR;
|
||||
} finally {
|
||||
if (zkUtils != null) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Topic info and the operation record to the DB
|
||||
try {
|
||||
OperationHistoryDO operationHistoryDO = OperationHistoryDO.newInstance(clusterDO.getId(), topicName, operator, OperationEnum.DELETE_TOPIC.message);
|
||||
operationHistoryDao.insert(operationHistoryDO);
|
||||
topicDao.deleteByName(clusterDO.getId(), topicName);
|
||||
} catch (Exception e) {
|
||||
return AdminTopicStatusEnum.REPLACE_DB_FAILED;
|
||||
}
|
||||
return AdminTopicStatusEnum.SUCCESS;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AdminTopicStatusEnum modifyTopic(ClusterDO clusterDO, TopicDO topicDO, Properties topicConfig, String operator) {
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(), DEFAULT_SESSION_TIMEOUT, DEFAULT_SESSION_TIMEOUT, JaasUtils.isZkSecurityEnabled());
|
||||
AdminUtils.changeTopicConfig(zkUtils, topicDO.getTopicName(), topicConfig);
|
||||
} catch (AdminOperationException e) {
|
||||
return AdminTopicStatusEnum.UNKNOWN_TOPIC_PARTITION;
|
||||
} catch (InvalidConfigurationException e) {
|
||||
return AdminTopicStatusEnum.TOPIC_CONFIG_ILLEGAL;
|
||||
} catch (Throwable t) {
|
||||
return AdminTopicStatusEnum.UNKNOWN_ERROR;
|
||||
} finally {
|
||||
if (zkUtils != null) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Topic info and the operation record to the DB
|
||||
try {
|
||||
OperationHistoryDO operationHistoryDO = OperationHistoryDO.newInstance(clusterDO.getId(), topicDO.getTopicName(), operator, OperationEnum.MODIFY_TOPIC_CONFIG.message);
|
||||
operationHistoryDao.insert(operationHistoryDO);
|
||||
topicDao.replace(topicDO);
|
||||
} catch (Exception e) {
|
||||
return AdminTopicStatusEnum.REPLACE_DB_FAILED;
|
||||
}
|
||||
return AdminTopicStatusEnum.SUCCESS;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AdminTopicStatusEnum expandTopic(ClusterDO clusterDO, TopicMetadata topicMetadata, String operator) {
|
||||
ZkUtils zkUtils = null;
|
||||
try {
|
||||
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(), DEFAULT_SESSION_TIMEOUT, DEFAULT_SESSION_TIMEOUT, JaasUtils.isZkSecurityEnabled());
|
||||
|
||||
List<String> topicList = new ArrayList<String>();
|
||||
topicList.add(topicMetadata.getTopic());
|
||||
scala.collection.mutable.Map<TopicAndPartition, scala.collection.Seq<Object>> existingReplicaList = zkUtils.getReplicaAssignmentForTopics(JavaConversions.asScalaBuffer(topicList));
|
||||
Map<TopicAndPartition, scala.collection.Seq<Object>> javaMap = JavaConversions.asJavaMap(existingReplicaList);
|
||||
|
||||
// Rebuild the partition assignment
|
||||
Map<Object, scala.collection.Seq<Object>> targetMap = new HashMap<Object, scala.collection.Seq<Object>>();
|
||||
|
||||
scala.collection.Map<Object, scala.collection.Seq<Object>> newPartitionReplicaList = null;
|
||||
|
||||
Seq<Object> brokerListSeq = JavaConversions.asScalaBuffer(new ArrayList<Object>(topicMetadata.getBrokerIdSet())).toSeq();
|
||||
Seq<BrokerMetadata> brokerMetadataSeq = BrokerMetadataUtil.convert2BrokerMetadata(brokerListSeq);
|
||||
newPartitionReplicaList = AdminUtils.assignReplicasToBrokers(brokerMetadataSeq, topicMetadata.getPartitionNum(), existingReplicaList.head()._2().size(), (Integer) existingReplicaList.head()._2().head(), existingReplicaList.size());
|
||||
// Convert to the target map: the final map is keyed as [partition, seq of that partition's replicas]
|
||||
for (Map.Entry<Object, scala.collection.Seq<Object>> entry : JavaConversions.asJavaMap(newPartitionReplicaList).entrySet()) {
|
||||
targetMap.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
|
||||
// Put the existing partition info into targetMap
|
||||
for (Map.Entry<TopicAndPartition, scala.collection.Seq<Object>> entry : javaMap.entrySet()) {
|
||||
targetMap.put(entry.getKey().partition(), entry.getValue());
|
||||
}
|
||||
|
||||
// Update the topic
|
||||
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topicMetadata.getTopic(), JavaConversions.asScalaMap(targetMap), new Properties(), true);
|
||||
} catch (Throwable t) {
|
||||
return AdminTopicStatusEnum.UNKNOWN_ERROR;
|
||||
} finally {
|
||||
if (zkUtils != null) {
|
||||
zkUtils.close();
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Topic info and the operation record to the DB
|
||||
try {
|
||||
OperationHistoryDO operationHistoryDO = OperationHistoryDO.newInstance(clusterDO.getId(), topicMetadata.getTopic(), operator, OperationEnum.EXPAND_TOPIC_PARTITION.message);
|
||||
operationHistoryDao.insert(operationHistoryDO);
|
||||
} catch (Exception e) {
|
||||
return AdminTopicStatusEnum.REPLACE_DB_FAILED;
|
||||
}
|
||||
return AdminTopicStatusEnum.SUCCESS;
|
||||
}
|
||||
}
|
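A hedged usage sketch (editorial addition) of the createTopic path above: the caller builds a Properties object with topic-level overrides and hands it to AdminTopicService together with the already-prepared TopicMetadata and TopicDO. "retention.ms" and "cleanup.policy" are standard Kafka topic configs; the helper name and the 3-day retention value are illustrative only.

import com.xiaojukeji.kafka.manager.common.entity.bizenum.AdminTopicStatusEnum;
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
import com.xiaojukeji.kafka.manager.service.service.AdminTopicService;

import java.util.Properties;

public class AdminTopicServiceUsageSketch {
    public static boolean createWithRetention(AdminTopicService adminTopicService,
                                              ClusterDO cluster,
                                              TopicMetadata topicMetadata,
                                              TopicDO topicDO,
                                              String operator) {
        // Topic-level overrides; both keys are standard Kafka topic configs.
        Properties topicConfig = new Properties();
        topicConfig.setProperty("retention.ms", String.valueOf(3 * 24 * 60 * 60 * 1000L));
        topicConfig.setProperty("cleanup.policy", "delete");

        AdminTopicStatusEnum status =
                adminTopicService.createTopic(cluster, topicMetadata, topicDO, topicConfig, operator);
        return AdminTopicStatusEnum.SUCCESS.equals(status);
    }
}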
||||
@@ -0,0 +1,137 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.AlarmRuleDao;
|
||||
import com.xiaojukeji.kafka.manager.service.service.AlarmRuleService;
|
||||
import com.xiaojukeji.kafka.manager.service.utils.ListUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.dao.DuplicateKeyException;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
@Service("alarmRuleService")
|
||||
public class AlarmRuleServiceImpl implements AlarmRuleService {
|
||||
private final static Logger logger = LoggerFactory.getLogger(AlarmRuleServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private AlarmRuleDao alarmRuleDao;
|
||||
|
||||
@Override
|
||||
public Result addAlarmRule(AlarmRuleDO alarmRuleDO) {
|
||||
try {
|
||||
alarmRuleDao.insert(alarmRuleDO);
|
||||
} catch (DuplicateKeyException e) {
|
||||
logger.info("addAlarmRule@AlarmRuleManagerServiceImpl, duplicate key, alarm rule:{}.", alarmRuleDO, e);
|
||||
return new Result(StatusCode.PARAM_ERROR, "duplicate alarm name");
|
||||
} catch (Exception e) {
|
||||
logger.error("addAlarmRule@AlarmRuleManagerServiceImpl, add failed, alarm rule:{}.", alarmRuleDO, e);
|
||||
return new Result(StatusCode.MY_SQL_INSERT_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result deleteById(String operator, Long alarmRuleId) {
|
||||
try {
|
||||
AlarmRuleDO alarmRuleDO = alarmRuleDao.getById(alarmRuleId);
|
||||
if (alarmRuleDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal, alarm rule not exist");
|
||||
}
|
||||
List<String> principalList = ListUtils.string2StrList(alarmRuleDO.getPrincipals());
|
||||
if (principalList == null || !principalList.contains(operator)) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "without authority to delete");
|
||||
}
|
||||
return alarmRuleDao.deleteById(alarmRuleId) > 0? new Result(): new Result(StatusCode.PARAM_ERROR, "alarm id illegal");
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteById@AlarmRuleManagerServiceImpl, delete failed, alarmRuleId:{}.", alarmRuleId, e);
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result updateById(String operator, AlarmRuleDO alarmRuleDO) {
|
||||
Result result = checkAlarmRuleDOIllegal(alarmRuleDO);
|
||||
if (!StatusCode.SUCCESS.equals(result.getCode())) {
|
||||
return result;
|
||||
}
|
||||
if (alarmRuleDO.getId() == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "id is null");
|
||||
}
|
||||
try {
|
||||
AlarmRuleDO oldAlarmRuleDO = alarmRuleDao.getById(alarmRuleDO.getId());
|
||||
if (oldAlarmRuleDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal, alarm rule not exist");
|
||||
}
|
||||
List<String> principalList = ListUtils.string2StrList(oldAlarmRuleDO.getPrincipals());
|
||||
if (principalList == null || !principalList.contains(operator)) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "without authority to update");
|
||||
}
|
||||
if (alarmRuleDO.getStatus() == null) {
|
||||
alarmRuleDO.setStatus(oldAlarmRuleDO.getStatus());
|
||||
}
|
||||
return alarmRuleDao.updateById(alarmRuleDO)> 0? new Result(): new Result(StatusCode.PARAM_ERROR, "alarm id illegal");
|
||||
} catch (Exception e) {
|
||||
logger.error("updateById@AlarmRuleManagerServiceImpl, update failed, alarmRule:{}.", alarmRuleDO, e);
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public AlarmRuleDO getById(Long id) {
|
||||
try {
|
||||
return alarmRuleDao.getById(id);
|
||||
} catch (Exception e) {
|
||||
logger.error("getById@AlarmRuleManagerServiceImpl, get failed, alarmId:{}.", id, e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<AlarmRuleDO> listAll() {
|
||||
try {
|
||||
return alarmRuleDao.listAll();
|
||||
} catch (Exception e) {
|
||||
logger.error("listAll@AlarmRuleManagerServiceImpl, list all failed.", e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether the configured parameters are valid
|
||||
*/
|
||||
private Result checkAlarmRuleDOIllegal(AlarmRuleDO alarmRuleDO) {
|
||||
if (alarmRuleDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param empty");
|
||||
}
|
||||
// if (!RuleType.legal(alarmRuleDO.getRuleType())){
|
||||
// return new Result(StatusCode.PARAM_ERROR, "ruleType error");
|
||||
// }
|
||||
// if (!NotifyType.legal(alarmRuleDO.getNotifyType())){
|
||||
// return new Result(StatusCode.PARAM_ERROR, "notifyType error");
|
||||
// }
|
||||
// if (!MetricType.legal(alarmRuleDO.getMetricType())){
|
||||
// return new Result(StatusCode.PARAM_ERROR, "metricType error");
|
||||
// }
|
||||
// if (!ConditionType.legal(alarmRuleDO.getConditionType())){
|
||||
// return new Result(StatusCode.PARAM_ERROR, "conditionType error");
|
||||
// }
|
||||
// if (alarmRuleDO.getClusterId() == null) {
|
||||
// return new Result(StatusCode.PARAM_ERROR, "clusterId error");
|
||||
// }
|
||||
if (StringUtils.isEmpty(alarmRuleDO.getPrincipals())) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "principals is empty");
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
}
|
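The delete and update paths above gate on whether the operator appears in the rule's principals. A tiny standalone sketch (editorial addition) of that check, assuming principals are stored as a comma-separated string such as "userA,userB" (which is how ListUtils.string2StrList appears to be used, though its exact behavior is not shown here):

import java.util.Arrays;
import java.util.List;

public class PrincipalCheckSketch {
    // Mirrors the authority check in AlarmRuleServiceImpl under the comma-separated assumption.
    public static boolean hasAuthority(String principals, String operator) {
        if (principals == null || operator == null) {
            return false;
        }
        List<String> principalList = Arrays.asList(principals.split(","));
        return principalList.contains(operator);
    }

    public static void main(String[] args) {
        System.out.println(hasAuthority("userA,userB", "userA")); // true
        System.out.println(hasAuthority("userA,userB", "userC")); // false
    }
}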
||||
@@ -0,0 +1,150 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.analysis.AnalysisBrokerDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.analysis.AnalysisTopicDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.AnalysisService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.JmxService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* Topic traffic-jitter analysis
|
||||
* @author huangyiminghappy@163.com, zengqaio_cn@163.com
|
||||
* @date 2019-06-14
|
||||
*/
|
||||
@Service("analysisService")
|
||||
public class AnalysisServiceImpl implements AnalysisService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(AnalysisServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private JmxService jmxService;
|
||||
|
||||
private static final Integer TOP_TOPIC_NUM = 5;
|
||||
|
||||
private static final Integer MIN_TOP_TOPIC_VALUE = 2;
|
||||
|
||||
@Override
|
||||
public AnalysisBrokerDTO doAnalysisBroker(Long clusterId, Integer brokerId) {
|
||||
AnalysisBrokerDTO analysisBrokerDTO = new AnalysisBrokerDTO();
|
||||
analysisBrokerDTO.setClusterId(clusterId);
|
||||
analysisBrokerDTO.setBrokerId(brokerId);
|
||||
analysisBrokerDTO.setBaseTime(System.currentTimeMillis());
|
||||
analysisBrokerDTO.setTopicAnalysisVOList(new ArrayList<>());
|
||||
|
||||
BrokerMetrics brokerMetrics = jmxService.getSpecifiedBrokerMetricsFromJmx(clusterId, brokerId, BrokerMetrics.getFieldNameList(MetricsType.BROKER_ANALYSIS_METRICS), true);
|
||||
if (brokerMetrics == null) {
|
||||
return analysisBrokerDTO;
|
||||
}
|
||||
analysisBrokerDTO.setBytesIn(brokerMetrics.getBytesInPerSec());
|
||||
analysisBrokerDTO.setBytesOut(brokerMetrics.getBytesOutPerSec());
|
||||
analysisBrokerDTO.setMessagesIn(brokerMetrics.getMessagesInPerSec());
|
||||
analysisBrokerDTO.setTotalProduceRequests(brokerMetrics.getTotalProduceRequestsPerSec());
|
||||
analysisBrokerDTO.setTotalFetchRequests(brokerMetrics.getTotalFetchRequestsPerSec());
|
||||
|
||||
List<TopicMetrics> topicMetricsList = new ArrayList<>();
|
||||
for (String topicName: ClusterMetadataManager.getTopicNameList(clusterId)) {
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
|
||||
if (topicMetadata == null || !topicMetadata.getBrokerIdSet().contains(brokerId)) {
|
||||
continue;
|
||||
}
|
||||
TopicMetrics topicMetrics = jmxService.getSpecifiedBrokerTopicMetricsFromJmx(clusterId, brokerId, topicName, TopicMetrics.getFieldNameList(MetricsType.BROKER_TOPIC_ANALYSIS_METRICS), true);
|
||||
if (topicMetrics == null) {
|
||||
continue;
|
||||
}
|
||||
if (topicMetrics.getBytesInPerSec() < MIN_TOP_TOPIC_VALUE.doubleValue()
|
||||
|| topicMetrics.getTotalProduceRequestsPerSec() < MIN_TOP_TOPIC_VALUE.doubleValue()) {
|
||||
continue;
|
||||
}
|
||||
topicMetricsList.add(topicMetrics);
|
||||
}
|
||||
Set<String> topicNameSet = new HashSet<>();
|
||||
supplyAnalysisTopicDTOList(analysisBrokerDTO, topicNameSet, topicMetricsList, "BytesIn");
|
||||
supplyAnalysisTopicDTOList(analysisBrokerDTO, topicNameSet, topicMetricsList, "TotalProduceRequest");
|
||||
return analysisBrokerDTO;
|
||||
}
|
||||
|
||||
private void supplyAnalysisTopicDTOList(AnalysisBrokerDTO analysisBrokerDTO,
|
||||
Set<String> topicNameSet,
|
||||
List<TopicMetrics> topicMetricsList,
|
||||
String fieldName) {
|
||||
|
||||
Collections.sort(topicMetricsList, new Comparator<TopicMetrics>() {
|
||||
@Override
|
||||
public int compare(TopicMetrics t1, TopicMetrics t2) {
|
||||
double diff = 0;
|
||||
switch (fieldName) {
|
||||
case "BytesIn":
|
||||
diff = t1.getBytesInPerSec() - t2.getBytesInPerSec();
|
||||
break;
|
||||
case "TotalProduceRequest":
|
||||
diff = t1.getTotalProduceRequestsPerSec() - t2.getTotalProduceRequestsPerSec();
|
||||
break;
|
||||
default:
|
||||
diff = 0;
|
||||
break;
|
||||
}
|
||||
if (diff > 0) {
|
||||
return -1;
|
||||
} else if (diff < 0) {
|
||||
return 1;
|
||||
}
|
||||
return t1.getTopicName().compareTo(t2.getTopicName());
|
||||
}
|
||||
});
|
||||
|
||||
for (int i = 0; i < TOP_TOPIC_NUM && i < topicMetricsList.size(); ++i) {
|
||||
TopicMetrics topicMetrics = topicMetricsList.get(i);
|
||||
if (topicNameSet.contains(topicMetrics.getTopicName())) {
|
||||
continue;
|
||||
}
|
||||
AnalysisTopicDTO analysisTopicDTO = new AnalysisTopicDTO();
|
||||
analysisTopicDTO.setTopicName(topicMetrics.getTopicName());
|
||||
analysisTopicDTO.setBytesIn(topicMetrics.getBytesInPerSec());
|
||||
if (analysisBrokerDTO.getBytesIn() <= 0) {
|
||||
analysisTopicDTO.setBytesInRate(0.0);
|
||||
} else {
|
||||
analysisTopicDTO.setBytesInRate(topicMetrics.getBytesInPerSec() / analysisBrokerDTO.getBytesIn());
|
||||
}
|
||||
|
||||
analysisTopicDTO.setBytesOut(topicMetrics.getBytesOutPerSec());
|
||||
if (analysisBrokerDTO.getBytesOut() <= 0) {
|
||||
analysisTopicDTO.setBytesOutRate(0.0);
|
||||
} else {
|
||||
analysisTopicDTO.setBytesOutRate(topicMetrics.getBytesOutPerSec() / analysisBrokerDTO.getBytesOut());
|
||||
}
|
||||
|
||||
analysisTopicDTO.setMessagesIn(topicMetrics.getMessagesInPerSec());
|
||||
if (analysisBrokerDTO.getMessagesIn() <= 0) {
|
||||
analysisTopicDTO.setMessagesInRate(0.0);
|
||||
} else {
|
||||
analysisTopicDTO.setMessagesInRate(topicMetrics.getMessagesInPerSec() / analysisBrokerDTO.getMessagesIn());
|
||||
}
|
||||
|
||||
analysisTopicDTO.setTotalFetchRequests(topicMetrics.getTotalFetchRequestsPerSec());
|
||||
if (analysisBrokerDTO.getTotalFetchRequests() <= 0) {
|
||||
analysisTopicDTO.setTotalFetchRequestsRate(0.0);
|
||||
} else {
|
||||
analysisTopicDTO.setTotalFetchRequestsRate(topicMetrics.getTotalFetchRequestsPerSec() / analysisBrokerDTO.getTotalFetchRequests());
|
||||
}
|
||||
|
||||
analysisTopicDTO.setTotalProduceRequests(topicMetrics.getTotalProduceRequestsPerSec());
|
||||
if (analysisBrokerDTO.getTotalProduceRequests() <= 0) {
|
||||
analysisTopicDTO.setTotalProduceRequestsRate(0.0);
|
||||
} else {
|
||||
analysisTopicDTO.setTotalProduceRequestsRate(topicMetrics.getTotalProduceRequestsPerSec() / analysisBrokerDTO.getTotalProduceRequests());
|
||||
}
|
||||
topicNameSet.add(topicMetrics.getTopicName());
|
||||
analysisBrokerDTO.getTopicAnalysisVOList().add(analysisTopicDTO);
|
||||
}
|
||||
}
|
||||
}
|
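The anonymous comparator above sorts topics in descending order of the chosen metric and breaks ties by topic name. A compact equivalent for the "BytesIn" branch using java.util.Comparator combinators (editorial sketch; it assumes getBytesInPerSec never returns null):

import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;

import java.util.Comparator;
import java.util.List;

public class TopTopicSortSketch {
    // Descending by bytes-in per second, ties broken by topic name.
    public static void sortByBytesInDesc(List<TopicMetrics> topicMetricsList) {
        topicMetricsList.sort(
                Comparator.comparingDouble(TopicMetrics::getBytesInPerSec).reversed()
                        .thenComparing(TopicMetrics::getTopicName));
    }
}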
||||
@@ -0,0 +1,133 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.BrokerBasicDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.BrokerOverviewDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.BrokerOverallDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.BrokerDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
|
||||
import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.BrokerDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.BrokerService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.JmxService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author tukun, zengqiao
|
||||
* @date 2015/11/9
|
||||
*/
|
||||
@Service("brokerService")
|
||||
public class BrokerServiceImpl implements BrokerService {
|
||||
private final static Logger logger = LoggerFactory.getLogger(BrokerServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private BrokerMetricsDao brokerMetricDao;
|
||||
|
||||
@Autowired
|
||||
private BrokerDao brokerDao;
|
||||
|
||||
@Autowired
|
||||
private JmxService jmxService;
|
||||
|
||||
@Override
|
||||
public List<BrokerOverviewDTO> getBrokerOverviewList(Long clusterId, List<String> specifiedFieldList, boolean simple) {
|
||||
Map<Integer, BrokerMetrics> brokerMap = getSpecifiedBrokerMetrics(clusterId, specifiedFieldList, simple);
|
||||
|
||||
List<BrokerOverviewDTO> brokerOverviewDTOList = new ArrayList<>();
|
||||
for (Integer brokerId: ClusterMetadataManager.getBrokerIdList(clusterId)) {
|
||||
BrokerMetadata brokerMetadata = ClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
|
||||
if (brokerMetadata == null) {
|
||||
continue;
|
||||
}
|
||||
brokerOverviewDTOList.add(BrokerOverviewDTO.newInstance(brokerMetadata, brokerMap.get(brokerId)));
|
||||
}
|
||||
|
||||
List<BrokerDO> deadBrokerList = brokerDao.getDead(clusterId);
|
||||
if (deadBrokerList == null) {
|
||||
return brokerOverviewDTOList;
|
||||
}
|
||||
for (BrokerDO brokerDO: deadBrokerList) {
|
||||
BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO();
|
||||
brokerOverviewDTO.setBrokerId(brokerDO.getBrokerId());
|
||||
brokerOverviewDTO.setHost(brokerDO.getHost());
|
||||
brokerOverviewDTO.setPort(brokerDO.getPort());
|
||||
brokerOverviewDTO.setJmxPort(-1);
|
||||
brokerOverviewDTO.setStartTime(brokerDO.getGmtModify().getTime());
|
||||
brokerOverviewDTO.setStatus(DBStatusEnum.DELETED.getStatus());
|
||||
brokerOverviewDTOList.add(brokerOverviewDTO);
|
||||
}
|
||||
return brokerOverviewDTOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BrokerOverallDTO> getBrokerOverallList(Long clusterId, List<String> specifiedFieldList) {
|
||||
Map<Integer, BrokerMetrics> brokerMap = getSpecifiedBrokerMetrics(clusterId, specifiedFieldList, true);
|
||||
|
||||
List<BrokerOverallDTO> brokerOverallDTOList = new ArrayList<>();
|
||||
for (Integer brokerId: ClusterMetadataManager.getBrokerIdList(clusterId)) {
|
||||
BrokerMetadata brokerMetadata = ClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
|
||||
if (brokerMetadata == null) {
|
||||
continue;
|
||||
}
|
||||
brokerOverallDTOList.add(BrokerOverallDTO.newInstance(brokerMetadata, brokerMap.get(brokerId)));
|
||||
}
|
||||
return brokerOverallDTOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<Integer, BrokerMetrics> getSpecifiedBrokerMetrics(Long clusterId, List<String> specifiedFieldList, boolean simple) {
|
||||
Map<Integer, BrokerMetrics> brokerMap = new HashMap<>();
|
||||
for (Integer brokerId: ClusterMetadataManager.getBrokerIdList(clusterId)) {
|
||||
BrokerMetrics brokerMetrics = getSpecifiedBrokerMetrics(clusterId, brokerId, specifiedFieldList, simple);
|
||||
if (brokerMetrics == null) {
|
||||
continue;
|
||||
}
|
||||
brokerMap.put(brokerId, brokerMetrics);
|
||||
}
|
||||
return brokerMap;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BrokerMetrics getSpecifiedBrokerMetrics(Long clusterId, Integer brokerId, List<String> specifiedFieldList, Boolean simple) {
|
||||
if (clusterId == null || brokerId == null || specifiedFieldList == null) {
|
||||
return null;
|
||||
}
|
||||
BrokerMetrics brokerMetrics = jmxService.getSpecifiedBrokerMetricsFromJmx(clusterId, brokerId, specifiedFieldList, simple);
|
||||
if (brokerMetrics == null) {
|
||||
return brokerMetrics;
|
||||
}
|
||||
brokerMetrics.setClusterId(clusterId);
|
||||
brokerMetrics.setBrokerId(brokerId);
|
||||
return brokerMetrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BrokerMetrics> getBrokerMetricsByInterval(Long clusterId, Integer brokerId, Date startTime, Date endTime) {
|
||||
return brokerMetricDao.getBrokerMetricsByInterval(clusterId, brokerId, startTime, endTime);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BrokerBasicDTO getBrokerBasicDTO(Long clusterId, Integer brokerId, List<String> specifiedFieldList) {
|
||||
BrokerMetadata brokerMetadata = ClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
|
||||
if (brokerMetadata == null) {
|
||||
return null;
|
||||
}
|
||||
BrokerMetrics brokerMetrics = jmxService.getSpecifiedBrokerMetricsFromJmx(clusterId, brokerId, specifiedFieldList, true);
|
||||
BrokerBasicDTO brokerBasicDTO = new BrokerBasicDTO();
|
||||
brokerBasicDTO.setHost(brokerMetadata.getHost());
|
||||
brokerBasicDTO.setPort(brokerMetadata.getPort());
|
||||
brokerBasicDTO.setJmxPort(brokerMetadata.getJmxPort());
|
||||
brokerBasicDTO.setStartTime(brokerMetadata.getTimestamp());
|
||||
brokerBasicDTO.setPartitionCount(brokerMetrics.getPartitionCount());
|
||||
brokerBasicDTO.setLeaderCount(brokerMetrics.getLeaderCount());
|
||||
brokerBasicDTO.setTopicNum(ClusterMetadataManager.getTopicNameList(clusterId).size());
|
||||
return brokerBasicDTO;
|
||||
}
|
||||
}
|
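A small sketch (editorial addition) of aggregating per-broker metrics through the BrokerService above, e.g. summing cluster-wide bytes-in. It reuses the field list used by the analysis code earlier; it assumes getBytesInPerSec is populated for that field set and is non-null.

import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.service.service.BrokerService;

import java.util.Map;

public class ClusterBytesInSketch {
    public static double sumBytesIn(BrokerService brokerService, Long clusterId) {
        Map<Integer, BrokerMetrics> brokerMap = brokerService.getSpecifiedBrokerMetrics(
                clusterId,
                BrokerMetrics.getFieldNameList(MetricsType.BROKER_ANALYSIS_METRICS),
                true);
        double total = 0.0;
        for (BrokerMetrics metrics : brokerMap.values()) {
            total += metrics.getBytesInPerSec();
        }
        return total;
    }
}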
||||
@@ -0,0 +1,139 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterMetricsDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ControllerDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.ClusterDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
|
||||
import org.apache.zookeeper.ZooKeeper;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* ClusterServiceImpl
|
||||
* @author zengqiao
|
||||
* @date 19/4/3
|
||||
*/
|
||||
@Service("clusterService")
|
||||
public class ClusterServiceImpl implements ClusterService {
|
||||
private final static Logger logger = LoggerFactory.getLogger(ClusterServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private ClusterDao clusterDao;
|
||||
|
||||
@Autowired
|
||||
private ClusterMetricsDao clusterMetricsDao;
|
||||
|
||||
@Autowired
|
||||
private ClusterMetadataManager clusterMetadataManager;
|
||||
|
||||
@Autowired
|
||||
private ControllerDao controllerDao;
|
||||
|
||||
@Override
|
||||
public Result addNewCluster(ClusterDO clusterDO, String operator) {
|
||||
if (clusterDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal");
|
||||
}
|
||||
if (!checkZookeeper(clusterDO.getZookeeper())) {
|
||||
logger.error("addNewCluster@ClusterServiceImpl, zookeeper address invalid, cluster:{}", clusterDO);
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal, zookeeper address illegal");
|
||||
}
|
||||
if (clusterDao.insert(clusterDO) <= 0) {
|
||||
return new Result(StatusCode.MY_SQL_INSERT_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
|
||||
boolean status = clusterMetadataManager.addNew(clusterDO);
|
||||
if (!status) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "add zookeeper watch failed");
|
||||
}
|
||||
|
||||
if (clusterDO.getAlarmFlag() == null || clusterDO.getAlarmFlag() <= 0) {
|
||||
return new Result();
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterDO getById(Long clusterId) {
|
||||
if (clusterId == null || clusterId < 0) {
|
||||
return null;
|
||||
}
|
||||
try {
|
||||
return clusterDao.getById(clusterId);
|
||||
} catch (Exception e) {
|
||||
logger.error("getById@ClusterServiceImpl, select failed, clusterId:{}.", clusterId, e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result updateCluster(ClusterDO clusterDO, boolean reload, String operator) {
|
||||
if (clusterDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal");
|
||||
}
|
||||
if (!checkZookeeper(clusterDO.getZookeeper())) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "zookeeper address invalid");
|
||||
}
|
||||
try {
|
||||
clusterDao.updateById(clusterDO);
|
||||
} catch (Exception e) {
|
||||
logger.error("update cluster failed, newCluster:{} reload:{}.", clusterDO, reload, e);
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
if (!reload) {
|
||||
return new Result();
|
||||
}
|
||||
boolean status = clusterMetadataManager.reload(clusterDO);
|
||||
if (!status) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "reload cache failed");
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ClusterDO> listAll() {
|
||||
return clusterDao.listAll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ClusterMetricsDO> getClusterMetricsByInterval(long clusterId, Date startTime, Date endTime) {
|
||||
return clusterMetricsDao.getClusterMetricsByInterval(clusterId, startTime, endTime);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ControllerDO> getKafkaControllerHistory(Long clusterId) {
|
||||
if (clusterId == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return controllerDao.getByClusterId(clusterId);
|
||||
}
|
||||
|
||||
private boolean checkZookeeper(String server) {
|
||||
ZooKeeper zookeeper = null;
|
||||
try {
|
||||
zookeeper = new ZooKeeper(server, 1000, null);
|
||||
} catch (Exception e) {
|
||||
logger.warn("checkZookeeper@ClusterServiceImpl, create ZOOKEEPER instance error.", e);
|
||||
return false;
|
||||
} finally {
|
||||
try {
|
||||
if (zookeeper != null) {
|
||||
zookeeper.close();
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
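checkZookeeper above only verifies that a ZooKeeper client can be constructed for the address; the constructor returns before the session is actually established. A stricter variant (editorial sketch, standard org.apache.zookeeper API only) waits for the session to reach SyncConnected before reporting success:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class ZookeeperConnectivitySketch {
    public static boolean canConnect(String server, int timeoutMs) {
        ZooKeeper zookeeper = null;
        try {
            final CountDownLatch connected = new CountDownLatch(1);
            zookeeper = new ZooKeeper(server, timeoutMs, new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    if (Watcher.Event.KeeperState.SyncConnected.equals(event.getState())) {
                        connected.countDown();
                    }
                }
            });
            // Only report success once the session actually connected.
            return connected.await(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            return false;
        } finally {
            try {
                if (zookeeper != null) {
                    zookeeper.close();
                }
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }
    }
}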
||||
@@ -0,0 +1,460 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.OffsetStoreLocation;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumeDetailDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumerDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.consumer.ConsumerGroupDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.PartitionOffsetDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.DefaultThreadFactory;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkConfigImpl;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ConsumerMetadataCache;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaClientCache;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.TopicService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkPathUtil;
|
||||
import kafka.admin.AdminClient;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import scala.collection.JavaConversions;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.FutureTask;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* @author tukun
|
||||
* @date 2015/11/12
|
||||
*/
|
||||
@Service("consumerService")
|
||||
public class ConsumerServiceImpl implements ConsumerService {
|
||||
private final static Logger logger = LoggerFactory.getLogger(ConsumerServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private TopicService topicService;
|
||||
|
||||
@Autowired
|
||||
private ZookeeperService zkService;
|
||||
|
||||
private final ExecutorService consumerListThreadPool = Executors.newFixedThreadPool(50, new DefaultThreadFactory("ConsumerPool"));
|
||||
|
||||
@Override
|
||||
public List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId) {
|
||||
List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
|
||||
for (OffsetStoreLocation location: OffsetStoreLocation.values()) {
|
||||
Set<String> consumerGroupSet = null;
|
||||
if (OffsetStoreLocation.ZOOKEEPER.equals(location)) {
|
||||
// Get the consumer groups stored in ZK
|
||||
consumerGroupSet = ConsumerMetadataCache.getGroupInZkMap(clusterId);
|
||||
} else if (OffsetStoreLocation.BROKER.equals(location)) {
|
||||
// Get the consumer groups stored in the Broker
|
||||
consumerGroupSet = ConsumerMetadataCache.getGroupInBrokerMap(clusterId);
|
||||
}
|
||||
if (consumerGroupSet == null) {
|
||||
continue;
|
||||
}
|
||||
for (String consumerGroup : consumerGroupSet) {
|
||||
consumerGroupDTOList.add(new ConsumerGroupDTO(clusterId, consumerGroup, location));
}
|
||||
}
|
||||
return consumerGroupDTOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName) {
|
||||
List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
|
||||
|
||||
for (OffsetStoreLocation location: OffsetStoreLocation.values()) {
|
||||
Set<String> consumerGroupSet = null;
|
||||
if (OffsetStoreLocation.ZOOKEEPER.equals(location)) {
|
||||
// Get the consumer groups stored in ZK
|
||||
consumerGroupSet = ConsumerMetadataCache.getTopicConsumerGroupInZk(clusterId, topicName);
|
||||
} else if (OffsetStoreLocation.BROKER.equals(location)) {
|
||||
// Get the consumer groups stored in the Broker
|
||||
consumerGroupSet = ConsumerMetadataCache.getTopicConsumerGroupInBroker(clusterId, topicName);
|
||||
}
|
||||
if (consumerGroupSet == null) {
|
||||
continue;
|
||||
}
|
||||
for (String consumerGroup : consumerGroupSet) {
|
||||
consumerGroupDTOList.add(new ConsumerGroupDTO(clusterId, consumerGroup, location));
|
||||
}
|
||||
}
|
||||
return consumerGroupDTOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumeGroupDTO) {
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterDO.getId(), topicName);
|
||||
if (topicMetadata == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
List<ConsumeDetailDTO> consumerGroupDetailDTOList = null;
|
||||
if (OffsetStoreLocation.ZOOKEEPER.equals(consumeGroupDTO.getOffsetStoreLocation())) {
|
||||
consumerGroupDetailDTOList = getConsumerPartitionStateInZK(clusterDO, topicMetadata, consumeGroupDTO);
|
||||
} else if (OffsetStoreLocation.BROKER.equals(consumeGroupDTO.getOffsetStoreLocation())){
|
||||
consumerGroupDetailDTOList = getConsumerPartitionStateInBroker(clusterDO, topicMetadata, consumeGroupDTO);
|
||||
}
|
||||
if (consumerGroupDetailDTOList == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> topicPartitionLongMap = topicService.getTopicPartitionOffset(clusterDO, topicName);
|
||||
if (topicPartitionLongMap == null) {
|
||||
return consumerGroupDetailDTOList;
|
||||
}
|
||||
for (ConsumeDetailDTO consumerGroupDetailDTO : consumerGroupDetailDTOList) {
|
||||
consumerGroupDetailDTO.setOffset(topicPartitionLongMap.get(new TopicPartition(topicName, consumerGroupDetailDTO.getPartitionId())));
|
||||
}
|
||||
return consumerGroupDetailDTOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getConsumerGroupConsumedTopicList(ClusterDO cluster, ConsumerGroupDTO consumerGroupDTO) {
|
||||
if (cluster == null || consumerGroupDTO == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return ConsumerMetadataCache.getConsumerGroupConsumedTopicList(cluster.getId(),consumerGroupDTO.getOffsetStoreLocation().getLocation(), consumerGroupDTO.getConsumerGroup());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ConsumerDTO> getMonitoredConsumerList(final ClusterDO clusterDO,
|
||||
final Map<String, List<PartitionState>> partitionStateListMap) {
|
||||
List<ConsumerGroupDTO> consumerGroupDTOList = getConsumerGroupList(clusterDO.getId());
|
||||
if (consumerGroupDTOList == null || consumerGroupDTOList.isEmpty()) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
FutureTask<ConsumerDTO>[] taskList = new FutureTask[consumerGroupDTOList.size()];
|
||||
for (int i = 0; i < consumerGroupDTOList.size(); i++) {
|
||||
final ConsumerGroupDTO consumerGroupDTO = consumerGroupDTOList.get(i);
|
||||
taskList[i] = new FutureTask<>(new Callable<ConsumerDTO>() {
|
||||
@Override
|
||||
public ConsumerDTO call() throws Exception {
|
||||
try {
|
||||
return getMonitoredConsumer(clusterDO, consumerGroupDTO, partitionStateListMap);
|
||||
} catch (Exception e) {
|
||||
logger.error("get monitored consumer error, group:{}", consumerGroupDTO.getConsumerGroup(), e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
});
|
||||
consumerListThreadPool.submit(taskList[i]);
|
||||
}
|
||||
|
||||
List<ConsumerDTO> consumerList = new ArrayList<>();
|
||||
for (FutureTask<ConsumerDTO> task : taskList) {
|
||||
ConsumerDTO consumer = null;
|
||||
try {
|
||||
consumer = task.get();
|
||||
} catch (Exception e) {
|
||||
logger.error("getMonitoredConsumerList@ConsumeServiceImpl, ", e);
|
||||
}
|
||||
if (consumer == null) {
|
||||
continue;
|
||||
}
|
||||
consumerList.add(consumer);
|
||||
}
|
||||
return consumerList;
|
||||
}
|
||||
|
||||
private ConsumerDTO getMonitoredConsumer(ClusterDO cluster, ConsumerGroupDTO consumerGroupDTO, Map<String, List<PartitionState>> globalTopicNamePartitionStateListMap) {
|
||||
// Get the partitionState info of every topic under the current consumerGroup
|
||||
Map<String, List<PartitionState>> topicNamePartitionStateListMap = getConsumerGroupPartitionStateList(cluster, consumerGroupDTO, globalTopicNamePartitionStateListMap);
|
||||
|
||||
// Collect the partition info without a corresponding consumer into a single consumer
|
||||
ConsumerDTO consumerDTO = new ConsumerDTO();
|
||||
consumerDTO.setConsumerGroup(consumerGroupDTO.getConsumerGroup());
|
||||
consumerDTO.setLocation(consumerGroupDTO.getOffsetStoreLocation().name());
|
||||
consumerDTO.setTopicPartitionMap(topicNamePartitionStateListMap);
|
||||
return consumerDTO;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Result> resetConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO, List<PartitionOffsetDTO> partitionOffsetDTOList) {
|
||||
Map<TopicPartition, Long> offsetMap = partitionOffsetDTOList.stream().collect(Collectors.toMap(elem -> {return new TopicPartition(topicName, elem.getPartitionId());}, PartitionOffsetDTO::getOffset));
|
||||
List<Result> resultList = new ArrayList<>();
|
||||
|
||||
// Create a KafkaConsumer and correct the offset values
|
||||
KafkaConsumer<String, String> kafkaConsumer = null;
|
||||
try {
|
||||
Properties properties = KafkaClientCache.createProperties(clusterDO, false);
|
||||
properties.setProperty("group.id", consumerGroupDTO.getConsumerGroup());
|
||||
kafkaConsumer = new KafkaConsumer<>(properties);
|
||||
checkAndCorrectPartitionOffset(kafkaConsumer, offsetMap);
|
||||
return resetConsumerOffset(clusterDO, kafkaConsumer, consumerGroupDTO, offsetMap);
|
||||
} catch (Exception e) {
|
||||
logger.error("resetConsumerOffset@ConsumeServiceImpl, create kafka consumer failed, clusterId:{} topicName:{} consumerGroup:{} partition:{}.", clusterDO.getId(), topicName, consumerGroupDTO, partitionOffsetDTOList, e);
|
||||
resultList.add(new Result(StatusCode.OPERATION_ERROR, "reset failed, create KafkaConsumer or check offset failed"));
|
||||
} finally {
|
||||
if (kafkaConsumer != null) {
|
||||
kafkaConsumer.close();
|
||||
}
|
||||
}
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
private List<Result> resetConsumerOffset(ClusterDO cluster, KafkaConsumer<String, String> kafkaConsumer, ConsumerGroupDTO consumerGroupDTO, Map<TopicPartition, Long> offsetMap) {
|
||||
List<Result> resultList = new ArrayList<>();
|
||||
|
||||
for(Map.Entry<TopicPartition, Long> entry: offsetMap.entrySet()){
|
||||
TopicPartition tp = entry.getKey();
|
||||
Long offset = entry.getValue();
|
||||
try {
|
||||
if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetStoreLocation.ZOOKEEPER)) {
|
||||
resetConsumerOffsetInZK(cluster, consumerGroupDTO.getConsumerGroup(), tp, offset);
|
||||
} else if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetStoreLocation.BROKER)) {
|
||||
resetConsumerOffsetInBroker(kafkaConsumer, tp, offset);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("resetConsumerOffset@ConsumeServiceImpl, reset failed, clusterId:{} consumerGroup:{} topic-partition:{}.", cluster.getId(), consumerGroupDTO, tp, e);
|
||||
resultList.add(new Result(StatusCode.OPERATION_ERROR, "reset consumer offset failed"));
continue;
|
||||
}
|
||||
resultList.add(new Result());
|
||||
}
|
||||
return resultList;
|
||||
}
|
||||
|
||||
private void checkAndCorrectPartitionOffset(KafkaConsumer<String, String> kafkaConsumer, Map<TopicPartition, Long> offsetMap) {
|
||||
List<TopicPartition> topicPartitionList = new ArrayList<>(offsetMap.keySet());
|
||||
Map<TopicPartition, Long> beginningOffsets = kafkaConsumer.beginningOffsets(topicPartitionList);
|
||||
Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionList);
|
||||
for (TopicPartition tp: topicPartitionList) {
|
||||
Long offset = offsetMap.get(tp);
|
||||
Long earliestOffset = beginningOffsets.get(tp);
|
||||
Long largestOffset = endOffsets.get(tp);
|
||||
if (earliestOffset != null && offset < earliestOffset) {
|
||||
offsetMap.put(tp, earliestOffset);
|
||||
} else if (largestOffset != null && largestOffset < offset) {
|
||||
offsetMap.put(tp, largestOffset);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void resetConsumerOffsetInZK(ClusterDO cluster,
|
||||
String consumerGroup,
|
||||
TopicPartition topicPartition,
|
||||
Long offset) throws Exception {
|
||||
ZkConfigImpl zkConfig = ClusterMetadataManager.getZKConfig(cluster.getId());
|
||||
String offsetPath = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroup, topicPartition.topic(), topicPartition.partition());
|
||||
zkConfig.setNodeStat(offsetPath, offset.toString());
|
||||
}
|
||||
|
||||
private void resetConsumerOffsetInBroker(KafkaConsumer kafkaConsumer,
|
||||
TopicPartition topicPartition,
|
||||
Long offset) throws Exception {
|
||||
kafkaConsumer.assign(Arrays.asList(topicPartition));
|
||||
kafkaConsumer.seek(topicPartition, offset);
|
||||
kafkaConsumer.commitSync();
|
||||
}
|
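/*
 * Editorial sketch, not part of the original source: the broker-side reset above commits one
 * partition at a time via assign + seek + commitSync(). The kafka-clients API also allows
 * committing several partitions in a single call with commitSync(Map); a hypothetical variant
 * of the reset logic could look like this (method name is illustrative only).
 */
private void resetConsumerOffsetsInBrokerBatch(KafkaConsumer kafkaConsumer,
                                               Map<TopicPartition, Long> offsetMap) {
    Map<TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> commitMap = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : offsetMap.entrySet()) {
        commitMap.put(entry.getKey(), new org.apache.kafka.clients.consumer.OffsetAndMetadata(entry.getValue()));
    }
    kafkaConsumer.assign(new ArrayList<>(offsetMap.keySet()));
    // Commit all of the corrected offsets for the group in one round trip.
    kafkaConsumer.commitSync(commitMap);
}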
||||
|
||||
/**
|
||||
* Get the info of all topics belonging to this cluster and consumerGroup
|
||||
*/
|
||||
private Map<String, List<PartitionState>> getConsumerGroupPartitionStateList(ClusterDO clusterDO,
|
||||
ConsumerGroupDTO consumerGroupDTO,
|
||||
Map<String, List<PartitionState>> globalTopicNamePartitionStateListMap) {
|
||||
Map<String, List<PartitionState>> topicNamePartitionStateListMap = new HashMap<>(2);
|
||||
|
||||
List<String> topicNameList = ConsumerMetadataCache.getConsumerGroupConsumedTopicList(clusterDO.getId(),consumerGroupDTO.getOffsetStoreLocation().getLocation(), consumerGroupDTO.getConsumerGroup());
|
||||
for (String topicName : topicNameList) {
|
||||
if (!ClusterMetadataManager.isTopicExist(clusterDO.getId(), topicName)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
List<PartitionState> partitionStateList = globalTopicNamePartitionStateListMap.get(topicName);
|
||||
if (partitionStateList == null) {
|
||||
try {
|
||||
partitionStateList = zkService.getTopicPartitionState(clusterDO.getId(), topicName);
|
||||
} catch (Exception e) {
|
||||
logger.error("get topic partition state failed, clusterId:{} topicName:{}.", clusterDO.getId(), topicName, e);
|
||||
}
|
||||
if (partitionStateList == null) {
|
||||
continue;
|
||||
}
|
||||
globalTopicNamePartitionStateListMap.put(topicName, partitionStateList);
|
||||
}
|
||||
List<PartitionState> consumerGroupPartitionStateList = new ArrayList<>();
|
||||
for (PartitionState partitionState: partitionStateList) {
|
||||
consumerGroupPartitionStateList.add((PartitionState) partitionState.clone());
|
||||
}
|
||||
|
||||
if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetStoreLocation.ZOOKEEPER)) {
|
||||
updateTopicConsumerOffsetInZK(clusterDO, topicName, consumerGroupDTO, consumerGroupPartitionStateList);
|
||||
} else if (consumerGroupDTO.getOffsetStoreLocation().equals(OffsetStoreLocation.BROKER)) {
|
||||
updateTopicConsumerOffsetInBroker(clusterDO, topicName, consumerGroupDTO, consumerGroupPartitionStateList);
|
||||
}
|
||||
topicNamePartitionStateListMap.put(topicName, consumerGroupPartitionStateList);
|
||||
}
|
||||
return topicNamePartitionStateListMap;
|
||||
}
|
||||
|
||||
private void updateTopicConsumerOffsetInZK(ClusterDO cluster, String topicName, ConsumerGroupDTO consumerGroupDTO, List<PartitionState> partitionStateList) {
|
||||
ZkConfigImpl zkConfig = ClusterMetadataManager.getZKConfig(cluster.getId());
|
||||
for (PartitionState partitionState : partitionStateList) {
|
||||
// The offset is stored in ZK
|
||||
String consumerGroupOffsetLocation = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicName, partitionState.getPartitionId());
|
||||
String offset = null;
|
||||
try {
|
||||
Stat stat = zkConfig.getNodeStat(consumerGroupOffsetLocation);
|
||||
if (stat == null) {
|
||||
continue;
|
||||
}
|
||||
offset = zkConfig.get(consumerGroupOffsetLocation);
|
||||
} catch (ConfigException e) {
|
||||
logger.error("get consumer offset failed, zk path:{}.", consumerGroupOffsetLocation, e);
|
||||
}
|
||||
|
||||
String consumerId = null;
|
||||
try {
|
||||
consumerId = zkConfig.get(ZkPathUtil.getConsumerGroupOwnersTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicName, partitionState.getPartitionId()));
|
||||
} catch (ConfigException e) {
|
||||
// logger.error("get consumerId error in updateTopicConsumerOffsetInZK cluster:{} topic:{} consumerGroup:{}", cluster.getClusterName(), topicName, consumerGroupDTO.getConsumerGroup());
|
||||
}
|
||||
partitionState.setConsumerGroup(consumerGroupDTO.getConsumerGroup());
|
||||
updatePartitionStateOffset(partitionState, offset, consumerId);
|
||||
}
|
||||
}
|
||||
|
||||
private void updateTopicConsumerOffsetInBroker(ClusterDO cluster, String topicName, ConsumerGroupDTO consumerGroupDTO, List<PartitionState> partitionStateList) {
|
||||
Map<Integer, String> offsetsFromBroker = getOffsetByGroupAndTopicFromBroker(cluster, consumerGroupDTO.getConsumerGroup(), topicName);
|
||||
if (offsetsFromBroker == null || offsetsFromBroker.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (PartitionState partitionState : partitionStateList) {
|
||||
int partitionId = partitionState.getPartitionId();
|
||||
updatePartitionStateOffset(partitionState, offsetsFromBroker.get(partitionId), null);
|
||||
}
|
||||
}
|
||||
|
||||
private void updatePartitionStateOffset(PartitionState partitionState, String offset, String consumerId) {
|
||||
partitionState.setConsumeOffset(0);
|
||||
if (!StringUtils.isEmpty(offset)) {
|
||||
partitionState.setConsumeOffset(Long.parseLong(offset));
|
||||
}
|
||||
partitionState.setConsumerGroup(consumerId);
|
||||
}
|
||||
|
||||
private Map<Integer, String> getConsumeIdMap(Long clusterId, String topicName, String consumerGroup) {
|
||||
AdminClient.ConsumerGroupSummary consumerGroupSummary = ConsumerMetadataCache.getConsumerGroupSummary(clusterId, consumerGroup);
|
||||
if (consumerGroupSummary == null) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
Map<Integer, String> consumerIdMap = new HashMap<>();
|
||||
for (scala.collection.immutable.List<AdminClient.ConsumerSummary> scalaSubConsumerSummaryList: JavaConversions.asJavaList(consumerGroupSummary.consumers().toList())) {
|
||||
List<AdminClient.ConsumerSummary> subConsumerSummaryList = JavaConversions.asJavaList(scalaSubConsumerSummaryList);
|
||||
for (AdminClient.ConsumerSummary consumerSummary: subConsumerSummaryList) {
|
||||
for (TopicPartition tp: JavaConversions.asJavaList(consumerSummary.assignment())) {
|
||||
if (!tp.topic().equals(topicName)) {
|
||||
continue;
|
||||
}
|
||||
consumerIdMap.put(tp.partition(), consumerSummary.host().substring(1, consumerSummary.host().length()) + ":" + consumerSummary.consumerId());
|
||||
}
|
||||
}
|
||||
}
|
||||
return consumerIdMap;
|
||||
}
|
||||
|
||||
private List<ConsumeDetailDTO> getConsumerPartitionStateInBroker(ClusterDO clusterDO, TopicMetadata topicMetadata, ConsumerGroupDTO consumerGroupDTO) {
|
||||
Map<Integer, String> consumerIdMap = getConsumeIdMap(clusterDO.getId(), topicMetadata.getTopic(), consumerGroupDTO.getConsumerGroup());
|
||||
Map<Integer, String> consumeOffsetMap = getOffsetByGroupAndTopicFromBroker(clusterDO, consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic());
|
||||
|
||||
List<ConsumeDetailDTO> consumeDetailDTOList = new ArrayList<>();
|
||||
for (int partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
|
||||
ConsumeDetailDTO consumeDetailDTO = new ConsumeDetailDTO();
|
||||
consumeDetailDTO.setPartitionId(partitionId);
|
||||
String consumeOffsetStr = consumeOffsetMap.get(partitionId);
|
||||
try {
|
||||
consumeDetailDTO.setConsumeOffset(StringUtils.isEmpty(consumeOffsetStr)? null: Long.valueOf(consumeOffsetStr));
|
||||
} catch (Exception e) {
|
||||
logger.error("getConsumerPartitionStateInBroker@ConsumerServiceImpl, illegal consumer offset, clusterId:{} topicName:{} consumerGroup:{} offset:{}.", clusterDO.getId(), topicMetadata.getTopic(), consumerGroupDTO.getConsumerGroup(), consumeOffsetStr, e);
|
||||
}
|
||||
consumeDetailDTO.setConsumerId(consumerIdMap.get(partitionId));
|
||||
consumeDetailDTOList.add(consumeDetailDTO);
|
||||
}
|
||||
return consumeDetailDTOList;
|
||||
}
|
||||
|
||||
private List<ConsumeDetailDTO> getConsumerPartitionStateInZK(ClusterDO clusterDO,
|
||||
TopicMetadata topicMetadata,
|
||||
ConsumerGroupDTO consumerGroupDTO) {
|
||||
ZkConfigImpl zkConfig = ClusterMetadataManager.getZKConfig(clusterDO.getId());
|
||||
|
||||
List<ConsumeDetailDTO> consumeDetailDTOList = new ArrayList<>();
|
||||
for (Integer partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
|
||||
String consumeGroupPath = ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
|
||||
String consumeOffset = null;
|
||||
try {
|
||||
consumeOffset = zkConfig.get(consumeGroupPath);
|
||||
} catch (ConfigException e) {
|
||||
logger.error("get consumeOffset error for zk path:{}", consumeGroupPath, e);
|
||||
}
|
||||
String consumeIdZkPath = ZkPathUtil.getConsumerGroupOwnersTopicPartitionNode(consumerGroupDTO.getConsumerGroup(), topicMetadata.getTopic(), partitionId);
|
||||
String consumerId = null;
|
||||
try {
|
||||
consumerId = zkConfig.get(consumeIdZkPath);
|
||||
} catch (ConfigException e) {
|
||||
// logger.error("get consumerId error for zk path:{}", consumeIdZkPath, e);
|
||||
}
|
||||
|
||||
ConsumeDetailDTO consumeDetailDTO = new ConsumeDetailDTO();
|
||||
consumeDetailDTO.setPartitionId(partitionId);
|
||||
consumeDetailDTO.setConsumerId(consumerId);
|
||||
if (!StringUtils.isEmpty(consumeOffset)) {
|
||||
consumeDetailDTO.setConsumeOffset(Long.valueOf(consumeOffset));
|
||||
}
|
||||
consumeDetailDTOList.add(consumeDetailDTO);
|
||||
}
|
||||
return consumeDetailDTOList;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch the per-partition consumer offsets stored in the broker for the given consumer group and topic
|
||||
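* e.g. a two-partition topic might return {0 -> "12345", 1 -> "67890"} (illustrative values)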
*/
|
||||
private Map<Integer, String> getOffsetByGroupAndTopicFromBroker(ClusterDO clusterDO,
|
||||
String consumerGroup,
|
||||
String topicName) {
|
||||
Map<Integer, String> result = new HashMap<>();
|
||||
AdminClient client = KafkaClientCache.getAdminClient(clusterDO.getId());
|
||||
if (null == client) {
|
||||
return result;
|
||||
}
|
||||
Map<TopicPartition, Object> offsetMap = JavaConversions.asJavaMap(client.listGroupOffsets(consumerGroup));
|
||||
for (Map.Entry<TopicPartition, Object> entry : offsetMap.entrySet()) {
|
||||
TopicPartition topicPartition = entry.getKey();
|
||||
if (topicPartition.topic().equals(topicName)) {
|
||||
result.put(topicPartition.partition(), entry.getValue().toString());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<Long, Integer> getConsumerGroupNumMap(List<ClusterDO> clusterDOList) {
|
||||
Map<Long, Integer> consumerGroupNumMap = new HashMap<>();
|
||||
for (ClusterDO clusterDO: clusterDOList) {
|
||||
Integer zkConsumerGroupNum = ConsumerMetadataCache.getGroupInZkMap(clusterDO.getId()).size();
|
||||
Integer brokerConsumerGroupNum = ConsumerMetadataCache.getGroupInBrokerMap(clusterDO.getId()).size();
|
||||
consumerGroupNumMap.put(clusterDO.getId(), zkConsumerGroupNum + brokerConsumerGroupNum);
|
||||
}
|
||||
return consumerGroupNumMap;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,209 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.jmx.Mbean;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.jmx.MbeanNameUtil;
|
||||
import com.xiaojukeji.kafka.manager.service.service.BrokerService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.JmxService;
|
||||
import com.xiaojukeji.kafka.manager.service.utils.ObjectUtil;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.BeanUtils;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import javax.management.*;
|
||||
import javax.management.remote.JMXConnector;
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author tukun, zengqiao
|
||||
* @date 2015/11/11.
|
||||
*/
|
||||
@Service("jmxService")
|
||||
public class JmxServiceImpl implements JmxService {
|
||||
private final static Logger logger = LoggerFactory.getLogger(JmxServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
BrokerService brokerService;
|
||||
|
||||
@Autowired
|
||||
ClusterService clusterService;
|
||||
|
||||
@Override
|
||||
public BrokerMetrics getSpecifiedBrokerMetricsFromJmx(Long clusterId,
|
||||
Integer brokerId,
|
||||
List<String> specifiedFieldList,
|
||||
Boolean simple) {
|
||||
if (clusterId == null || brokerId == null || specifiedFieldList == null) {
|
||||
throw new IllegalArgumentException("param illegal");
|
||||
}
|
||||
|
||||
MBeanServerConnection connection = getMBeanServerConnection(clusterId, brokerId);
|
||||
if (connection == null) {
|
||||
logger.warn("getSpecifiedBrokerMetricsFromJmx@JmxServiceImpl, get jmx connector failed, clusterId:{} brokerId:{}.", clusterId, brokerId);
|
||||
return null;
|
||||
}
|
||||
|
||||
BrokerMetrics brokerMetrics = null;
|
||||
for (String fieldName : specifiedFieldList) {
|
||||
String property = fieldName.substring(0, 1).toUpperCase() + fieldName.substring(1);
|
||||
|
||||
Mbean mbean = MbeanNameUtil.getMbean(property, null);
|
||||
if (mbean == null || StringUtils.isEmpty(mbean.getObjectName())) {
|
||||
// skip when the objectName cannot be resolved
|
||||
continue;
|
||||
}
|
||||
|
||||
// fetch different JMX attributes depending on the field
|
||||
String[] properties = new String[]{mbean.getProperty()};
|
||||
if (!simple && "OneMinuteRate".equals(mbean.getProperty())) {
|
||||
properties = new String[]{"MeanRate", "OneMinuteRate", "FiveMinuteRate", "FifteenMinuteRate"};
|
||||
}
|
||||
|
||||
List<Attribute> attributeValueList = null;
|
||||
try {
|
||||
attributeValueList = connection.getAttributes(new ObjectName(mbean.getObjectName()), properties).asList();
|
||||
} catch (Exception e) {
|
||||
logger.error("getSpecifiedBrokerMetricsFromJmx@JmxServiceImpl, get metrics fail, objectName:{}.", mbean.getObjectName(), e);
|
||||
continue;
|
||||
}
|
||||
if (attributeValueList == null || attributeValueList.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// use reflection to invoke the matching setter and populate the metrics object
|
||||
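// e.g. fieldName "bytesInPerSec" combined with attribute "FiveMinuteRate" would be written via setBytesInPerSecFiveMinuteRate(...) (illustrative; assumes such a setter exists on BrokerMetrics)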
for (int i = 0; i < properties.length; i++) {
|
||||
String propertyName = property;
|
||||
if ("MeanRate".equals(properties[i]) || "FiveMinuteRate".equals(properties[i]) || "FifteenMinuteRate".equals(properties[i])) {
|
||||
propertyName = property + properties[i];
|
||||
}
|
||||
|
||||
Method method = BeanUtils.findMethod(BrokerMetrics.class, setProperty(propertyName), mbean.getPropertyClass());
|
||||
if (method == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (brokerMetrics == null) {
|
||||
brokerMetrics = new BrokerMetrics();
|
||||
brokerMetrics.setClusterId(clusterId);
|
||||
brokerMetrics.setBrokerId(brokerId);
|
||||
}
|
||||
try {
|
||||
method.invoke(brokerMetrics, attributeValueList.get(i).getValue());
|
||||
} catch (Exception e) {
|
||||
logger.error("getSpecifiedBrokerMetricsFromJmx@JmxServiceImpl, call method failed, methodName:{}.", setProperty(propertyName), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
return brokerMetrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicMetrics getSpecifiedTopicMetricsFromJmx(Long clusterId,
|
||||
String topicName,
|
||||
List<String> specifiedFieldList,
|
||||
Boolean simple) {
|
||||
return getSpecifiedBrokerTopicMetricsFromJmx(clusterId, -1, topicName, specifiedFieldList, simple);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicMetrics getSpecifiedBrokerTopicMetricsFromJmx(Long clusterId,
|
||||
Integer filteredBrokerId,
|
||||
String topicName,
|
||||
List<String> specifiedFieldList,
|
||||
Boolean simple) {
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
|
||||
if (topicMetadata == null) {
|
||||
throw new IllegalArgumentException("param illegal");
|
||||
}
|
||||
String[] properties = null;
|
||||
if (simple) {
|
||||
properties = new String[]{"OneMinuteRate"};
|
||||
} else {
|
||||
properties = new String[]{"MeanRate", "OneMinuteRate", "FiveMinuteRate", "FifteenMinuteRate"};
|
||||
}
|
||||
|
||||
TopicMetrics topicMetrics = new TopicMetrics();
|
||||
for (Integer brokerId: topicMetadata.getBrokerIdSet()) {
|
||||
try {
|
||||
if (filteredBrokerId != null && filteredBrokerId >= 0 && !brokerId.equals(filteredBrokerId)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
MBeanServerConnection connection = getMBeanServerConnection(clusterId, brokerId);
|
||||
if (connection == null) {
|
||||
continue;
|
||||
}
|
||||
TopicMetrics newTopicMetrics = new TopicMetrics();
|
||||
for (String fieldName : specifiedFieldList) {
|
||||
if (!fieldName.endsWith("PerSec")) {
|
||||
continue;
|
||||
}
|
||||
String property = fieldName.substring(0, 1).toUpperCase() + fieldName.substring(1);
|
||||
|
||||
Mbean mbean = MbeanNameUtil.getMbean(property, topicName);
|
||||
if (mbean == null || StringUtils.isEmpty(mbean.getObjectName())) {
|
||||
// skip when the objectName cannot be resolved
|
||||
continue;
|
||||
}
|
||||
|
||||
// some topics that only exist in ZK may have no metrics yet, so catch the exception separately
|
||||
List<Attribute> attributeValueList = null;
|
||||
try {
|
||||
attributeValueList = connection.getAttributes(new ObjectName(mbean.getObjectName()), properties).asList();
|
||||
} catch (Exception e) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (int i = 0; i < properties.length; i++) {
|
||||
String propertyName = "OneMinuteRate".equals(properties[i]) ? property: property + properties[i];
|
||||
Method method = BeanUtils.findMethod(TopicMetrics.class, setProperty(propertyName), Double.class);
|
||||
if (method != null) {
|
||||
method.invoke(newTopicMetrics, attributeValueList.get(i).getValue());
|
||||
continue;
|
||||
}
|
||||
logger.warn("method not find, methodName = " + setProperty(propertyName));
|
||||
}
|
||||
}
|
||||
ObjectUtil.add(topicMetrics, newTopicMetrics, "PerSec");
|
||||
} catch (Exception e) {
|
||||
logger.error("getSpecifiedTopicMetrics@JmxServiceImpl, get topic metrics from jmx error.", e);
|
||||
}
|
||||
}
|
||||
topicMetrics.setClusterId(clusterId);
|
||||
topicMetrics.setTopicName(topicName);
|
||||
return topicMetrics;
|
||||
}
|
||||
|
||||
private MBeanServerConnection getMBeanServerConnection(Long clusterId, int brokerId) {
|
||||
JmxConnectorWrap jmxConnectorWrap = ClusterMetadataManager.getJmxConnectorWrap(clusterId, brokerId);
|
||||
if (jmxConnectorWrap == null) {
|
||||
// logger.error("get jmxConnector fail, clusterId:{} brokerId:{}.", clusterId, brokerId);
|
||||
return null;
|
||||
}
|
||||
|
||||
JMXConnector jmxConnector = jmxConnectorWrap.getJmxConnector();
|
||||
if (jmxConnector == null) {
|
||||
return null;
|
||||
}
|
||||
try {
|
||||
return jmxConnector.getMBeanServerConnection();
|
||||
} catch (IOException e) {
|
||||
logger.error("can not get connection to brokerId:{} which clusterId is:{}",brokerId,clusterId,e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String setProperty(String property) {
|
||||
return "set" + property;
|
||||
}
|
||||
}
|
||||
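/*
 * Illustrative usage sketch (editor's addition, not part of the original source): how a caller
 * such as a metrics collection task might read a broker's one-minute BytesIn rate through
 * JmxService. The field name "bytesInPerSec", the getter on BrokerMetrics and the surrounding
 * class are assumptions made for the example.
 *
 *   @Autowired
 *   private JmxService jmxService;
 *
 *   public Double collectBytesIn(Long clusterId, Integer brokerId) {
 *       // simple=true keeps only the mbean's default attribute (OneMinuteRate for rate metrics)
 *       BrokerMetrics metrics = jmxService.getSpecifiedBrokerMetricsFromJmx(
 *               clusterId, brokerId, Arrays.asList("bytesInPerSec"), true);
 *       return metrics == null ? null : metrics.getBytesInPerSec();
 *   }
 */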
@@ -0,0 +1,144 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.AccountRoleEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.AccountDO;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.EncryptUtil;
|
||||
import com.xiaojukeji.kafka.manager.dao.AccountDao;
|
||||
import com.xiaojukeji.kafka.manager.service.service.LoginService;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpSession;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* @author huangyiminghappy@163.com
|
||||
* @date 2019-04-26
|
||||
*/
|
||||
@Service("loginService")
|
||||
public class LoginServiceImpl implements LoginService {
|
||||
/**
|
||||
* <username, HttpSession>
|
||||
*/
|
||||
private static final Map<String, HttpSession> SESSION_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
@Autowired
|
||||
private AccountDao accountDao;
|
||||
|
||||
@Override
|
||||
public Result<AccountRoleEnum> login(HttpServletRequest request, String username, String password) {
|
||||
if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "参数错误");
|
||||
}
|
||||
AccountDO accountDO = accountDao.getByName(username);
|
||||
if (accountDO == null) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "用户/密码不存在");
|
||||
}
|
||||
|
||||
if (!accountDO.getPassword().equals(EncryptUtil.md5(password))) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "用户/密码错误");
|
||||
}
|
||||
AccountRoleEnum userRoleEnum = AccountRoleEnum.getUserRoleEnum(accountDO.getRole());
|
||||
if (userRoleEnum == null) {
|
||||
return new Result<>(StatusCode.OPERATION_ERROR, "用户无权限");
|
||||
}
|
||||
|
||||
HttpSession session = request.getSession(true);
|
||||
session.setMaxInactiveInterval(24 * 60 * 60);
|
||||
session.setAttribute("role", userRoleEnum);
|
||||
session.setAttribute("username", username);
|
||||
SESSION_MAP.put(username, session);
|
||||
return new Result<>(userRoleEnum);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result logoff(HttpServletRequest request, String username) {
|
||||
if (StringUtils.isEmpty(username)) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param error");
|
||||
}
|
||||
try {
|
||||
request.getSession().invalidate();
|
||||
removeSession(username);
|
||||
} catch (Exception e) {
|
||||
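// ignore: the session may already have been invalidated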
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result addNewAccount(AccountDO accountDO) {
|
||||
AccountDO oldAccountDO = accountDao.getByName(accountDO.getUsername());
|
||||
if (oldAccountDO != null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "account already exist");
|
||||
}
|
||||
accountDO.setPassword(EncryptUtil.md5(accountDO.getPassword()));
|
||||
accountDO.setStatus(0);
|
||||
int status = accountDao.addNewAccount(accountDO);
|
||||
if (status > 0) {
|
||||
return new Result();
|
||||
}
|
||||
return new Result(StatusCode.MY_SQL_INSERT_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteByName(String userName) {
|
||||
removeSession(userName);
|
||||
return accountDao.deleteByName(userName) > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result updateAccount(AccountDO accountDO, String oldPassword) {
|
||||
AccountDO oldAccountDO = accountDao.getByName(accountDO.getUsername());
|
||||
if (oldAccountDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "account not exist");
|
||||
}
|
||||
if (!StringUtils.isEmpty(oldPassword)) {
|
||||
String md5Sum = EncryptUtil.md5(oldPassword);
|
||||
if (oldAccountDO.getPassword().equals(md5Sum)) {
|
||||
accountDO.setPassword(md5Sum);
|
||||
} else {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "old password is wrong");
|
||||
}
|
||||
} else {
|
||||
accountDO.setPassword(StringUtils.isEmpty(accountDO.getPassword())? oldAccountDO.getPassword(): EncryptUtil.md5(accountDO.getPassword()));
|
||||
}
|
||||
accountDO.setRole(accountDO.getRole() == null? oldAccountDO.getRole(): accountDO.getRole());
|
||||
accountDO.setStatus(accountDO.getStatus() == null? oldAccountDO.getStatus(): accountDO.getStatus());
|
||||
int status = accountDao.updateAccount(accountDO);
|
||||
if (status > 0) {
|
||||
removeSession(accountDO.getUsername());
|
||||
return new Result();
|
||||
}
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
|
||||
private void removeSession(String username) {
|
||||
if (StringUtils.isEmpty(username)) {
|
||||
return;
|
||||
}
|
||||
HttpSession httpSession = SESSION_MAP.get(username);
|
||||
SESSION_MAP.remove(username);
|
||||
if (httpSession != null) {
|
||||
httpSession.invalidate();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<AccountDO> list() {
|
||||
return accountDao.list();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isLogin(String userName) {
|
||||
if (!StringUtils.isEmpty(userName) && SESSION_MAP.containsKey(userName)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,314 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.alibaba.fastjson.JSONObject;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.ReassignmentStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.MigrationTaskDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.MigrationTaskDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.MigrationService;
|
||||
import com.xiaojukeji.kafka.manager.service.utils.SpringContextHolder;
|
||||
import kafka.admin.*;
|
||||
import kafka.common.TopicAndPartition;
|
||||
import kafka.controller.ReassignedPartitionsContext;
|
||||
import kafka.utils.ZkUtils;
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StringUtils;
|
||||
import scala.collection.JavaConversions;
|
||||
import scala.collection.JavaConverters;
|
||||
import scala.collection.Seq;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* migrate topic service impl
|
||||
* @author zengqiao_cn@163.com,huangyiminghappy@163.com
|
||||
* @date 19/4/16
|
||||
*/
|
||||
@Service("migrationService")
|
||||
public class MigrationServiceImpl implements MigrationService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(MigrationServiceImpl.class);
|
||||
|
||||
private static final int DEFAULT_SESSION_TIMEOUT = 90000;
|
||||
|
||||
@Autowired
|
||||
private MigrationTaskDao migrationTaskDao;
|
||||
|
||||
@Autowired
|
||||
private ClusterService clusterService;
|
||||
|
||||
@Override
|
||||
public Result<MigrationTaskDO> createMigrationTask(Long clusterId, String topicName, List<Integer> partitionList, Long throttle, List<Integer> brokerIdList, String description) {
|
||||
ClusterDO cluster = clusterService.getById(clusterId);
|
||||
if (cluster == null) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "cluster not exist");
|
||||
}
|
||||
Result<MigrationTaskDO> result = checkAddMigrateTaskParamIllegal(clusterId, topicName, brokerIdList, partitionList);
|
||||
if (!StatusCode.SUCCESS.equals(result.getCode())) {
|
||||
return result;
|
||||
}
|
||||
String reassignmentJson = createReassignmentJson(cluster, topicName, new ArrayList<>(brokerIdList));
|
||||
if (StringUtils.isEmpty(reassignmentJson)) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "create reassignment json failed");
|
||||
}
|
||||
|
||||
// persist the task to the database
|
||||
MigrationTaskDO migrationTaskDO = MigrationTaskDO.createInstance(clusterId, topicName, reassignmentJson, throttle, description);
|
||||
if (!addMigrationTask(migrationTaskDO)) {
|
||||
return new Result<>(StatusCode.MY_SQL_INSERT_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
return new Result<>(migrationTaskDO);
|
||||
}
|
||||
|
||||
private boolean addMigrationTask(MigrationTaskDO migrationTaskDO) {
|
||||
migrationTaskDO.setOperator(SpringContextHolder.getUserName());
|
||||
try {
|
||||
if (migrationTaskDO.getStatus() == null) {
|
||||
migrationTaskDO.setStatus(ReassignmentStatusEnum.WAITING.getCode());
|
||||
}
|
||||
migrationTaskDao.addMigrationTask(migrationTaskDO);
|
||||
} catch (Exception e) {
|
||||
logger.error("addMigrationTask@MigrationServiceImpl, add failed, migrationTaskDO:{}.", migrationTaskDO, e);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private Result<MigrationTaskDO> checkAddMigrateTaskParamIllegal(Long clusterId, String topicName, List<Integer> brokerIdList, List<Integer> partitionIdList) {
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
|
||||
if (topicMetadata == null) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "topic not exist");
|
||||
}
|
||||
List<Integer> allBrokerIdList = ClusterMetadataManager.getBrokerIdList(clusterId);
|
||||
for (Integer brokerId : brokerIdList) {
|
||||
if (!allBrokerIdList.contains(brokerId)) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, String.format("brokerId:%d not alive", brokerId));
|
||||
}
|
||||
}
|
||||
if (brokerIdList.size() < topicMetadata.getReplicaNum()) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "brokerId num is less than topic replica num");
|
||||
}
|
||||
if (partitionIdList == null) {
|
||||
return new Result<>();
|
||||
}
|
||||
|
||||
List<Integer> allPartitionIdList = new ArrayList<>(topicMetadata.getPartitionMap().getPartitions().keySet());
|
||||
for (Integer partitionId: partitionIdList) {
|
||||
if (!allPartitionIdList.contains(partitionId)) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, String.format("partitionId:%d not exist", partitionId));
|
||||
}
|
||||
}
|
||||
return new Result<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MigrationTaskDO getMigrationTask(Long taskId) {
|
||||
try {
|
||||
return migrationTaskDao.getById(taskId);
|
||||
} catch (Exception e) {
|
||||
logger.error("getMigrationTask@MigrationServiceImpl, get failed, taskId:{}.", taskId, e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<Integer, Integer> getMigrationStatus(ClusterDO cluster, String reassignmentJson) {
|
||||
Map<TopicAndPartition, ReassignmentStatus> reassignmentStatusMap = verifyAssignment(cluster, reassignmentJson);
|
||||
if (reassignmentStatusMap == null) {
|
||||
return null;
|
||||
}
|
||||
Map<Integer, Integer> result = new HashMap<>();
|
||||
for (Map.Entry<TopicAndPartition, ReassignmentStatus> entry: reassignmentStatusMap.entrySet()) {
|
||||
TopicAndPartition tp = entry.getKey();
|
||||
if (ReassignmentCompleted.status() == entry.getValue().status()) {
|
||||
result.put(tp.partition(), ReassignmentStatusEnum.SUCCESS.getCode());
|
||||
} else if (ReassignmentInProgress.status() == entry.getValue().status()) {
|
||||
result.put(tp.partition(), ReassignmentStatusEnum.RUNNING.getCode());
|
||||
} else {
|
||||
result.put(tp.partition(), ReassignmentStatusEnum.FAILED.getCode());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result executeMigrationTask(Long taskId) {
|
||||
MigrationTaskDO migrationTaskDO = getMigrationTask(taskId);
|
||||
if (migrationTaskDO == null || !ReassignmentStatusEnum.WAITING.getCode().equals(migrationTaskDO.getStatus())) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "task may not exist or can't be execute");
|
||||
}
|
||||
ClusterDO cluster = clusterService.getById(migrationTaskDO.getClusterId());
|
||||
if (cluster == null) {
|
||||
return new Result<>(StatusCode.OPERATION_ERROR, "cluster not exist");
|
||||
}
|
||||
try {
|
||||
if (!executeAssignment(cluster, migrationTaskDO.getReassignmentJson(), migrationTaskDO.getThrottle())) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "execute migration failed");
|
||||
}
|
||||
int status = migrationTaskDao.updateById(taskId, ReassignmentStatusEnum.RUNNING.getCode(), migrationTaskDO.getThrottle());
|
||||
if (status != 1) {
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, "update throttle failed");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("executeMigrationTask@MigrationServiceImpl, update mysql:migration_task throttle failed, taskId:{}.", taskId, e);
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, "update throttle failed");
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result modifyMigrationTask(Long taskId, Long throttle) {
|
||||
MigrationTaskDO migrationTaskDO = getMigrationTask(taskId);
|
||||
if (migrationTaskDO == null || !ReassignmentStatusEnum.triggerTask(migrationTaskDO.getStatus())) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "task may not exist or can't be execute");
|
||||
}
|
||||
ClusterDO cluster = clusterService.getById(migrationTaskDO.getClusterId());
|
||||
if (cluster == null) {
|
||||
return new Result<>(StatusCode.OPERATION_ERROR, "cluster not exist");
|
||||
}
|
||||
if (throttle == null) {
|
||||
throttle = migrationTaskDO.getThrottle();
|
||||
}
|
||||
try {
|
||||
if (ReassignmentStatusEnum.RUNNING.getCode().equals(migrationTaskDO.getStatus()) && !executeAssignment(cluster, migrationTaskDO.getReassignmentJson(), throttle)) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "execute migration failed");
|
||||
}
|
||||
int status = migrationTaskDao.updateById(taskId, migrationTaskDO.getStatus(), throttle);
|
||||
if (status != 1) {
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, "update throttle failed");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("executeMigrationTask@MigrationServiceImpl, update mysql:migration_task throttle failed, taskId:{} throttle:{}.", taskId, throttle, e);
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, "update throttle failed");
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<MigrationTaskDO> getMigrationTaskList() {
|
||||
return migrationTaskDao.listAll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<MigrationTaskDO> getByStatus(Integer status) {
|
||||
return migrationTaskDao.getByStatus(status);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result deleteMigrationTask(Long taskId) {
|
||||
MigrationTaskDO migrationTaskDO = getMigrationTask(taskId);
|
||||
if (migrationTaskDO == null || !ReassignmentStatusEnum.cancelTask(migrationTaskDO.getStatus())) {
|
||||
return new Result<>(StatusCode.PARAM_ERROR, "task may not exist or can't be cancel");
|
||||
}
|
||||
try {
|
||||
int status = migrationTaskDao.updateById(taskId, ReassignmentStatusEnum.CANCELED.getCode(), migrationTaskDO.getThrottle());
|
||||
if (status != 1) {
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, "delete failed");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteMigrationTask@MigrationServiceImpl, delete mysql:migration_task failed, taskId:{}.", taskId, e);
|
||||
return new Result(StatusCode.MY_SQL_UPDATE_ERROR, "delete failed");
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
private String createReassignmentJson(ClusterDO cluster, String topicName, List<Object> brokerIdList) {
|
||||
ZkUtils zkUtils = createZkUtils(cluster);
|
||||
if (zkUtils == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
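// build the topics-to-move JSON consumed by ReassignPartitionsCommand.generateAssignment, i.e. {"topics":[{"topic":"<topicName>"}],"version":1}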
JSONObject topicsToMoveJson = new JSONObject();
|
||||
List<JSONObject> topicList = new ArrayList<>();
|
||||
JSONObject topicJson = new JSONObject();
|
||||
topicJson.put("topic", topicName);
|
||||
topicList.add(topicJson);
|
||||
topicsToMoveJson.put("topics", topicList);
|
||||
topicsToMoveJson.put("version", 1);
|
||||
|
||||
Seq<Object> brokerListToReassign = JavaConverters.asScalaIteratorConverter(brokerIdList.iterator()).asScala().toSeq();
|
||||
scala.collection.Map<TopicAndPartition, Seq<Object>> reassignmentJsonMap = ReassignPartitionsCommand.generateAssignment(zkUtils, brokerListToReassign, topicsToMoveJson.toJSONString(), false)._1();
|
||||
String reassignmentJson = ZkUtils.formatAsReassignmentJson(reassignmentJsonMap);
|
||||
closeZkUtils(zkUtils);
|
||||
return reassignmentJson;
|
||||
}
|
||||
|
||||
private boolean executeAssignment(ClusterDO cluster, String reassignmentJson, Long throttle) {
|
||||
ZkUtils zkUtils = createZkUtils(cluster);
|
||||
if (zkUtils == null) {
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
ReassignPartitionsCommand.executeAssignment(zkUtils, reassignmentJson, throttle);
|
||||
} catch (Throwable e) {
|
||||
logger.error("executeAssignment@MigrationServiceImpl execute exception:",e);
|
||||
return false;
|
||||
} finally {
|
||||
closeZkUtils(zkUtils);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private Map<TopicAndPartition, ReassignmentStatus> verifyAssignment(ClusterDO cluster, String reassignmentJson) {
|
||||
ZkUtils zkUtils = createZkUtils(cluster);
|
||||
if (zkUtils == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
scala.collection.Map<TopicAndPartition, Seq<Object>> partitionsToBeReassigned = ZkUtils.parsePartitionReassignmentData(reassignmentJson);
|
||||
Map<TopicAndPartition, Seq<Object>> partitionsToBeReassignedJMap = JavaConversions.asJavaMap(partitionsToBeReassigned);
|
||||
|
||||
/*
|
||||
Java translation of the Scala expression:
|
||||
val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas)
|
||||
*/
|
||||
scala.collection.Map<TopicAndPartition, ReassignedPartitionsContext> partitionsBeingReassignedContext = zkUtils.getPartitionsBeingReassigned();
|
||||
Map<TopicAndPartition, ReassignedPartitionsContext> partitionsBeingReassignedContextJMap = JavaConversions.asJavaMap(partitionsBeingReassignedContext);
|
||||
Map<TopicAndPartition, Seq<Object>> partitionsBeingReassignedJMap = new HashMap<>();
|
||||
|
||||
for (Map.Entry<TopicAndPartition, ReassignedPartitionsContext> entry: partitionsBeingReassignedContextJMap.entrySet()) {
|
||||
TopicAndPartition topicAndPartition = entry.getKey();
|
||||
ReassignedPartitionsContext reassignedPartitionsContext = entry.getValue();
|
||||
partitionsBeingReassignedJMap.put(topicAndPartition, reassignedPartitionsContext.newReplicas());
|
||||
}
|
||||
scala.collection.Map<TopicAndPartition, Seq<Object>> partitionsBeingReassigned = JavaConversions.asScalaMap(partitionsBeingReassignedJMap);
|
||||
|
||||
// initialize the result map and populate it with the per-partition reassignment status
|
||||
Map<TopicAndPartition, ReassignmentStatus> result = new HashMap<>();
|
||||
for (TopicAndPartition topicAndPartition : partitionsToBeReassignedJMap.keySet()) {
|
||||
result.put(topicAndPartition, ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, partitionsToBeReassigned, partitionsBeingReassigned));
|
||||
}
|
||||
|
||||
closeZkUtils(zkUtils);
|
||||
return result;
|
||||
}
|
||||
|
||||
private ZkUtils createZkUtils(ClusterDO cluster) {
|
||||
try {
|
||||
return ZkUtils.apply(cluster.getZookeeper(), DEFAULT_SESSION_TIMEOUT, DEFAULT_SESSION_TIMEOUT, JaasUtils.isZkSecurityEnabled());
|
||||
} catch (Exception e) {
|
||||
logger.error("getZkUtils@MigrationServiceImpl, connect ZOOKEEPER failed, clusterId:{} ZOOKEEPER:{}.", cluster.getId(), cluster.getZookeeper(), e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void closeZkUtils(ZkUtils zkUtils) {
|
||||
if (zkUtils == null) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
zkUtils.close();
|
||||
} catch (Exception e) {
|
||||
logger.error("closeZkUtils@MigrationServiceImpl, close zkUtils failed.", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
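/*
 * Illustrative usage sketch (editor's addition, not part of the original source): the expected
 * lifecycle of a partition migration driven through MigrationService. The injected field, the
 * throttle value, and the Result/MigrationTaskDO accessors used below are assumptions made for
 * the example.
 *
 *   @Autowired
 *   private MigrationService migrationService;
 *
 *   public void migrateTopic(Long clusterId, String topicName, List<Integer> targetBrokerIdList) {
 *       // 1. create the task (validates brokers/partitions and stores the reassignment JSON)
 *       Result<MigrationTaskDO> created = migrationService.createMigrationTask(
 *               clusterId, topicName, null, 10L * 1024 * 1024, targetBrokerIdList, "example migration");
 *       if (!StatusCode.SUCCESS.equals(created.getCode())) {
 *           return;
 *       }
 *       // 2. trigger the reassignment; progress can later be polled via getMigrationStatus(...)
 *       migrationService.executeMigrationTask(created.getData().getId());
 *   }
 */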
@@ -0,0 +1,127 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.OrderStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.OrderTypeEnum;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.OrderPartitionDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.OrderTopicDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.OrderPartitionDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.OrderTopicDao;
|
||||
import com.xiaojukeji.kafka.manager.service.service.OrderService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 19/6/18
|
||||
*/
|
||||
@Service("orderService")
|
||||
public class OrderServiceImpl implements OrderService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(OrderServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private OrderTopicDao orderTopicDao;
|
||||
|
||||
@Autowired
|
||||
private OrderPartitionDao orderPartitionDao;
|
||||
|
||||
@Override
|
||||
public Boolean createOrderTopic(OrderTopicDO orderTopicDO) {
|
||||
return orderTopicDao.insert(orderTopicDO) > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result cancelOrder(Long orderId, String operator, OrderTypeEnum orderTypeEnum) {
|
||||
if (OrderTypeEnum.APPLY_TOPIC.equals(orderTypeEnum)) {
|
||||
OrderTopicDO orderTopicDO = orderTopicDao.getById(orderId);
|
||||
if (orderTopicDO != null) {
|
||||
orderTopicDO.setOrderStatus(OrderStatusEnum.CANCELLED.getCode());
|
||||
}
|
||||
return modifyOrderTopic(orderTopicDO, operator, false);
|
||||
} else if (OrderTypeEnum.APPLY_PARTITION.equals(orderTypeEnum)) {
|
||||
OrderPartitionDO orderPartitionDO = orderPartitionDao.getById(orderId);
|
||||
if (orderPartitionDO != null) {
|
||||
orderPartitionDO.setOrderStatus(OrderStatusEnum.CANCELLED.getCode());
|
||||
}
|
||||
return modifyOrderPartition(orderPartitionDO, operator);
|
||||
}
|
||||
return new Result(StatusCode.PARAM_ERROR, "order type illegal");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result modifyOrderTopic(OrderTopicDO newOrderTopicDO, String operator, boolean admin) {
|
||||
if (newOrderTopicDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal, order not exist");
|
||||
} else if (!admin && !newOrderTopicDO.getApplicant().equals(operator)) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "without authority to cancel the order");
|
||||
}
|
||||
OrderTopicDO oldOrderTopicDO = orderTopicDao.getById(newOrderTopicDO.getId());
|
||||
if (!OrderStatusEnum.WAIT_DEAL.getCode().equals(oldOrderTopicDO.getOrderStatus())) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "order already handled");
|
||||
}
|
||||
if (orderTopicDao.updateById(newOrderTopicDO) > 0) {
|
||||
return new Result();
|
||||
}
|
||||
return new Result(StatusCode.OPERATION_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result modifyOrderPartition(OrderPartitionDO newOrderPartitionDO, String operator) {
|
||||
if (newOrderPartitionDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param illegal, order not exist");
|
||||
} else if (!newOrderPartitionDO.getApplicant().equals(operator)) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "without authority to cancel the order");
|
||||
}
|
||||
OrderPartitionDO oldOrderPartitionDO = orderPartitionDao.getById(newOrderPartitionDO.getId());
|
||||
if (!OrderStatusEnum.WAIT_DEAL.getCode().equals(oldOrderPartitionDO.getOrderStatus())) {
|
||||
return new Result(StatusCode.OPERATION_ERROR, "order already handled");
|
||||
}
|
||||
if (orderPartitionDao.updateById(newOrderPartitionDO) > 0) {
|
||||
return new Result();
|
||||
}
|
||||
return new Result(StatusCode.OPERATION_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<OrderTopicDO> getOrderTopics(String username) {
|
||||
if (username == null) {
|
||||
return orderTopicDao.list();
|
||||
}
|
||||
return orderTopicDao.getByUsername(username);
|
||||
}
|
||||
|
||||
@Override
|
||||
public OrderTopicDO getOrderTopicById(Long orderId) {
|
||||
return orderTopicDao.getById(orderId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean createOrderPartition(OrderPartitionDO orderPartitionDO) {
|
||||
return orderPartitionDao.insert(orderPartitionDO) > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<OrderPartitionDO> getOrderPartitions(String username, Long orderId) {
|
||||
List<OrderPartitionDO> allOrderPartitionDOList = orderPartitionDao.list();
|
||||
if (allOrderPartitionDOList == null || allOrderPartitionDOList.isEmpty()) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
if (username == null) {
|
||||
return allOrderPartitionDOList.stream().filter(elem -> (orderId == null || (elem.getId().equals(orderId)))).collect(Collectors.toList());
|
||||
}
|
||||
return allOrderPartitionDOList.stream().filter(elem -> elem.getApplicant().equals(username) && (orderId == null || (elem.getId().equals(orderId)))).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public OrderPartitionDO getOrderPartitionById(Long orderId) {
|
||||
return orderPartitionDao.getById(orderId);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,174 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.Constant;
|
||||
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.Result;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.RegionDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
|
||||
import com.xiaojukeji.kafka.manager.dao.RegionDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.RegionService;
|
||||
import com.xiaojukeji.kafka.manager.service.utils.ListUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author zengqiao
|
||||
* @date 2017/11/13.
|
||||
*/
|
||||
@Service("regionService")
|
||||
public class RegionServiceImpl implements RegionService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(RegionServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private RegionDao regionDao;
|
||||
|
||||
@Override
|
||||
public Result createRegion(RegionDO regionDO) {
|
||||
if (regionDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param is null");
|
||||
}
|
||||
try {
|
||||
if (brokerAlreadyAssigned2Region(null, regionDO.getClusterId(), regionDO.getBrokerList())) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "exist already used brokerId");
|
||||
}
|
||||
regionDao.insert(regionDO);
|
||||
} catch (Exception e) {
|
||||
logger.error("createRegion@RegionServiceImpl, create region failed, newRegion:{}.", regionDO, e);
|
||||
return new Result(StatusCode.OPERATION_ERROR, Constant.KAFKA_MANAGER_INNER_ERROR);
|
||||
}
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean deleteById(Long id) {
|
||||
return regionDao.deleteById(id) > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result updateRegion(RegionDO newRegionDO) {
|
||||
if (newRegionDO == null || newRegionDO.getId() == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "param is null");
|
||||
}
|
||||
RegionDO oldRegionDO = regionDao.getById(newRegionDO.getId());
|
||||
if (oldRegionDO == null) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "region not exist");
|
||||
}
|
||||
if (brokerAlreadyAssigned2Region(newRegionDO.getId(), newRegionDO.getClusterId(), newRegionDO.getBrokerList())) {
|
||||
return new Result(StatusCode.PARAM_ERROR, "exist already used brokerId");
|
||||
}
|
||||
regionDao.updateById(newRegionDO);
|
||||
return new Result();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RegionDO> getByClusterId(Long clusterId) {
|
||||
return regionDao.getByClusterId(clusterId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<Long, Long> getRegionNum() {
|
||||
List<RegionDO> regionDoList = null;
|
||||
try {
|
||||
regionDoList = regionDao.listAll();
|
||||
} catch (Exception e) {
|
||||
logger.error("getRegionNum@RegionServiceImpl, select mysql:region_info failed.", e);
|
||||
}
|
||||
if (regionDoList == null) {
|
||||
return new HashMap<>(0);
|
||||
}
|
||||
Map<Long, Long> regionNumMap = new HashMap<>();
|
||||
for (RegionDO regionDO: regionDoList) {
|
||||
Long regionNum = regionNumMap.getOrDefault(regionDO.getClusterId(), 0L);
|
||||
regionNumMap.put(regionDO.getClusterId(), regionNum + 1);
|
||||
}
|
||||
return regionNumMap;
|
||||
}
|
||||
|
||||
private boolean brokerAlreadyAssigned2Region(Long regionId, Long clusterId, String newbrokerIdStr) {
|
||||
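// a missing cluster id or broker list is treated as a conflict so that the caller rejects the request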
if (clusterId == null || StringUtils.isEmpty(newbrokerIdStr)) {
|
||||
return true;
|
||||
}
|
||||
List<RegionDO> regionDOList = getByClusterId(clusterId);
|
||||
if (regionDOList == null || regionDOList.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
List<Integer> newBrokerIdList = ListUtils.string2IntList(newbrokerIdStr);
|
||||
for (RegionDO regionDO : regionDOList) {
|
||||
if (!DBStatusEnum.NORMAL.getStatus().equals(regionDO.getStatus())) {
|
||||
continue;
|
||||
}
|
||||
if (regionDO.getId().equals(regionId)) {
|
||||
continue;
|
||||
}
|
||||
List<Integer> regionBrokerIdList = ListUtils.string2IntList(regionDO.getBrokerList());
|
||||
if (regionBrokerIdList == null) {
|
||||
continue;
|
||||
}
|
||||
if (regionBrokerIdList.stream().anyMatch(newBrokerIdList::contains)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RegionDO> getRegionByTopicName(Long clusterId, String topicName) {
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
|
||||
if (topicMetadata == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
List<RegionDO> regionList = getByClusterId(clusterId);
|
||||
if (regionList == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
List<RegionDO> result = new ArrayList<>();
|
||||
for (RegionDO region: regionList) {
|
||||
List<Integer> brokerIdList = ListUtils.string2IntList(region.getBrokerList());
|
||||
if (brokerIdList == null) {
|
||||
continue;
|
||||
}
|
||||
for (Integer brokerId: brokerIdList) {
|
||||
if (topicMetadata.getBrokerIdSet().contains(brokerId)) {
|
||||
result.add(region);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Integer> getFullBrokerId(Long clusterId, List<Long> regionIdList, List<Integer> brokerIdList) {
|
||||
if (regionIdList == null || regionIdList.isEmpty()) {
|
||||
return brokerIdList == null ? new ArrayList<>() : new ArrayList<>(brokerIdList);
|
||||
}
|
||||
List<RegionDO> regionDOList = null;
|
||||
try {
|
||||
regionDOList = getByClusterId(clusterId);
|
||||
} catch (Exception e) {
|
||||
logger.error("getFullBrokerId@RegionServiceImpl, select mysql:region_info failed.", e);
|
||||
}
|
||||
if (regionDOList == null) {
|
||||
regionDOList = new ArrayList<>();
|
||||
}
|
||||
Set<Integer> brokerIdSet = new HashSet<>(brokerIdList == null? new ArrayList<>(): brokerIdList);
|
||||
for (RegionDO regionDO: regionDOList) {
|
||||
if (!regionIdList.contains(regionDO.getId())) {
|
||||
continue;
|
||||
}
|
||||
List<Integer> regionBrokerIdList = ListUtils.string2IntList(regionDO.getBrokerList());
|
||||
if (regionBrokerIdList == null) {
|
||||
continue;
|
||||
}
|
||||
brokerIdSet.addAll(regionBrokerIdList);
|
||||
}
|
||||
return new ArrayList<>(brokerIdSet);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,120 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.TopicFavoriteDO;
|
||||
import com.xiaojukeji.kafka.manager.dao.TopicFavoriteDao;
|
||||
import com.xiaojukeji.kafka.manager.dao.TopicDao;
|
||||
import com.xiaojukeji.kafka.manager.service.service.TopicManagerService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* @author arthur
|
||||
* @date 2017/7/21.
|
||||
*/
|
||||
@Service("topicManagerService")
|
||||
public class TopicManagerServiceImpl implements TopicManagerService {
|
||||
private static final Logger logger = LoggerFactory.getLogger(TopicManagerServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private TopicDao topicDao;
|
||||
|
||||
@Autowired
|
||||
private TopicFavoriteDao topicFavoriteDao;
|
||||
|
||||
@Autowired
|
||||
private ZookeeperService zookeeperService;
|
||||
|
||||
@Override
|
||||
public List<TopicDO> getByClusterId(Long clusterId) {
|
||||
if (clusterId == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
return topicDao.getByClusterId(clusterId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicDO getByTopicName(Long clusterId, String topicName) {
|
||||
if (StringUtils.isEmpty(topicName) || clusterId == null) {
|
||||
return null;
|
||||
}
|
||||
return topicDao.getByTopicName(clusterId, topicName);
|
||||
}
|
||||
|
||||
// @Override
|
||||
// public Properties getTopicProperties(Long clusterId, String topicName) {
|
||||
// if (clusterId == null || StringUtils.isEmpty(topicName)) {
|
||||
// return new Properties();
|
||||
// }
|
||||
// try {
|
||||
// ZkConfigImpl zkConfig = ClusterMetadataManager.getZKConfig(clusterId);
|
||||
// return zookeeperService.getTopicProperties(zkConfig, topicName);
|
||||
// } catch (Exception e) {
|
||||
// logger.error("getTopicProperties@TopicManagerService, get properties failed, clusterId:{} topicName:{}.", clusterId, topicName, e);
|
||||
// }
|
||||
// return new Properties();
|
||||
// }
|
||||
|
||||
@Override
|
||||
public Boolean addFavorite(List<TopicFavoriteDO> topicFavoriteDOList) {
|
||||
return topicFavoriteDao.batchAdd(topicFavoriteDOList) > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean delFavorite(List<TopicFavoriteDO> unFavoriteList) {
|
||||
if (unFavoriteList == null || unFavoriteList.isEmpty()) {
|
||||
return Boolean.TRUE;
|
||||
}
|
||||
List<TopicFavoriteDO> topicFavoriteDOList = topicFavoriteDao.getByUserName(unFavoriteList.get(0).getUsername());
|
||||
if (topicFavoriteDOList == null) {
|
||||
return Boolean.TRUE;
|
||||
}
|
||||
|
||||
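// key each favorite by "<clusterId>_<topicName>" so the two lists can be matched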
Set<String> unFavoriteSet = new HashSet<>();
|
||||
for (TopicFavoriteDO topicFavoriteDO: unFavoriteList) {
|
||||
unFavoriteSet.add(String.valueOf(topicFavoriteDO.getClusterId()) + "_" + topicFavoriteDO.getTopicName());
|
||||
}
|
||||
Set<Long> idSet = new HashSet<>();
|
||||
for (TopicFavoriteDO topicFavoriteDO: topicFavoriteDOList) {
|
||||
if (unFavoriteSet.contains(String.valueOf(topicFavoriteDO.getClusterId()) + "_" + topicFavoriteDO.getTopicName())) {
|
||||
idSet.add(topicFavoriteDO.getId());
|
||||
}
|
||||
}
|
||||
return topicFavoriteDao.batchDelete(new ArrayList<>(idSet));
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<TopicFavoriteDO> getFavorite(String username) {
|
||||
if (StringUtils.isEmpty(username)) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
try {
|
||||
return topicFavoriteDao.getByUserName(username);
|
||||
} catch (Exception e) {
|
||||
logger.error("getFavorite@TopicManangerServiceImpl, get favorite failed, username:{}.", username, e);
|
||||
}
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<TopicFavoriteDO> getFavorite(String username, Long clusterId) {
|
||||
if (StringUtils.isEmpty(username) && clusterId == null) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
try {
|
||||
if (clusterId == null) {
|
||||
return topicFavoriteDao.getByUserName(username);
|
||||
}
|
||||
return topicFavoriteDao.getByUserNameAndClusterId(username, clusterId);
|
||||
} catch (Exception e) {
|
||||
logger.error("getFavorite@TopicManangerServiceImpl, get favorite failed, username:{} clusterId:{}.", username, clusterId, e);
|
||||
}
|
||||
return new ArrayList<>();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,450 @@
|
||||
package com.xiaojukeji.kafka.manager.service.service.impl;
|
||||
|
||||
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.PartitionOffsetDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.TopicBasicDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.TopicOverviewDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.dto.TopicPartitionDTO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionMap;
|
||||
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.TopicMetadata;
|
||||
import com.xiaojukeji.kafka.manager.common.utils.DefaultThreadFactory;
|
||||
import com.xiaojukeji.kafka.manager.dao.TopicMetricsDao;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaClientCache;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.KafkaMetricsCache;
|
||||
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
|
||||
import com.xiaojukeji.kafka.manager.service.service.JmxService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.TopicService;
|
||||
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.FutureTask;
|
||||
|
||||
/**
|
||||
* @author limeng
|
||||
* @date 2018/5/4.
|
||||
*/
|
||||
@Service("topicService")
|
||||
public class TopicServiceImpl implements TopicService {
|
||||
private final static Logger logger = LoggerFactory.getLogger(TopicServiceImpl.class);
|
||||
|
||||
@Autowired
|
||||
private TopicMetricsDao topicMetricsDao;
|
||||
|
||||
@Autowired
|
||||
private ZookeeperService zookeeperService;
|
||||
|
||||
@Autowired
|
||||
private JmxService jmxService;
|
||||
|
||||
private static final int SPLIT_SIZE = 3;
|
||||
|
||||
private final ExecutorService TOPIC_THREAD_POOL = Executors.newFixedThreadPool(4, new DefaultThreadFactory("TopicServiceImpl-getTopicInfo"));
|
||||
|
||||
@Override
|
||||
public Long calTopicMaxAvgBytesIn(List<TopicMetrics> topicMetricsList, Integer maxAvgBytesInDuration) {
|
||||
if (topicMetricsList == null || maxAvgBytesInDuration == null) {
|
||||
throw new NullPointerException("calTopicMaxAvgBytesIn@TopicServiceImpl, param illegal.");
|
||||
}
|
||||
Double sumBytesIn = 0.0;
|
||||
Double tempSumBytesIn = 0.0;
|
||||
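// sliding window of length maxAvgBytesInDuration over bytesInPerSec:
// tempSumBytesIn holds the current window sum, sumBytesIn the largest window sum seen so far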
for (int i = 0; i < topicMetricsList.size(); ++i) {
|
||||
Double bytesIn = topicMetricsList.get(i).getBytesInPerSec();
|
||||
if (i < maxAvgBytesInDuration) {
|
||||
sumBytesIn += bytesIn;
|
||||
tempSumBytesIn += bytesIn;
|
||||
}
|
||||
if (i >= maxAvgBytesInDuration) {
|
||||
tempSumBytesIn = tempSumBytesIn + bytesIn - topicMetricsList.get(i - maxAvgBytesInDuration).getBytesInPerSec();
|
||||
}
|
||||
if (tempSumBytesIn > sumBytesIn) {
|
||||
sumBytesIn = tempSumBytesIn;
|
||||
}
|
||||
}
|
||||
if (topicMetricsList.size() > maxAvgBytesInDuration) {
|
||||
return Double.valueOf((sumBytesIn / maxAvgBytesInDuration)).longValue();
|
||||
} else if (topicMetricsList.size() == 0) {
|
||||
return null;
|
||||
}
|
||||
return Double.valueOf(sumBytesIn / topicMetricsList.size()).longValue();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<TopicMetrics> getTopicMetricsByInterval(Long clusterId, String topic, Date startTime, Date endTime) {
|
||||
return topicMetricsDao.getTopicMetricsByInterval(clusterId, topic, startTime, endTime);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, List<Integer>> getTopicPartitionIdMap(Long clusterId, Integer brokerId) {
|
||||
Map<String, List<Integer>> result = new HashMap<>();
|
||||
for (String topicName: ClusterMetadataManager.getTopicNameList(clusterId)) {
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
|
||||
if (topicMetadata == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Set<Integer> brokerIdSet = topicMetadata.getBrokerIdSet();
|
||||
if (brokerIdSet == null || !brokerIdSet.contains(brokerId)) {
|
||||
continue;
|
||||
}
|
||||
PartitionMap partitionMap = topicMetadata.getPartitionMap();
|
||||
result.put(topicName, new ArrayList<>(partitionMap.getPartitions().keySet()));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicBasicDTO getTopicBasicDTO(Long clusterId, String topicName) {
|
||||
TopicBasicDTO topicBasicDTO = new TopicBasicDTO();
|
||||
topicBasicDTO.setTopicName(topicName);
|
||||
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
|
||||
if (topicMetadata == null) {
|
||||
return topicBasicDTO;
|
||||
}
|
||||
topicBasicDTO.setBrokerNum(topicMetadata.getBrokerIdSet().size());
|
||||
topicBasicDTO.setReplicaNum(topicMetadata.getReplicaNum());
|
||||
topicBasicDTO.setPartitionNum(topicMetadata.getPartitionNum());
|
||||
topicBasicDTO.setCreateTime(topicMetadata.getCreateTime());
|
||||
topicBasicDTO.setModifyTime(topicMetadata.getModifyTime());
|
||||
topicBasicDTO.setRetentionTime(zookeeperService.getRetentionTime(clusterId, topicName));
|
||||
return topicBasicDTO;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<TopicPartitionDTO> getTopicPartitionDTO(ClusterDO clusterDO, String topicName, Boolean needOffsets) {
|
||||
if (clusterDO == null || StringUtils.isEmpty(topicName)) {
|
||||
return null;
|
||||
}
|
||||
TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterDO.getId(), topicName);
|
||||
if (topicMetadata == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
List<PartitionState> partitionStateList = new ArrayList<>();
|
||||
try {
|
||||
partitionStateList = zookeeperService.getTopicPartitionState(clusterDO.getId(), topicName);
|
||||
} catch (Exception e) {
|
||||
logger.error("getTopicPartitionInfo@TopicServiceImpl, get partition state error.", e);
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> offsetMap = needOffsets? getTopicPartitionOffset(clusterDO, topicName): new HashMap<>(0);
|
||||
|
||||
List<TopicPartitionDTO> topicPartitionDTOList = new ArrayList<>();
|
||||
for (PartitionState partitionState: partitionStateList) {
|
||||
TopicPartitionDTO topicPartitionDTO = new TopicPartitionDTO();
|
||||
topicPartitionDTO.setPartitionId(partitionState.getPartitionId());
|
||||
topicPartitionDTO.setLeaderBrokerId(partitionState.getLeader());
|
||||
topicPartitionDTO.setLeaderEpoch(partitionState.getLeaderEpoch());
|
||||
topicPartitionDTO.setReplicasBroker(topicMetadata.getPartitionMap().getPartitions().get(partitionState.getPartitionId()));
|
||||
if (topicPartitionDTO.getReplicasBroker() != null && !topicPartitionDTO.getReplicasBroker().isEmpty()) {
|
||||
topicPartitionDTO.setPreferredBrokerId(topicPartitionDTO.getReplicasBroker().get(0));
|
||||
}
|
||||
topicPartitionDTO.setIsr(partitionState.getIsr());
|
||||
topicPartitionDTO.setOffset(offsetMap.get(new TopicPartition(topicName, partitionState.getPartitionId())));
|
||||
|
||||
if (topicPartitionDTO.getIsr().size() < topicPartitionDTO.getReplicasBroker().size()) {
|
||||
topicPartitionDTO.setUnderReplicated(true);
|
||||
} else {
|
||||
topicPartitionDTO.setUnderReplicated(false);
|
||||
}
|
||||
topicPartitionDTOList.add(topicPartitionDTO);
|
||||
}
|
||||
return topicPartitionDTOList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicMetrics getTopicMetrics(Long clusterId, String topicName, List<String> specifiedFieldList) {
|
||||
return jmxService.getSpecifiedTopicMetricsFromJmx(clusterId, topicName, specifiedFieldList, false);
|
||||
}
|
||||
|
||||
    @Override
    public Map<TopicPartition, Long> getTopicPartitionOffset(ClusterDO clusterDO, String topicName) {
        if (clusterDO == null || StringUtils.isEmpty(topicName)) {
            return new HashMap<>(0);
        }
        TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterDO.getId(), topicName);
        if (topicMetadata == null) {
            return new HashMap<>();
        }
        List<TopicPartition> topicPartitionList = new ArrayList<>();
        for (int partitionId = 0; partitionId < topicMetadata.getPartitionNum(); ++partitionId) {
            topicPartitionList.add(new TopicPartition(topicName, partitionId));
        }
        Map<TopicPartition, Long> topicPartitionLongMap = new HashMap<>();
        try {
            KafkaConsumer kafkaConsumer = KafkaClientCache.getApiKafkaConsumerClient(clusterDO);
            topicPartitionLongMap = kafkaConsumer.endOffsets(topicPartitionList);
        } catch (Exception e) {
            logger.error("getTopicOffset@TopicServiceImpl, get topic endOffsets failed, clusterId:{} topicName:{}.", clusterDO.getId(), topicName, e);
        }
        return topicPartitionLongMap;
    }

    @Override
    public List<TopicOverviewDTO> getTopicOverviewDTOList(final Long clusterId,
                                                          final Integer filterBrokerId,
                                                          final List<String> filterTopicNameList) {
        if (clusterId == null) {
            throw new IllegalArgumentException("getTopicInfo@TopicServiceImpl, param illegal");
        }

        // Filter the topics to query
        List<String> topicNameList = new ArrayList<>();
        if (filterTopicNameList == null) {
            topicNameList = ClusterMetadataManager.getTopicNameList(clusterId);
        } else {
            for (String filterTopicName : filterTopicNameList) {
                if (ClusterMetadataManager.isTopicExist(clusterId, filterTopicName)) {
                    topicNameList.add(filterTopicName);
                }
            }
        }
        final Map<String, TopicMetrics> topicMetricsMap = getTopicMetricsFromCache(clusterId);

        // Number of batches, rounded up, e.g. 105 topics with SPLIT_SIZE = 50 yield 3 batches of 50, 50 and 5
        int split = topicNameList.size() / SPLIT_SIZE;
        if (topicNameList.size() % SPLIT_SIZE != 0) {
            split++;
        }
        FutureTask<List<TopicOverviewDTO>>[] taskList = new FutureTask[split];
        for (int i = 0; i < split; i++) {
            final List<String> subTopicNameList = topicNameList.subList(i * SPLIT_SIZE, (i + 1 == split) ? topicNameList.size() : (i + 1) * SPLIT_SIZE);
            taskList[i] = new FutureTask<List<TopicOverviewDTO>>(new Callable<List<TopicOverviewDTO>>() {
                @Override
                public List<TopicOverviewDTO> call() throws Exception {
                    List<TopicOverviewDTO> result = new ArrayList<>();
                    for (String topicName : subTopicNameList) {
                        TopicOverviewDTO topicOverviewDTO = getTopicOverviewDTO(clusterId, topicName, filterBrokerId, topicMetricsMap);
                        if (topicOverviewDTO == null) {
                            continue;
                        }
                        result.add(topicOverviewDTO);
                    }
                    return result;
                }
            });
            TOPIC_THREAD_POOL.submit(taskList[i]);
        }

        List<TopicOverviewDTO> result = new ArrayList<>();
        for (int i = 0; i < taskList.length; ++i) {
            try {
                result.addAll(taskList[i].get());
            } catch (Exception e) {
                List<String> subTopicNameList = topicNameList.subList(i * SPLIT_SIZE, (i + 1 == split) ? topicNameList.size() : (i + 1) * SPLIT_SIZE);
                logger.error("getTopicInfo@TopicServiceImpl, get topic simple info failed, clusterId:{} brokerId:{} subTopicNameList:{}.", clusterId, filterBrokerId, subTopicNameList.toString(), e);
            }
        }
        return result;
    }

    @Override
    public Map<String, List<PartitionState>> getTopicPartitionState(Long clusterId, Integer filterBrokerId) {
        if (clusterId == null || filterBrokerId == null) {
            return new HashMap<>();
        }

        List<String> topicNameList = ClusterMetadataManager.getTopicNameList(clusterId);
        int split = topicNameList.size() / SPLIT_SIZE;
        if (topicNameList.size() % SPLIT_SIZE != 0) {
            split++;
        }

        FutureTask<Map<String, List<PartitionState>>>[] taskList = new FutureTask[split];
        for (int i = 0; i < split; i++) {
            final List<String> subTopicNameList = topicNameList.subList(i * SPLIT_SIZE, (i + 1 == split) ? topicNameList.size() : (i + 1) * SPLIT_SIZE);
            taskList[i] = new FutureTask<Map<String, List<PartitionState>>>(new Callable<Map<String, List<PartitionState>>>() {
                @Override
                public Map<String, List<PartitionState>> call() throws Exception {
                    Map<String, List<PartitionState>> subPartitionStateMap = new HashMap<>();
                    for (String topicName : subTopicNameList) {
                        List<PartitionState> partitionStateList = getTopicPartitionState(clusterId, topicName);
                        if (partitionStateList == null || partitionStateList.isEmpty()) {
                            continue;
                        }
                        subPartitionStateMap.put(topicName, partitionStateList);
                    }
                    return subPartitionStateMap;
                }
            });
            TOPIC_THREAD_POOL.submit(taskList[i]);
        }

        Map<String, List<PartitionState>> partitionStateMap = new HashMap<>();
        for (int i = 0; i < taskList.length; ++i) {
            try {
                partitionStateMap.putAll(taskList[i].get());
            } catch (Exception e) {
                List<String> subTopicNameList = topicNameList.subList(i * SPLIT_SIZE, (i + 1 == split) ? topicNameList.size() : (i + 1) * SPLIT_SIZE);
                logger.error("getBrokerTopicPartitionInfo@TopicServiceImpl, get topic partition state failed, clusterId:{} brokerId:{} subTopicNameList:{}.", clusterId, filterBrokerId, subTopicNameList.toString(), e);
            }
        }
        return partitionStateMap;
    }


    private List<PartitionState> getTopicPartitionState(Long clusterId, String topicName) {
        TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
        if (topicMetadata == null) {
            return null;
        }

        List<PartitionState> partitionStateList = null;
        try {
            partitionStateList = zookeeperService.getTopicPartitionState(clusterId, topicName);
            // Mark a partition as under-replicated when its ISR is smaller than the configured replica number
            for (PartitionState partitionState : partitionStateList) {
                if (topicMetadata.getReplicaNum() > partitionState.getIsr().size()) {
                    partitionState.setUnderReplicated(true);
                } else {
                    partitionState.setUnderReplicated(false);
                }
            }
        } catch (Exception e) {
            logger.error("getBrokerTopicPartitionInfo@TopicServiceImpl, get partition state from zk failed, clusterId:{} topicName:{}.", clusterId, topicName, e);
        }
        return partitionStateList;
    }


    private TopicOverviewDTO getTopicOverviewDTO(Long clusterId, String topicName, Integer filterBrokerId, Map<String, TopicMetrics> topicMetricsMap) {
        TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterId, topicName);
        if (topicMetadata == null) {
            return null;
        }
        if (filterBrokerId != -1 && !topicMetadata.getBrokerIdSet().contains(filterBrokerId)) {
            return null;
        }

        TopicOverviewDTO topicOverviewDTO = new TopicOverviewDTO();
        topicOverviewDTO.setClusterId(clusterId);
        topicOverviewDTO.setTopicName(topicName);
        topicOverviewDTO.setPartitionNum(topicMetadata.getPartitionNum());
        topicOverviewDTO.setReplicaNum(topicMetadata.getReplicaNum());
        topicOverviewDTO.setUpdateTime(topicMetadata.getModifyTime());

        TopicMetrics topicMetrics = topicMetricsMap.get(topicName);
        if (topicMetrics == null) {
            // Cache miss: fall back to reading the flow metrics directly from JMX
            topicMetrics = jmxService.getSpecifiedTopicMetricsFromJmx(clusterId, topicName, TopicMetrics.getFieldNameList(MetricsType.TOPIC_FLOW_DETAIL), true);
        }
        topicOverviewDTO.setBytesInPerSec(topicMetrics.getBytesInPerSec());
        topicOverviewDTO.setProduceRequestPerSec(topicMetrics.getTotalProduceRequestsPerSec());
        return topicOverviewDTO;
    }


    /**
     * Get the topic traffic metrics collected by DataCollectorManager (read from the local metrics cache)
     */
    private Map<String, TopicMetrics> getTopicMetricsFromCache(Long clusterId) {
        List<TopicMetrics> topicMetricsList = KafkaMetricsCache.getTopicMetricsFromCache(clusterId);
        if (topicMetricsList == null) {
            return new HashMap<>();
        }

        Map<String, TopicMetrics> result = new HashMap<>();
        for (TopicMetrics topicMetrics : topicMetricsList) {
            result.put(topicMetrics.getTopicName(), topicMetrics);
        }
        return result;
    }

    @Override
    public List<PartitionOffsetDTO> getPartitionOffsetList(ClusterDO clusterDO, String topicName, Long timestamp) {
        TopicMetadata topicMetadata = ClusterMetadataManager.getTopicMetaData(clusterDO.getId(), topicName);
        if (topicMetadata == null) {
            return null;
        }
        Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
        for (Integer partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
            timestampsToSearch.put(new TopicPartition(topicName, partitionId), timestamp);
        }
        if (timestampsToSearch.isEmpty()) {
            return new ArrayList<>();
        }

        List<PartitionOffsetDTO> partitionOffsetDTOList = new ArrayList<>();
        try {
            KafkaConsumer kafkaConsumer = KafkaClientCache.getApiKafkaConsumerClient(clusterDO);
            Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch);
            if (offsetAndTimestampMap == null) {
                return new ArrayList<>();
            }
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetAndTimestampMap.entrySet()) {
                TopicPartition tp = entry.getKey();
                OffsetAndTimestamp offsetAndTimestamp = entry.getValue();
                if (offsetAndTimestamp == null) {
                    // offsetsForTimes maps a partition to null when it has no message at or after the timestamp
                    continue;
                }
                partitionOffsetDTOList.add(new PartitionOffsetDTO(tp.partition(), offsetAndTimestamp.offset(), offsetAndTimestamp.timestamp()));
            }
        } catch (Exception e) {
            logger.error("getTopicOffsetList@TopicServiceImpl, get offset failed, clusterId:{} topicName:{} timestamp:{}.", clusterDO.getId(), topicName, timestamp, e);
        }
        return partitionOffsetDTOList;
    }

    @Override
    public List<String> fetchTopicData(ClusterDO clusterDO, List<TopicPartition> topicPartitionList, int timeout, int maxMsgNum, long offset, boolean truncate) {
        KafkaConsumer kafkaConsumer = KafkaClientCache.getApiKafkaConsumerClient(clusterDO);
        if (kafkaConsumer == null) {
            return null;
        }
        if (offset == -1) {
            // No offset supplied: start maxMsgNum messages before the latest offset of the first partition
            Map<TopicPartition, Long> topicPartitionLongMap = kafkaConsumer.endOffsets(topicPartitionList);
            Long tempOffset = topicPartitionLongMap.get(topicPartitionList.get(0));
            if (tempOffset == null) {
                return null;
            }
            offset = Math.max(tempOffset - maxMsgNum, 0);
        }
        return fetchTopicData(kafkaConsumer, topicPartitionList, offset, maxMsgNum, timeout, truncate);
    }

    private List<String> fetchTopicData(KafkaConsumer kafkaConsumer, List<TopicPartition> topicPartitionList, long startOffset, int maxMsgNum, int timeout, boolean truncate) {
        List<String> result = new ArrayList<>();
        long timestamp = System.currentTimeMillis();

        kafkaConsumer.assign(topicPartitionList);
        while (result.size() < maxMsgNum) {
            try {
                kafkaConsumer.seek(topicPartitionList.get(0), startOffset);
                ConsumerRecords<String, String> records = kafkaConsumer.poll(2000);
                for (ConsumerRecord record : records) {
                    // Continue from the record after the last one consumed, otherwise the next poll re-reads it
                    startOffset = record.offset() + 1;

                    String dataValue = (String) record.value();
                    int maxLength = 2048;
                    if (dataValue.length() > maxLength && truncate) {
                        dataValue = dataValue.substring(0, maxLength);
                    }

                    result.add(dataValue);
                }
                Thread.sleep(10);
                if (System.currentTimeMillis() - timestamp > timeout) {
                    break;
                }
            } catch (Exception e) {
                logger.error("fetchTopicData@TopicServiceImpl, fetch failed, tp:{} offset:{}", topicPartitionList, startOffset, e);
                return null;
            }
        }
        return result;
    }
}
@@ -0,0 +1,63 @@
package com.xiaojukeji.kafka.manager.service.service.impl;

import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.service.cache.ClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
import com.xiaojukeji.kafka.manager.common.utils.zk.ZkPathUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import java.util.*;

@Service("zookeeperService")
public class ZookeeperServiceImpl implements ZookeeperService {
    private final static Logger logger = LoggerFactory.getLogger(ZookeeperServiceImpl.class);

    @Override
    public Properties getTopicProperties(Long clusterId, String topicName) throws ConfigException {
        // The topic config znode (typically /config/topics/<topic>) stores JSON such as
        // {"version":1,"config":{"retention.ms":"86400000"}}; only the inner "config" object is returned.
        ZkConfigImpl zkConfig = ClusterMetadataManager.getZKConfig(clusterId);
        String path = ZkPathUtil.getConfigTopicNode(topicName);
        Properties properties = zkConfig.get(path, Properties.class);
        JSONObject jsonObject = (JSONObject) properties.get("config");
        return JSONObject.parseObject(jsonObject.toJSONString(), Properties.class);
    }

    @Override
    public List<PartitionState> getTopicPartitionState(Long clusterId, String topicName) throws ConfigException {
        ZkConfigImpl zkConfig = ClusterMetadataManager.getZKConfig(clusterId);
        Integer partitionNum = ClusterMetadataManager.getTopicMetaData(clusterId, topicName).getPartitionNum();

        List<PartitionState> partitionStateList = new ArrayList<>();
        for (int partitionId = 0; partitionId < partitionNum; ++partitionId) {
            String partitionStatePath = ZkPathUtil.getBrokerTopicPartitionStatePath(topicName, partitionId);
            PartitionState partitionState = zkConfig.get(partitionStatePath, PartitionState.class);
            partitionState.setPartitionId(partitionId);
            partitionStateList.add(partitionState);
        }
        return partitionStateList;
    }

    @Override
    public Long getRetentionTime(Long clusterId, String topicName) {
        try {
            Properties properties = getTopicProperties(clusterId, topicName);
            if (properties == null || !properties.containsKey("retention.ms")) {
                return null;
            }
            // retention.ms may come back as a String, Integer or Long depending on how the JSON value was written
            if (properties.get("retention.ms") instanceof String) {
                return Long.valueOf(properties.getProperty("retention.ms"));
            } else if (properties.get("retention.ms") instanceof Integer) {
                return Long.valueOf((Integer) properties.get("retention.ms"));
            } else if (properties.get("retention.ms") instanceof Long) {
                return (Long) properties.get("retention.ms");
            }
        } catch (Exception e) {
            logger.error("get retentionTime failed, clusterId:{} topicName:{}.", clusterId, topicName, e);
        }
        return null;
    }
}
@@ -0,0 +1,27 @@
package com.xiaojukeji.kafka.manager.service.utils;

import kafka.admin.BrokerMetadata;
import scala.Option;
import scala.collection.JavaConversions;
import scala.collection.Seq;

import java.util.ArrayList;
import java.util.List;

/**
 * Created by limeng on 2017/10/24
 */
public class BrokerMetadataUtil {

    /**
     * Convert a Scala Seq of broker ids into a Seq of kafka.admin.BrokerMetadata (with no rack information).
     */
    public static Seq<BrokerMetadata> convert2BrokerMetadata(Seq<Object> brokerListSeq) {
        List<BrokerMetadata> brokerMetadataList = new ArrayList<>();
        scala.collection.Iterator<Object> brokerIter = brokerListSeq.iterator();
        while (brokerIter.hasNext()) {
            Integer brokerId = (Integer) brokerIter.next();
            BrokerMetadata brokerMetadata = new BrokerMetadata(brokerId, Option.<String>empty());
            brokerMetadataList.add(brokerMetadata);
        }
        return JavaConversions.asScalaBuffer(brokerMetadataList).toSeq();
    }
}
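// Illustrative usage sketch (editor-added, not part of the original source). It shows how a plain Java list
// of broker ids could be handed to convert2BrokerMetadata; the variable names are assumptions:
//
//     List<Object> brokerIds = Arrays.<Object>asList(1, 2, 3);
//     Seq<Object> brokerIdSeq = JavaConversions.asScalaBuffer(brokerIds).toSeq();
//     Seq<BrokerMetadata> metadata = BrokerMetadataUtil.convert2BrokerMetadata(brokerIdSeq);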
@@ -0,0 +1,87 @@
package com.xiaojukeji.kafka.manager.service.utils;

import org.springframework.util.StringUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * @author arthur
 * @date 2017/7/30.
 */
public class ListUtils {
    private static final String REGEX = ",";

    public static List<Integer> string2IntList(String str) {
        if (!StringUtils.hasText(str)) {
            return new ArrayList<>();
        }
        List<Integer> intList = new ArrayList<>();
        for (String elem : str.split(REGEX)) {
            if (!StringUtils.hasText(elem)) {
                continue;
            }
            intList.add(Integer.parseInt(elem));
        }
        return intList;
    }

    public static List<String> string2StrList(String str) {
        if (!StringUtils.hasText(str)) {
            return new ArrayList<>();
        }
        List<String> strList = new ArrayList<>();
        for (String elem : str.split(REGEX)) {
            if (!StringUtils.hasText(elem)) {
                continue;
            }
            strList.add(elem);
        }
        return strList;
    }

    public static String longList2String(List<Long> longList) {
        if (longList == null || longList.isEmpty()) {
            return "";
        }

        StringBuilder sb = new StringBuilder();
        for (Long elem : longList) {
            if (elem == null) {
                continue;
            }
            sb.append(elem).append(REGEX);
        }
        return sb.length() > 0 ? sb.substring(0, sb.length() - 1) : sb.toString();
    }

    public static String intList2String(List<Integer> intList) {
        if (intList == null || intList.isEmpty()) {
            return "";
        }

        StringBuilder sb = new StringBuilder();
        for (Integer elem : intList) {
            if (elem == null) {
                continue;
            }
            sb.append(elem).append(REGEX);
        }
        return sb.length() > 0 ? sb.substring(0, sb.length() - 1) : sb.toString();
    }

    public static String strList2String(List<String> strList) {
        if (strList == null || strList.isEmpty()) {
            return "";
        }

        StringBuilder sb = new StringBuilder();
        for (String elem : strList) {
            if (!StringUtils.hasText(elem)) {
                continue;
            }
            sb.append(elem).append(REGEX);
        }
        return sb.length() > 0 ? sb.substring(0, sb.length() - 1) : sb.toString();
    }
}
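// Illustrative examples (editor-added, not part of the original source) of the ListUtils round-trip behavior:
//
//     ListUtils.string2IntList("1,2,,3")                  -> [1, 2, 3]   (blank elements are skipped)
//     ListUtils.intList2String(Arrays.asList(1, 2, 3))    -> "1,2,3"
//     ListUtils.strList2String(Collections.emptyList())   -> ""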
@@ -0,0 +1,49 @@
package com.xiaojukeji.kafka.manager.service.utils;

import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

/**
 * Utility for summing the fields of two objects of the same class.
 *
 * @author tukun on 2015/11/11.
 */
public class ObjectUtil {

    /**
     * Adds o2's field values into o1 for every field whose name contains filterName,
     * walking the class hierarchy up to (but excluding) Object.
     * Only Double-typed fields are currently supported.
     * @param o1
     * @param o2
     * @param filterName
     */
    public static void add(Object o1, Object o2, String filterName) {
        if (!o1.getClass().equals(o2.getClass())) {
            return;
        }
        Field[] fields = o1.getClass().getDeclaredFields();
        List<Field> fieldList = new LinkedList<Field>(Arrays.asList(fields));
        Class superClass = o1.getClass().getSuperclass();
        while (!"Object".equals(superClass.getSimpleName())) {
            Field[] superFields = superClass.getDeclaredFields();
            fieldList.addAll(Arrays.asList(superFields));
            superClass = superClass.getSuperclass();
        }
        try {
            for (Field field : fieldList) {
                field.setAccessible(true);
                String fieldName = field.getName();
                if (fieldName.contains(filterName)) {
                    field.set(o1, (Double) field.get(o1) + (Double) field.get(o2));
                }
            }
        } catch (IllegalAccessException e) {
            // Ignored: fields are made accessible above, so this is not expected
        } catch (Exception e) {
            // Ignored: a matching field that is not a Double would end up here
        }
    }

}
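// Illustrative sketch (editor-added, not part of the original source). Assuming a metrics class with Double
// fields such as bytesInPerSec and bytesOutPerSec, ObjectUtil.add accumulates one instance into another:
//
//     ObjectUtil.add(totalMetrics, brokerMetrics, "PerSec");
//     // every Double field of totalMetrics whose name contains "PerSec" now holds the element-wise sum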
@@ -0,0 +1,80 @@
package com.xiaojukeji.kafka.manager.service.utils;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Service;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;

/**
 * Holds the Spring ApplicationContext in a static variable so it can be retrieved from anywhere at any time.
 * @author huangyiminghappy@163.com
 * @date 2019-05-08
 */
@Service
@Lazy(false)
public class SpringContextHolder implements ApplicationContextAware, DisposableBean {
    private static ApplicationContext applicationContext = null;
    private static Logger logger = LoggerFactory.getLogger(SpringContextHolder.class);

    /**
     * Get the ApplicationContext stored in the static variable.
     */
    public static ApplicationContext getApplicationContext() {
        return applicationContext;
    }

    /**
     * Get a bean by name from the static applicationContext, automatically cast to the target type.
     */
    public static <T> T getBean(String name) {
        return (T) applicationContext.getBean(name);
    }

    /**
     * Get a bean by type from the static applicationContext, automatically cast to the target type.
     */
    public static <T> T getBean(Class<T> requiredType) {
        return (T) applicationContext.getBean(requiredType);
    }

    /**
     * Clear the ApplicationContext held by SpringContextHolder (set it to null).
     */
    public static void clearHolder() {
        if (logger.isDebugEnabled()) {
            logger.debug("Clearing the ApplicationContext held by SpringContextHolder: " + applicationContext);
        }
        applicationContext = null;
    }

    /**
     * Implements ApplicationContextAware: injects the context into the static variable.
     */
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        SpringContextHolder.applicationContext = applicationContext;
    }

    /**
     * Implements DisposableBean: clears the static variable when the context is closed.
     */
    @Override
    public void destroy() throws Exception {
        SpringContextHolder.clearHolder();
    }

    public static String getUserName() {
        HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
        HttpSession session = request.getSession();
        return (String) session.getAttribute("username");
    }
}
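// Illustrative usage sketch (editor-added, not part of the original source); FooService is a hypothetical bean:
//
//     FooService fooService = SpringContextHolder.getBean(FooService.class);
//     FooService sameBean   = SpringContextHolder.getBean("fooService");
//     String currentUser    = SpringContextHolder.getUserName();  // needs an active web request with a "username" session attribute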
3
service/src/test/java/META-INF/MANIFEST.MF
Normal file
3
service/src/test/java/META-INF/MANIFEST.MF
Normal file
@@ -0,0 +1,3 @@
Manifest-Version: 1.0
Class-Path:

@@ -0,0 +1,47 @@
package com.xiaojukeji.kafka.manager.service;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;

import org.junit.Test;

public class FutureTest {

    @Test
    public void test() throws InterruptedException, ExecutionException {

        FutureTask<Integer> f1 = new FutureTask<Integer>(new Callable<Integer>() {

            @Override
            public Integer call() throws InterruptedException {
                Thread.sleep(1000L);
                return 1;
            }

        });

        FutureTask<Integer> f2 = new FutureTask<Integer>(new Callable<Integer>() {

            @Override
            public Integer call() throws InterruptedException {
                Thread.sleep(1000L);
                return 2;
            }

        });

        ExecutorService threadPool = Executors.newCachedThreadPool();

        long ct = System.currentTimeMillis();

        threadPool.submit(f1);
        threadPool.submit(f2);
        threadPool.shutdown();

        // Both tasks run concurrently, so the elapsed time printed is roughly one second rather than two
        System.out.println(f1.get() + " : " + f2.get() + " use:"
                + (System.currentTimeMillis() - ct));
    }
}
@@ -0,0 +1,13 @@
package com.xiaojukeji.kafka.manager.service.utils;

import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

/**
 * Created by arthur on 2017/5/31.
 */
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = { "classpath:biz-test.xml" })
public class SpringTestBase {
}