Initialize version 3.0.0

zengqiao
2022-08-18 17:04:05 +08:00
parent 462303fca0
commit 51832385b1
2446 changed files with 93177 additions and 127211 deletions

km-persistence/pom.xml Normal file

@@ -0,0 +1,61 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>km-persistence</artifactId>
<version>${km.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>km</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${km.revision}</version>
</parent>
<properties>
<!-- maven properties -->
<maven.test.skip>true</maven.test.skip>
<downloadSources>true</downloadSources>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
</properties>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-common</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>org.mariadb.jdbc</groupId>
<artifactId>mariadb-java-client</artifactId>
</dependency>
<dependency>
<groupId>io.github.zqrferrari</groupId>
<artifactId>logi-elasticsearch-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.13</artifactId>
</dependency>
</dependencies>
</project>

AbstractClusterLoadedChangedHandler.java Normal file

@@ -0,0 +1,43 @@
package com.xiaojukeji.know.streaming.km.persistence;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyLoadChangedEvent;
import org.springframework.context.ApplicationListener;
import java.util.concurrent.locks.ReentrantLock;
/**
* Handler for changes to the set of loaded physical clusters
* @author zengqiao
* @date 22/02/25
*/
public abstract class AbstractClusterLoadedChangedHandler implements ApplicationListener<ClusterPhyLoadChangedEvent> {
private static final ILog log = LogFactory.getLog(AbstractClusterLoadedChangedHandler.class);
protected final ReentrantLock modifyClientMapLock = new ReentrantLock();
protected abstract void add(ClusterPhy clusterPhy);
protected abstract void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy);
protected abstract void remove(ClusterPhy clusterPhy);
@Override
public void onApplicationEvent(ClusterPhyLoadChangedEvent event) {
switch (event.getOperationEnum()) {
case ADD:
this.add(event.getInDBClusterPhy());
break;
case EDIT:
this.modify(event.getInDBClusterPhy(), event.getInCacheClusterPhy());
break;
case DELETE:
this.remove(event.getInCacheClusterPhy());
break;
default:
log.error("method=onApplicationEvent||event={}||msg=illegal event", event);
}
}
}
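
For orientation, a minimal sketch of how a concrete handler would plug into this class; the subclass name and method bodies are hypothetical and not part of this commit (only the three abstract callbacks and the Spring event wiring come from the code above, and the @Component import from Spring is assumed):

@Component
public class ExampleKafkaClientHandler extends AbstractClusterLoadedChangedHandler {
    @Override
    protected void add(ClusterPhy clusterPhy) {
        // hypothetical: create and cache a client for the newly loaded cluster
    }

    @Override
    protected void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy) {
        // hypothetical: rebuild the client when the cluster definition changes
        remove(oldClusterPhy);
        add(newClusterPhy);
    }

    @Override
    protected void remove(ClusterPhy clusterPhy) {
        // hypothetical: close and evict the cached client
    }
}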

LoadedClusterPhyCache.java Normal file

@@ -0,0 +1,40 @@
package com.xiaojukeji.know.streaming.km.persistence.cache;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
* @date 22/02/25
*/
public class LoadedClusterPhyCache {
/**
* <physical cluster ID, physical cluster PO>
*/
private static final Map<Long, ClusterPhy> PHY_CLUSTER_MAP = new ConcurrentHashMap<>();
private LoadedClusterPhyCache() {
}
public static boolean containsByPhyId(Long clusterPhyId) {
return PHY_CLUSTER_MAP.containsKey(clusterPhyId);
}
public static ClusterPhy getByPhyId(Long clusterPhyId) {
return PHY_CLUSTER_MAP.get(clusterPhyId);
}
public static ClusterPhy remove(Long clusterPhyId) {
return PHY_CLUSTER_MAP.remove(clusterPhyId);
}
public static void replace(ClusterPhy clusterPhy) {
PHY_CLUSTER_MAP.put(clusterPhy.getId(), clusterPhy);
}
public static Map<Long, ClusterPhy> listAll() {
return PHY_CLUSTER_MAP;
}
}
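
A brief usage sketch of the static cache (the call site below is hypothetical): lookups are by physical cluster ID, and a loader elsewhere is expected to call replace(...) after reading clusters from the DB. Note that listAll() returns the live ConcurrentHashMap rather than a copy, so callers observe concurrent updates.

// hypothetical call site
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(1L);
if (clusterPhy == null) {
    // cluster 1 has not been loaded yet
}
for (Map.Entry<Long, ClusterPhy> entry : LoadedClusterPhyCache.listAll().entrySet()) {
    // iterate over every loaded cluster
}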

BaseESDAO.java Normal file

@@ -0,0 +1,25 @@
package com.xiaojukeji.know.streaming.km.persistence.es;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslLoaderUtil;
import org.springframework.beans.factory.annotation.Autowired;
/**
* DAO that operates directly on the ES cluster
*/
public class BaseESDAO {
protected static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
/**
* Utility for loading query DSL statements
*/
@Autowired
protected DslLoaderUtil dslLoaderUtil;
/**
* Arius client for operating the ES cluster
*/
@Autowired
protected ESOpClient esOpClient;
}

ESOpClient.java Normal file

@@ -0,0 +1,450 @@
package com.xiaojukeji.know.streaming.km.persistence.es;
import com.alibaba.fastjson.JSON;
import com.didiglobal.logi.elasticsearch.client.ESClient;
import com.didiglobal.logi.elasticsearch.client.gateway.document.ESIndexRequest;
import com.didiglobal.logi.elasticsearch.client.gateway.document.ESIndexResponse;
import com.didiglobal.logi.elasticsearch.client.model.type.ESVersion;
import com.didiglobal.logi.elasticsearch.client.request.batch.BatchNode;
import com.didiglobal.logi.elasticsearch.client.request.batch.BatchType;
import com.didiglobal.logi.elasticsearch.client.request.batch.ESBatchRequest;
import com.didiglobal.logi.elasticsearch.client.request.query.query.ESQueryRequest;
import com.didiglobal.logi.elasticsearch.client.response.batch.ESBatchResponse;
import com.didiglobal.logi.elasticsearch.client.response.batch.IndexResultItemNode;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Lists;
import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
import com.xiaojukeji.know.streaming.km.common.constant.ESConstant;
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpStatus;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import javax.annotation.Nullable;
import javax.annotation.PostConstruct;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
@Component
public class ESOpClient {
private static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
/**
* ES address
*/
@Value("${es.client.address}")
private String esAddress;
/**
* ES access password
*/
@Value("${es.client.pass:''}")
private String esPass;
/**
* Number of pooled clients
*/
private static final int ES_CLIENT_COUNT = 30;
private static final int MAX_RETRY_CNT = 5;
private static final int ES_IO_THREAD_COUNT = 4;
/**
* Pool of ES client connections used to read and write ES data
*/
private LinkedBlockingQueue<ESClient> esClientPool = new LinkedBlockingQueue<>(ES_CLIENT_COUNT);
@PostConstruct
public void init(){
for (int i = 0; i < ES_CLIENT_COUNT; ++i) {
ESClient esClient = buildEsClient(esAddress, esPass, "", "");
if (esClient != null) {
this.esClientPool.add(esClient);
LOGGER.info("class=ESOpClient||method=init||msg=add new es client {}", esAddress);
}
}
}
/**
* Acquire a client from the ES HTTP client connection pool
*
* @return an ESClient, or null if none could be acquired
*/
public ESClient getESClientFromPool() {
ESClient esClient = null;
int retryCount = 0;
// keep polling until a client is acquired, up to MAX_RETRY_CNT attempts
while (esClient == null && retryCount < MAX_RETRY_CNT) {
try {
++retryCount;
esClient = esClientPool.poll(3, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
if (esClient == null) {
LOGGER.error( "class=ESOpClient||method=getESClientFromPool||errMsg=fail to get es client from pool");
}
return esClient;
}
/**
* Return a client to the ES HTTP client connection pool
*
* @param esClient
*/
public void returnESClientToPool(ESClient esClient) {
try {
this.esClientPool.put(esClient);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/**
* Execute the query and return the first hit
*
* @param indexName
* @param queryDsl
* @param clzz
* @param <T>
* @return
*/
public <T> T performRequestAndTakeFirst(String indexName, String queryDsl, Class<T> clzz) {
List<T> hits = performRequest(indexName, queryDsl, clzz);
if (CollectionUtils.isEmpty(hits)) {
return null;
}
return hits.get(0);
}
/**
* Execute the query with routing and return the first hit
*
* @param routingValue
* @param indexName
* @param queryDsl
* @param clazz
* @param <T>
* @return
*/
public <T> T performRequestAndTakeFirst(String routingValue, String indexName,
String queryDsl, Class<T> clazz) {
List<T> hits = performRequestWithRouting(routingValue, indexName, queryDsl, clazz);
if (CollectionUtils.isEmpty(hits)) {return null;}
return hits.get(0);
}
/**
* Execute the query
*
* @param indexName
* @param queryDsl
* @return
*/
public ESQueryResponse performRequest(String indexName,String queryDsl) {
return doQuery(new ESQueryRequest().indices(indexName).source(queryDsl));
}
public <R> R performRequest(String indexName, String queryDsl, Function<ESQueryResponse, R> func, int tryTimes) {
ESQueryResponse esQueryResponse;
do {
esQueryResponse = doQuery(new ESQueryRequest().indices(indexName).source(queryDsl));
} while (tryTimes-- > 0 && null == esQueryResponse);
return func.apply(esQueryResponse);
}
public <T> List<T> performRequest(String indexName, String queryDsl, Class<T> clzz) {
ESQueryResponse esQueryResponse = doQuery(
new ESQueryRequest().indices(indexName).source(queryDsl).clazz(clzz));
if (esQueryResponse == null) {
return new ArrayList<>();
}
List<Object> objectList = esQueryResponse.getSourceList();
if (CollectionUtils.isEmpty(objectList)) {
return new ArrayList<>();
}
List<T> hits = Lists.newLinkedList();
for (Object obj : objectList) {
hits.add((T) obj);
}
return hits;
}
public <T> List<T> performRequestWithRouting(String routingValue, String indexName, String queryDsl, Class<T> clzz) {
ESQueryResponse esQueryResponse = doQuery(
new ESQueryRequest().routing(routingValue).indices(indexName).source(queryDsl).clazz(clzz));
if (esQueryResponse == null) {
return new ArrayList<>();
}
List<Object> objectList = esQueryResponse.getSourceList();
if (CollectionUtils.isEmpty(objectList)) {
return new ArrayList<>();
}
List<T> hits = Lists.newLinkedList();
for (Object obj : objectList) {
hits.add((T) obj);
}
return hits;
}
public <R> R performRequestWithRouting(String routingValue, String indexName,
String queryDsl, Function<ESQueryResponse, R> func, int tryTimes) {
ESQueryResponse esQueryResponse;
do {
esQueryResponse = doQuery(new ESQueryRequest().routing(routingValue).indices(indexName).source(queryDsl));
} while (tryTimes-- > 0 && null == esQueryResponse);
return func.apply(esQueryResponse);
}
/**
* Write a single document
*
* @param source
* @return
*/
public boolean index(String indexName, String id, String source) {
ESClient esClient = null;
ESIndexResponse response = null;
try {
esClient = getESClientFromPool();
if (esClient == null) {
return false;
}
ESIndexRequest esIndexRequest = new ESIndexRequest();
esIndexRequest.setIndex(indexName);
esIndexRequest.source(source);
esIndexRequest.id(id);
for (int i = 0; i < MAX_RETRY_CNT; ++i) {
response = esClient.index(esIndexRequest).actionGet(10, TimeUnit.SECONDS);
if (response == null) {
continue;
}
return response.getRestStatus().getStatus() == HttpStatus.SC_OK
|| response.getRestStatus().getStatus() == HttpStatus.SC_CREATED;
}
} catch (Exception e) {
LOGGER.warn(
"class=ESOpClient||method=index||indexName={}||id={}||source={}||errMsg=index doc error. ",
indexName, id, source, e);
if (response != null) {
LOGGER.warn(
"class=ESOpClient||method=index||indexName={}||id={}||source={}||errMsg=response {}",
indexName, id, source, JSON.toJSONString(response));
}
} finally {
if (esClient != null) {
returnESClientToPool(esClient);
}
}
return false;
}
/**
* Bulk write
*
* @param indexName
* @return
*/
public boolean batchInsert(String indexName, List<? extends BaseESPO> pos) {
if (CollectionUtils.isEmpty(pos)) {
return true;
}
ESClient esClient = null;
ESBatchResponse response = null;
try {
esClient = getESClientFromPool();
if (esClient == null) {
return false;
}
ESBatchRequest batchRequest = new ESBatchRequest();
for (BaseESPO po : pos) {
//write with routing
if (null != po.getRoutingValue()) {
BatchNode batchNode = new BatchNode(BatchType.INDEX, indexName, null, po.getKey(), JSON.toJSONString(po));
batchNode.setRouting(po.getRoutingValue());
batchNode.setEsVersion(ESVersion.ES760);
batchRequest.addNode(batchNode);
continue;
}
//write without routing
batchRequest.addNode(BatchType.INDEX, indexName, null, po.getKey(), JSON.toJSONString(po));
}
for (int i = 0; i < MAX_RETRY_CNT; ++i) {
response = esClient.batch(batchRequest).actionGet(2, TimeUnit.MINUTES);
if (response == null) {continue;}
if (handleErrorResponse(indexName, pos, response)) {return false;}
return response.getRestStatus().getStatus() == HttpStatus.SC_OK && !response.getErrors();
}
} catch (Exception e) {
LOGGER.warn(
"method=batchInsert||indexName={}||errMsg=batch insert error. ", indexName, e);
if (response != null) {
LOGGER.warn("method=batchInsert||indexName={}||errMsg=response {}", indexName, JSON.toJSONString(response));
}
} finally {
if (esClient != null) {
returnESClientToPool(esClient);
}
}
return false;
}
/**************************************************** private method ****************************************************/
/**
* Execute the query request
* @param request
* @return
*/
@Nullable
private ESQueryResponse doQuery(ESQueryRequest request) {
ESClient esClient = null;
try {
esClient = getESClientFromPool();
if (esClient == null) {
return null;
}
ESQueryResponse response = esClient.query(request).actionGet(120, TimeUnit.SECONDS);
if(!EnvUtil.isOnline()){
LOGGER.info("method=doQuery||indexName={}||queryDsl={}||ret={}",
request.indices(), bytesReferenceConvertDsl(request.source()), JSON.toJSONString(response));
}
return response;
} catch (Exception e) {
LOGGER.error( "method=doQuery||indexName={}||queryDsl={}||errMsg=query error. ",
request.indices(), bytesReferenceConvertDsl(request.source()), e);
return null;
}finally {
if (esClient != null) {
returnESClientToPool(esClient);
}
}
}
private boolean handleErrorResponse(String indexName, List<? extends BaseESPO> pos, ESBatchResponse response) {
if (response.getErrors().booleanValue()) {
int errorItemIndex = 0;
if (CollectionUtils.isNotEmpty(response.getItems())) {
for (IndexResultItemNode item : response.getItems()) {
recordErrorResponseItem(indexName, pos, errorItemIndex++, item);
}
}
return true;
}
return false;
}
private void recordErrorResponseItem(String indexName, List<? extends BaseESPO> pos, int errorItemIndex, IndexResultItemNode item) {
if (item.getIndex() != null && item.getIndex().getShards() != null
&& CollectionUtils.isNotEmpty(item.getIndex().getShards().getFailures())) {
LOGGER.warn(
"class=ESOpClient||method=batchInsert||indexName={}||errMsg=Failures: {}, content: {}",
indexName, item.getIndex().getShards().getFailures().toString(),
JSON.toJSONString(pos.get(errorItemIndex)));
}
if (item.getIndex() != null && item.getIndex().getError() != null) {
LOGGER.warn(
"class=ESOpClient||method=batchInsert||indexName={}||errMsg=Error: {}, content: {}",
indexName, item.getIndex().getError().getReason(),
JSON.toJSONString(pos.get(errorItemIndex)));
}
}
/**
* Convert the DSL bytes into a JSON string
*
* @param bytes
* @return
*/
private String bytesReferenceConvertDsl(BytesReference bytes) {
try {
return XContentHelper.convertToJson(bytes, false);
} catch (IOException e) {
LOGGER.warn("class=ESOpClient||method=bytesReferenceConvertDsl||errMsg=fail to covert", e);
}
return "";
}
private ESClient buildEsClient(String address,String password,String clusterName, String version) {
if (StringUtils.isBlank(address)) {
return null;
}
ESClient esClient = new ESClient();
try {
esClient.addTransportAddresses(address);
if(StringUtils.isNotBlank(clusterName)) {
esClient.setClusterName(clusterName);
}
if(StringUtils.isNotBlank(version)){
esClient.setEsVersion(version);
}
if(StringUtils.isNotBlank(password)){
esClient.setPassword(password);
}
if(ES_IO_THREAD_COUNT > 0) {
esClient.setIoThreadCount( ES_IO_THREAD_COUNT );
}
// configure HTTP timeouts
esClient.setRequestConfigCallback(builder -> builder.setConnectTimeout(10000).setSocketTimeout(120000)
.setConnectionRequestTimeout(120000));
esClient.start();
return esClient;
} catch (Exception e) {
esClient.close();
LOGGER.error("class=ESESOpClient||method=buildEsClient||errMsg={}||address={}", e.getMessage(), address,
e);
return null;
}
}
}
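
To illustrate how callers use the pooled client, a hypothetical call site (the index name, DSL string and document below are invented for the example; esOpClient is assumed to be Spring-injected, and performRequest/index internally borrow a client from the pool, retry, and return it):

// hypothetical caller
String dsl = "{\"size\":1,\"query\":{\"term\":{\"clusterPhyId\":{\"value\":1}}}}";
ClusterMetricPO latest = esOpClient.performRequestAndTakeFirst("ks_kafka_cluster_metric_demo", dsl, ClusterMetricPO.class);
boolean ok = esOpClient.index("ks_kafka_cluster_metric_demo", "demo-doc-id", "{\"clusterPhyId\":1}");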

ESOpTimeoutRetry.java Normal file

@@ -0,0 +1,89 @@
package com.xiaojukeji.know.streaming.km.persistence.es;
import com.xiaojukeji.know.streaming.km.common.exception.BaseException;
import com.xiaojukeji.know.streaming.km.common.exception.ESOperateException;
import com.xiaojukeji.know.streaming.km.common.utils.RetryExecutor;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException;
import java.util.function.Function;
/**
* ES operation retry helper:
* 1. if the operation returns false, return false directly
* 2. if the operation keeps timing out, return false after the given number of retries
* 3. if the operation throws a non-timeout exception, the exception is propagated
* 4. if the operation returns true, return true directly
*
* @author d06679
* @date 2017/8/24
*/
public class ESOpTimeoutRetry {
private static final int SEC_30 = 30 * 1000;
private static final int MIN_5 = 5 * 60 * 1000;
private ESOpTimeoutRetry(){}
public static boolean esRetryExecute(String methodName, int tryCount,
RetryExecutor.Handler handler) throws ESOperateException {
try {
return RetryExecutor.builder().name(methodName).retryCount(tryCount).handler(new RetryExecutor.Handler() {
@Override
public boolean process() throws BaseException {
return handler.process();
}
@Override
public boolean needRetry(Exception e) {
return e instanceof ProcessClusterEventTimeoutException
|| e instanceof ElasticsearchTimeoutException;
}
@Override
public int retrySleepTime(int retryTimes){
int sleepTime = retryTimes * SEC_30;
int randomSleepTime = (int)(Math.random() * 100);
int totalSleepTime = sleepTime + randomSleepTime;
return totalSleepTime > MIN_5 ? MIN_5 : totalSleepTime;
}
}).execute();
} catch (Exception e) {
throw new ESOperateException(e.getMessage(), e);
}
}
/**
* Retry with a caller-supplied wait time between attempts
* @param methodName method name
* @param tryCount number of retries
* @param handler the operation to retry
* @param retrySleepTime wait time between retries
* @return result of the retried operation
* @throws ESOperateException if the operation ultimately fails
*/
public static boolean esRetryExecuteWithGivenTime(String methodName, int tryCount,
RetryExecutor.Handler handler, Function<Integer, Integer> retrySleepTime) throws ESOperateException {
try {
return RetryExecutor.builder().name(methodName).retryCount(tryCount).handler(new RetryExecutor.Handler() {
@Override
public boolean process() throws BaseException {
return handler.process();
}
@Override
public boolean needRetry(Exception e) {
return e instanceof ProcessClusterEventTimeoutException
|| e instanceof ElasticsearchTimeoutException;
}
@Override
public int retrySleepTime(int retryTimes) {
return retrySleepTime.apply(retryTimes);
}
}).execute();
} catch (Exception e) {
throw new ESOperateException(e.getMessage(), e);
}
}
}
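
A usage sketch of the retry helper, assuming RetryExecutor.Handler only forces process() to be implemented (which is how esRetryExecute itself wraps the caller's handler); the operation body and method name are hypothetical:

boolean succeed = ESOpTimeoutRetry.esRetryExecute("demoEsWrite", 3, new RetryExecutor.Handler() {
    @Override
    public boolean process() throws BaseException {
        // hypothetical ES operation: timeouts trigger another attempt,
        // other failures surface as ESOperateException from esRetryExecute
        return true;
    }
});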

BaseMetricESDAO.java Normal file

@@ -0,0 +1,364 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
import com.google.common.collect.Maps;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.*;
import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BaseMetricESPO;
import com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum;
import com.xiaojukeji.know.streaming.km.common.utils.IndexNameUtils;
import com.xiaojukeji.know.streaming.km.persistence.es.BaseESDAO;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import lombok.NoArgsConstructor;
import org.springframework.util.CollectionUtils;
import java.util.*;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
@NoArgsConstructor
public class BaseMetricESDAO extends BaseESDAO {
/**
* Name of the index this DAO operates on
*/
protected String indexName;
protected static final Long ONE_MIN = 60 * 1000L;
protected static final Long FIVE_MIN = 5 * ONE_MIN;
protected static final Long ONE_HOUR = 60 * ONE_MIN;
protected static final Long ONE_DAY = 24 * ONE_HOUR;
/**
* Metric DAOs for the different Kafka metric dimensions
*/
private static Map<KafkaMetricIndexEnum, BaseMetricESDAO> ariusStatsEsDaoMap = Maps
.newConcurrentMap();
public static BaseMetricESDAO getByStatsType(KafkaMetricIndexEnum statsType) {
return ariusStatsEsDaoMap.get(statsType);
}
/**
* Register the ES DAO responsible for a given metric dimension
*
* @param statsType
* @param baseAriusStatsEsDao
*/
public static void register(KafkaMetricIndexEnum statsType, BaseMetricESDAO baseAriusStatsEsDao) {
ariusStatsEsDaoMap.put(statsType, baseAriusStatsEsDao);
}
/**
* Bulk insert index statistics
* @param statsInfo
* @return
*/
public boolean batchInsertStats(List<? extends BaseESPO> statsInfo) {
String realIndex = IndexNameUtils.genCurrentDailyIndexName(indexName);
return esOpClient.batchInsert(realIndex, statsInfo);
}
public String realIndex(long startTime, long endTime){
return IndexNameUtils.genDailyIndexName(indexName, startTime, endTime);
}
/**
* Build a sort DSL fragment of the form:
* {
*   "FIELD": {
*     "order": "desc"
*   }
* }
*/
public String buildSortDsl(SearchSort sort, SearchSort def){
if(null == sort || !sort.valid()){sort = def;}
Map<String, String> orderMap = new HashMap<>();
orderMap.put(ORDER, sort.isDesc() ? DESC : ASC);
String fieldName = sort.isMetric() ? METRICS_DOT + sort.getQueryName() : sort.getQueryName();
Map<String, Map<String, String>> fieldMap = new HashMap<>();
fieldMap.put(fieldName, orderMap);
return JSON.toJSONString(fieldMap);
}
/**
* Build a range DSL fragment of the form:
* {
*   "range": {
*     "timestamp": {
*       "gte": 1646984254883,
*       "lte": 1646987854883
*     }
*   }
* }
*/
public String buildRangeDsl(SearchRange range){
if(null == range || !range.valid()){return "";}
Map<String, Float> glTEMap = new HashMap<>();
glTEMap.put(GTE, range.getMin());
glTEMap.put(LTE, range.getMax());
String fieldName = range.getRealMetricName();
Map<String, Map<String, Float>> rangeKeyMap = new HashMap<>();
rangeKeyMap.put(fieldName, glTEMap);
Map<String, Map<String, Map<String, Float>>> rangeMap = new HashMap<>();
rangeMap.put(RANGE, rangeKeyMap);
return JSON.toJSONString(rangeMap);
}
/**
* Build match DSL fragments of the form:
* {
*   "match": {
*     "groupName": "g-know-streaming-123456"
*   }
* },
* {
*   "match": {
*     "groupName": "g-know-streaming-123456"
*   }
* }
*/
public String buildMatchDsl(List<SearchTerm> matches){
if(CollectionUtils.isEmpty(matches)){return "";}
List<Map<String, Map<String, Object>>> list = new ArrayList<>();
for(SearchTerm match : matches){
if(null == match || !match.valid()){continue;}
String fieldName = match.getRealMetricName();
Map<String, Object> matchItem = new HashMap<>();
matchItem.put(fieldName, match.getQueryValue());
Map<String, Map<String, Object>> matchMap = new HashMap<>();
matchMap.put(MATCH, matchItem);
list.add(matchMap);
}
String json = JSON.toJSONString(list);
return json.substring(1, json.length() - 1);
}
/**
* {
* "term": {
* "clusterPhyId": {
* "value": 2
* }
* }
* },
* {
* "term": {
* "timestamp": {
* "value": 1649845260000
* }
* }
* }
* @param terms
* @return
*/
public String buildTermsDsl(List<SearchTerm> terms){
if(CollectionUtils.isEmpty(terms)){return "";}
List<String> termStrList = new ArrayList<>();
for(SearchTerm match : terms){
if(null == match || !match.valid()){continue;}
String fieldName = match.getRealMetricName();
termStrList.add(buildTermDsl(fieldName, match.getQueryValue()));
}
if(CollectionUtils.isEmpty(termStrList)){return "";}
return String.join(",", termStrList);
}
/**
* {
* "bool": {
* "should": [
* {
* "term": {
* "clusterPhyId": {
* "value": "1"
* }
* }
* },
* {
* "term": {
* "clusterPhyId": {
* "value": "2"
* }
* }
* }
* ]
* }
* }
* @param shoulds
* @return
*/
public String buildShouldDsl(List<SearchShould> shoulds){
if(CollectionUtils.isEmpty(shoulds)){return "";}
List<String> fieldValueStrList = new ArrayList<>();
for(SearchShould should : shoulds){
if(null == should || !should.valid()){return "";}
String fieldName = should.getRealMetricName();
for(Object fieldValue : should.getQueryValues()){
fieldValueStrList.add(buildTermDsl(fieldName, fieldValue));
}
}
String format = "{\"bool\":{\"should\": [%s]}}";
String fieldValueStr = String.join(",", fieldValueStrList);
return String.format(format, fieldValueStr);
}
public String buildShouldDsl(SearchShould should){
return buildShouldDsl( Arrays.asList(should));
}
protected String buildTermDsl(String fieldName, Object fieldValue){
Map<String, Object> valueItem = new HashMap<>();
valueItem.put(VALUE, fieldValue);
Map<String, Map<String, Object>> fieldMap = new HashMap<>();
fieldMap.put(fieldName, valueItem);
Map<String, Map<String, Map<String, Object>>> termMap = new HashMap<>();
termMap.put(TERM, fieldMap);
return JSON.toJSONString(termMap);
}
public String buildPrefixDsl(SearchFuzzy fuzzy){
if(null == fuzzy || !fuzzy.valid()){return "";}
String fieldName = fuzzy.getRealMetricName();
Map<String, Object> valueItem = new HashMap<>();
valueItem.put(VALUE, fuzzy.getQueryValue());
Map<String, Map<String, Object>> fieldMap = new HashMap<>();
fieldMap.put(fieldName, valueItem);
Map<String, Map<String, Map<String, Object>>> termMap = new HashMap<>();
termMap.put(PREFIX, fieldMap);
return JSON.toJSONString(termMap);
}
public String buildAggsDSL(List<String> metrics, String aggType) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < metrics.size(); i++) {
String metricName = metrics.get(i);
Map<String, String> aggsSubSubCellMap = Maps.newHashMap();
aggsSubSubCellMap.put(FIELD, METRICS_DOT + metricName);
buildAggsDslMap(aggType, sb, metricName, aggsSubSubCellMap);
if (i != metrics.size() - 1) {
sb.append(",").append("\n");
}
}
return sb.toString();
}
public void buildAggsDslMap(String aggType, StringBuilder sb, String metricName,
Map<String, String> aggsSubSubCellMap) {
Map<String, Object> aggsSubCellMap = Maps.newHashMap();
aggsSubCellMap.put(aggType, aggsSubSubCellMap);
Map<String, Object> aggsCellMap = Maps.newHashMap();
aggsCellMap.put(metricName, aggsSubCellMap);
JSONObject jsonObject = new JSONObject(aggsCellMap);
String str = jsonObject.toJSONString();
sb.append(str, 1, str.length() - 1);
}
protected Map<String, ESAggr> checkBucketsAndHitsOfResponseAggs(ESQueryResponse response){
if(null == response || null == response.getAggs()){
return null;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap || null == esAggrMap.get(HIST)) {
return null;
}
if(CollectionUtils.isEmpty(esAggrMap.get(HIST).getBucketList())){
return null;
}
return esAggrMap;
}
protected int handleESQueryResponseCount(ESQueryResponse response){
if(null == response || null == response.getHits()
|| null ==response.getHits().getUnusedMap()){return -1;}
return Integer.valueOf(response.getHits().getUnusedMap().getOrDefault(TOTAL, 0).toString());
}
protected <T extends BaseMetricESPO> T filterMetrics(T t, List<String> metricNames){
t.setMetrics(t.getMetrics(metricNames));
return t;
}
protected <T extends BaseMetricESPO> List<T> filterMetrics(List<T> ts, List<String> metricNames){
ts.stream().forEach(t -> filterMetrics(t, metricNames));
return ts;
}
/**
* Get the latest time point for which metric data exists
*/
protected Long getLatestMetricTime() {
return getLatestMetricTime("");
}
protected Long getLatestMetricTime(Long clusterId) {
String appendQueryDsl = buildTermDsl("clusterPhyId" ,clusterId);
return getLatestMetricTime(appendQueryDsl);
}
protected Long getLatestMetricTime(Long clusterId, String appendQueryDsl) {
String clusterQueryDsl = buildTermDsl("clusterPhyId" ,clusterId);
return getLatestMetricTime(appendQueryDsl + "," + clusterQueryDsl);
}
protected Long getLatestMetricTime(String appendQueryDsl) {
Long endTime = System.currentTimeMillis();
Long startTime = endTime - 12 * ONE_HOUR;
String dsl = dslLoaderUtil.getFormatDslByFileName(DslsConstant.GET_LATEST_METRIC_TIME, startTime, endTime, appendQueryDsl);
String realIndexName = IndexNameUtils.genDailyIndexName(indexName, startTime, endTime);
return esOpClient.performRequest(realIndexName, dsl, s -> s.getHits().getHits().isEmpty()
? System.currentTimeMillis() : ((Map<String, Long>)s.getHits().getHits().get(0).getSource()).get(TIME_STAMP), 3);
}
}
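
To make buildAggsDSL concrete, a hypothetical call and its approximate output as it would appear inside one of the metric DAO subclasses below, assuming ESConstant.FIELD is "field" and METRICS_DOT is "metrics." (neither constant value appears in this commit):

// hypothetical: two metric names, averaged
String aggDsl = buildAggsDSL(Arrays.asList("BytesInPerSec", "BytesOutPerSec"), "avg");
// aggDsl would look roughly like:
//   "BytesInPerSec":{"avg":{"field":"metrics.BytesInPerSec"}},
//   "BytesOutPerSec":{"avg":{"field":"metrics.BytesOutPerSec"}}
// and is spliced into the "aggs" section of a DSL template by dslLoaderUtil.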

BrokerMetricESDAO.java Normal file

@@ -0,0 +1,286 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.BROKER_INFO;
@Component
public class BrokerMetricESDAO extends BaseMetricESDAO {
@PostConstruct
public void init() {
super.indexName = BROKER_INFO.getIndex();
BaseMetricESDAO.register(BROKER_INFO, this);
}
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("BrokerMetricESDAO", 4,8, 500);
/**
* Get the latest metrics of broker brokerId in cluster clusterId
*/
public BrokerMetricPO getBrokerLatestMetrics(Long clusterId, Integer brokerId){
Long endTime = getLatestMetricTime(clusterId);
Long startTime = endTime - FIVE_MIN;
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_BROKER_LATEST_METRICS, clusterId, brokerId, startTime, endTime);
BrokerMetricPO brokerMetricPO = esOpClient.performRequestAndTakeFirst(
brokerId.toString(), realIndex(startTime, endTime), dsl, BrokerMetricPO.class);
return (null == brokerMetricPO) ? new BrokerMetricPO(clusterId, brokerId) : brokerMetricPO;
}
/**
* For cluster clusterPhyId, get the aggregated (avg, max, ...) value of each metric for the given broker within [startTime, endTime]
*/
public Map<String/*metric*/, MetricPointVO> getBrokerMetricsPoint(Long clusterPhyId, Integer brokerId, List<String> metrics,
String aggType, Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
//2. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, aggType);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_BROKER_AGG_SINGLE_METRICS, clusterPhyId, brokerId, startTime, endTime, aggDsl);
return esOpClient.performRequestWithRouting(String.valueOf(brokerId), realIndex, dsl,
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
}
/**
* 获取集群 clusterPhyId 中每个 metric 的 topN 的 broker 在指定时间[startTime、endTime]区间内所有的指标
* topN 按照[startTime, endTime] 时间段内最后一个值来排序
*/
public Table<String/*metric*/, Long/*brokerId*/, List<MetricPointVO>> listBrokerMetricsByTop(Long clusterPhyId, List<Long> brokerIds,
List<String> metrics, String aggType, int topN,
Long startTime, Long endTime){
//1. get the topN brokerIds to query; each metric may have a different topN set of brokerIds
Map<String, List<Long>> metricBrokerIds = getTopNBrokerIds(clusterPhyId, metrics, aggType, topN, startTime, endTime);
Table<String, Long, List<MetricPointVO>> table = HashBasedTable.create();
//2. query the metrics
for(String metric : metricBrokerIds.keySet()){
table.putAll(listBrokerMetricsByBrokerIds(clusterPhyId, Arrays.asList(metric),
aggType, metricBrokerIds.getOrDefault(metric, brokerIds), startTime, endTime));
}
return table;
}
/**
* For cluster clusterPhyId, list all metric points within [startTime, endTime] for the given brokers
*/
public Table<String/*metric*/, Long/*brokerId*/, List<MetricPointVO>> listBrokerMetricsByBrokerIds(Long clusterPhyId, List<String> metrics,
String aggType, List<Long> brokerIds,
Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
//2. choose the aggregation interval based on the size of the query time range
String interval = MetricsUtils.getInterval(endTime - startTime);
//3. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, aggType);
final Table<String, Long, List<MetricPointVO>> table = HashBasedTable.create();
//4. build the query DSL and run the queries
for(Long brokerId : brokerIds){
try {
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_BROKER_AGG_LIST_METRICS, clusterPhyId, brokerId, startTime, endTime, interval, aggDsl);
queryFuture.runnableTask(
String.format("class=BrokerMetricESDAO||method=listBrokerMetricsByBrokerIds||ClusterPhyId=%d", clusterPhyId),
5000,
() -> {
Map<String, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(String.valueOf(brokerId), realIndex, dsl,
s -> handleListESQueryResponse(s, metrics, aggType), 3);
synchronized (table){
for(String metric : metricMap.keySet()){
table.put(metric, brokerId, metricMap.get(metric));
}
}
});
}catch (Exception e){
LOGGER.error("method=listBrokerMetricsByBrokerIds||clusterPhyId={}||brokerId{}||errMsg=exception!", clusterPhyId, brokerId, e);
}
}
queryFuture.waitExecute();
return table;
}
/**
* For cluster clusterPhyId, get the topN brokers of each metric
*/
//public for test
public Map<String, List<Long>> getTopNBrokerIds(Long clusterPhyId, List<String> metrics,
String aggType, int topN,
Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
//2. choose the aggregation interval based on the size of the query time range
String interval = MetricsUtils.getInterval(endTime - startTime);
//3. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, aggType);
//4. query ES
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_BROKER_AGG_TOP_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
return esOpClient.performRequest(realIndex, dsl,
s -> handleTopBrokerESQueryResponse(s, metrics, topN), 3);
}
/**************************************************** private method ****************************************************/
private Map<String/*metric*/, MetricPointVO> handleSingleESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Map<String/*metric*/, MetricPointVO> metricMap = new HashMap<>();
if(null == response || null == response.getAggs()){
return metricMap;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap) {
return metricMap;
}
for(String metric : metrics){
String value = esAggrMap.get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricMap.put(metric, metricPoint);
}
return metricMap;
}
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
if(null == response || null == response.getAggs()){
return metricMap;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap || null == esAggrMap.get(HIST)) {
return metricMap;
}
if(CollectionUtils.isEmpty(esAggrMap.get(HIST).getBucketList())){
return metricMap;
}
for(String metric : metrics){
List<MetricPointVO> metricPoints = new ArrayList<>();
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
String value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setTimeStamp(timestamp);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricPoints.add(metricPoint);
}else {
LOGGER.info("");
}
}catch (Exception e){
LOGGER.error("metric={}||errMsg=exception!", metric, e);
}
} );
metricMap.put(metric, metricPoints);
}
return metricMap;
}
private Map<String, List<Long>> handleTopBrokerESQueryResponse(ESQueryResponse response, List<String> metrics, int topN){
Map<String, List<Long>> ret = new HashMap<>();
if(null == response || null == response.getAggs()){
return ret;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap || null == esAggrMap.get(HIST)) {
return ret;
}
if(CollectionUtils.isEmpty(esAggrMap.get(HIST).getBucketList())){
return ret;
}
Map<String, List<Tuple<Long, Double>>> metricBrokerValueMap = new HashMap<>();
//1. first collect, for each metric, all brokerIds and their metric values
for(String metric : metrics) {
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
Long brokerId = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
Double value = Double.valueOf(esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap()
.get(metric).getUnusedMap().get(VALUE).toString());
List<Tuple<Long, Double>> brokerValue = (null == metricBrokerValueMap.get(metric)) ?
new ArrayList<>() : metricBrokerValueMap.get(metric);
brokerValue.add(new Tuple<>(brokerId, value));
metricBrokerValueMap.put(metric, brokerValue);
}
}catch (Exception e){
LOGGER.error("metrice={}||errMsg=exception!", metric, e);
}
} );
}
//2. for each metric, sort the brokers by metric value and keep the top-N brokerIds
for(String metric : metricBrokerValueMap.keySet()){
List<Tuple<Long, Double>> brokerValue = metricBrokerValueMap.get(metric);
brokerValue.sort((o1, o2) -> {
if(null == o1 || null == o2){return 0;}
return o2.getV2().compareTo(o1.getV2());
} );
List<Tuple<Long, Double>> temp = (brokerValue.size() > topN) ? brokerValue.subList(0, topN) : brokerValue;
List<Long> brokerIds = temp.stream().map(t -> t.getV1()).collect( Collectors.toList());
ret.put(metric, brokerIds);
}
return ret;
}
}
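
A hypothetical call site for the top-N broker query (cluster ID, broker IDs, metric name and time range are all invented for the example; brokerMetricESDAO is assumed to be Spring-injected):

long end = System.currentTimeMillis();
long start = end - 60 * 60 * 1000L;  // last hour
Table<String, Long, List<MetricPointVO>> table = brokerMetricESDAO.listBrokerMetricsByTop(
        1L,                              // clusterPhyId
        Arrays.asList(1L, 2L, 3L),       // fallback brokerIds used when a metric has no top-N result
        Arrays.asList("BytesInPerSec"),  // metric names (assumed)
        "avg", 3, start, end);
List<MetricPointVO> series = table.get("BytesInPerSec", 1L); // points for broker 1, if it made the top-N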

ClusterMetricESDAO.java Normal file

@@ -0,0 +1,215 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchShould;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchRange;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchSort;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ClusterMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.CLUSTER_INFO;
@Component
public class ClusterMetricESDAO extends BaseMetricESDAO {
@PostConstruct
public void init() {
super.indexName = CLUSTER_INFO.getIndex();
BaseMetricESDAO.register(CLUSTER_INFO, this);
}
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("ClusterMetricESDAO", 4,8, 500);
/**
* Get the latest metrics of cluster clusterId
*/
public ClusterMetricPO getClusterLatestMetrics(Long clusterId, List<String> metricNames){
Long endTime = getLatestMetricTime(clusterId);
Long startTime = endTime - FIVE_MIN;
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_CLUSTER_LATEST_METRICS, clusterId, startTime, endTime);
ClusterMetricPO clusterMetricPO = esOpClient.performRequestAndTakeFirst(
clusterId.toString(), realIndex(startTime, endTime), dsl, ClusterMetricPO.class);
return (null == clusterMetricPO) ? new ClusterMetricPO(clusterId)
: filterMetrics(clusterMetricPO, metricNames);
}
/**
* For cluster clusterPhyId, get the aggregated (avg, max, ...) value of each metric within [startTime, endTime]
*/
public Map<String/*metric*/, MetricPointVO> getClusterMetricsPoint(Long clusterPhyId, List<String> metrics,
String aggType, Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
//2. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, aggType);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_CLUSTER_AGG_SINGLE_METRICS, clusterPhyId, startTime, endTime, aggDsl);
return esOpClient.performRequestWithRouting(String.valueOf(clusterPhyId), realIndex, dsl,
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
}
/**
* Get the paged list of clusters, with latest metrics, sorted by a given metric
*/
public List<ClusterMetricPO> pagingClusterWithLatestMetrics(List<SearchTerm> terms, List<SearchShould> shoulds,
SearchSort sort, SearchRange range){
Long latestMetricTime = getLatestMetricTime();
Long startTime = latestMetricTime - FIVE_MIN;
//1. resolve the indexes to query
String realIndex = realIndex(startTime, latestMetricTime);
String sortDsl = buildSortDsl(sort, SearchSort.DEFAULT);
String rangeDsl = buildRangeDsl(range);
String termDsl = buildTermsDsl(terms);
String shouldDsl = buildShouldDsl(shoulds);
StringBuilder appendQueryDsl = new StringBuilder();
if(!StringUtils.isEmpty(rangeDsl)){
appendQueryDsl.append(",").append(rangeDsl);
}
if(!StringUtils.isEmpty(termDsl)){
appendQueryDsl.append(",").append(termDsl);
}
if(!StringUtils.isEmpty(shouldDsl)){
appendQueryDsl.append(",").append(shouldDsl);
}
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.LIST_CLUSTER_WITH_LATEST_METRICS, latestMetricTime, appendQueryDsl.toString(), sortDsl);
return esOpClient.performRequest(realIndex, dsl, ClusterMetricPO.class);
}
/**
* Get the metric values of a batch of clusters clusterPhyIds over a time range
*/
public Table<String/*metric*/, Long/*clusterId*/, List<MetricPointVO>> listClusterMetricsByClusterIds(List<String> metrics,
String aggType, List<Long> clusterPhyIds,
Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
//2. choose the aggregation interval based on the size of the query time range
String interval = MetricsUtils.getInterval(endTime - startTime);
//3. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, aggType);
final Table<String, Long, List<MetricPointVO>> table = HashBasedTable.create();
//4. build the query DSL and run the queries
for(Long clusterPhyId : clusterPhyIds){
try {
queryFuture.runnableTask(
String.format("class=ClusterMetricESDAO||method=listClusterMetricsByClusterIds||ClusterPhyId=%d", clusterPhyId),
5000,
() -> {
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_CLUSTER_AGG_LIST_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(
String.valueOf(clusterPhyId), realIndex, dsl,
s -> handleListESQueryResponse(s, metrics, aggType), 3);
synchronized (table){
for(String metric : metricMap.keySet()){
table.put(metric, clusterPhyId, metricMap.get(metric));
}
}
});
}catch (Exception e){
LOGGER.error("method=listClusterMetricsByClusterIds||clusterPhyId={}||errMsg=exception!",
clusterPhyId, e);
}
}
queryFuture.waitExecute();
return table;
}
/**************************************************** private method ****************************************************/
private Map<String/*metric*/, MetricPointVO> handleSingleESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Map<String/*metric*/, MetricPointVO> metricMap = new HashMap<>();
if(null == response || null == response.getAggs()){
return metricMap;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap) {
return metricMap;
}
for(String metric : metrics){
String value = esAggrMap.get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricMap.put(metric, metricPoint);
}
return metricMap;
}
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
if(null == esAggrMap){return new HashMap<>();}
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
for(String metric : metrics){
List<MetricPointVO> metricPoints = new ArrayList<>();
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
String value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setTimeStamp(timestamp);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricPoints.add(metricPoint);
}
}catch (Exception e){
LOGGER.error("method=handleESQueryResponse||metric={}||errMsg=exception!", metric, e);
}
} );
metricMap.put(metric, metricPoints);
}
return metricMap;
}
}

GroupMetricESDAO.java Normal file

@@ -0,0 +1,218 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionKS;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.GroupMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.Constant.ZERO;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.KEY;
import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.GROUP_INFO;
@Component
public class GroupMetricESDAO extends BaseMetricESDAO {
@PostConstruct
public void init() {
super.indexName = GROUP_INFO.getIndex();
BaseMetricESDAO.register(GROUP_INFO, this);
}
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("GroupMetricESDAO", 4,8, 500);
public List<GroupMetricPO> listLatestMetricsAggByGroupTopic(Long clusterPhyId, List<GroupTopic> groupTopicList, List<String> metrics, AggTypeEnum aggType){
Long latestTime = getLatestMetricTime();
Long startTime = latestTime - FIVE_MIN;
//1. resolve the indexes to query
String realIndex = realIndex(startTime, latestTime);
//2. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, aggType.getAggType());
List<GroupMetricPO> groupMetricPOS = new CopyOnWriteArrayList<>();
for(GroupTopic groupTopic : groupTopicList){
queryFuture.runnableTask(
String.format("class=GroupMetricESDAO||method=listLatestMetricsAggByGroupTopic||ClusterPhyId=%d||groupName=%s||topicName=%s",
clusterPhyId, groupTopic.getGroupName(), groupTopic.getTopicName()),
5000,
() -> {
String group = groupTopic.getGroupName();
String topic = groupTopic.getTopicName();
try {
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.LIST_GROUP_LATEST_METRICS_BY_GROUP_TOPIC, clusterPhyId, group, topic,
startTime, latestTime, aggDsl);
String routing = routing(clusterPhyId, group);
GroupMetricPO groupMetricPO = esOpClient.performRequestWithRouting(routing, realIndex, dsl,
s -> handleGroupMetricESQueryResponse(s, metrics, clusterPhyId, group, topic), 3);
groupMetricPOS.add(groupMetricPO);
}catch (Exception e){
LOGGER.error("method=listLatestMetricsAggByGroupTopic||clusterPhyId={}||group{}||topic={}||errMsg=exception!",
clusterPhyId, group, topic, e);
}
});
}
queryFuture.waitExecute();
return groupMetricPOS;
}
public List<GroupMetricPO> listPartitionLatestMetrics(Long clusterPhyId, String group, String topic, List<String> metrics){
Long latestTime = getLatestMetricTime();
Long startTime = latestTime - FIVE_MIN;
//1. resolve the indexes to query
String realIndex = realIndex(startTime, latestTime);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.LIST_GROUP_LATEST_METRICS_OF_PARTITION, clusterPhyId, group, topic, latestTime);
List<GroupMetricPO> groupMetricPOS = esOpClient.performRequest(realIndex, dsl, GroupMetricPO.class);
return filterMetrics(groupMetricPOS, metrics);
}
/**
* Count how many documents match (or do not match) the given condition; -1 indicates a query error
*/
public Integer countMetricValue(Long clusterPhyId, String groupName, SearchTerm match, Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
String matchDsl = buildTermsDsl(Arrays.asList(match));
String dsl = match.isEqual()
? dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_GROUP_METRIC_VALUE, clusterPhyId, groupName, startTime, endTime, matchDsl)
: dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_GROUP_NOT_METRIC_VALUE, clusterPhyId, groupName, startTime, endTime, matchDsl);
return esOpClient.performRequestWithRouting(clusterPhyId.toString() + "@" + groupName, realIndex, dsl,
s -> handleESQueryResponseCount(s), 3);
}
public Table<String/*metric*/, String/*topic&partition*/, List<MetricPointVO>> listGroupMetrics(Long clusterId, String groupName,
List<TopicPartitionKS> topicPartitionKS, List<String> metrics,
String aggType, Long startTime, Long endTime){
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
//2. choose the aggregation interval based on the size of the query time range
String interval = MetricsUtils.getInterval(endTime - startTime);
//3. build the aggregation clause
String aggDsl = buildAggsDSL(metrics, "avg");
final Table<String, String, List<MetricPointVO>> table = HashBasedTable.create();
for(TopicPartitionKS tp : topicPartitionKS){
String topic = tp.getTopic();
Integer partition = tp.getPartition();
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.LIST_GROUP_METRICS, clusterId, groupName, topic, partition, startTime, endTime, interval, aggDsl);
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequest(realIndex, dsl,
s -> handleGroupMetrics(s, aggType, metrics), 3);
for(String metric : metricMap.keySet()){
table.put(metric, topic + "&" + partition, metricMap.get(metric));
}
}
return table;
}
/**
* Get the topic&partition set of groupName within [startTime, endTime]
*/
public Set<TopicPartitionKS> listGroupTopicPartitions(Long clusterPhyId, String groupName, Long startTime, Long endTime) {
//1. resolve the indexes to query
String realIndex = realIndex(startTime, endTime);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_GROUP_TOPIC_PARTITION, clusterPhyId, groupName, startTime, endTime);
List<GroupMetricPO> groupMetricPOS = esOpClient.performRequestWithRouting(routing(clusterPhyId, groupName), realIndex, dsl, GroupMetricPO.class);
return groupMetricPOS.stream().map(g -> new TopicPartitionKS(g.getTopic(), g.getPartitionId().intValue())).collect( Collectors.toSet());
}
/**************************************************** private method ****************************************************/
private GroupMetricPO handleGroupMetricESQueryResponse(ESQueryResponse response, List<String> metrics,
Long clusterPhyId, String group, String topic){
GroupMetricPO groupMetricPO = new GroupMetricPO();
groupMetricPO.setClusterPhyId(clusterPhyId);
groupMetricPO.setGroup(group);
groupMetricPO.setTopic(topic);
groupMetricPO.setGroupMetric(ZERO);
if(null == response || null == response.getAggs()){
return groupMetricPO;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap) {
return groupMetricPO;
}
for(String metric : metrics){
String value = esAggrMap.get(metric).getUnusedMap().get(VALUE).toString();
groupMetricPO.getMetrics().put(metric, Float.valueOf(value));
}
return groupMetricPO;
}
private Map<String/*metric*/, List<MetricPointVO>> handleGroupMetrics(ESQueryResponse response, String aggType, List<String> metrics){
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
if(null == esAggrMap){return new HashMap<>();}
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
for(String metric : metrics){
List<MetricPointVO> metricPoints = new ArrayList<>();
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
String value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setTimeStamp(timestamp);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricPoints.add(metricPoint);
}
}catch (Exception e){
LOGGER.error("method=handleESQueryResponse||metric={}||errMsg=exception!", metric, e);
}
} );
metricMap.put(metric, metricPoints);
}
return metricMap;
}
private String routing(Long clusterPhyId, String groupName){
return clusterPhyId + "@" + groupName;
}
}
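
For orientation, a hypothetical caller of the per-partition group query (group, topic and metric names are placeholders, "Lag" is assumed to be a valid group metric name, and groupMetricESDAO is assumed to be Spring-injected):

List<GroupMetricPO> partitionMetrics = groupMetricESDAO.listPartitionLatestMetrics(
        1L,                         // clusterPhyId
        "g-know-streaming-demo",    // consumer group (placeholder)
        "demo-topic",               // topic (placeholder)
        Arrays.asList("Lag"));      // metric names to keep (assumed)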

PartitionMetricESDAO.java Normal file

@@ -0,0 +1,53 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.PartitionMetricPO;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.List;
import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.PARTITION_INFO;
/**
* @author didi
*/
@Component
public class PartitionMetricESDAO extends BaseMetricESDAO {
@PostConstruct
public void init() {
super.indexName = PARTITION_INFO.getIndex();
BaseMetricESDAO.register(PARTITION_INFO, this);
}
public PartitionMetricPO getPartitionLatestMetrics(Long clusterPhyId, String topic,
Integer brokerId, Integer partitionId,
List<String> metricNames){
Long endTime = getLatestMetricTime();
Long startTime = endTime - FIVE_MIN;
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_PARTITION_LATEST_METRICS, clusterPhyId, topic, brokerId, partitionId, startTime, endTime);
PartitionMetricPO partitionMetricPO = esOpClient.performRequestAndTakeFirst(
partitionId.toString(), realIndex(startTime, endTime), dsl, PartitionMetricPO.class);
return (null == partitionMetricPO) ? new PartitionMetricPO(clusterPhyId, topic, brokerId, partitionId)
: filterMetrics(partitionMetricPO, metricNames);
}
public List<PartitionMetricPO> listPartitionLatestMetricsByTopic(Long clusterPhyId, String topic, List<String> metricNames){
Long endTime = getLatestMetricTime();
Long startTime = endTime - FIVE_MIN;
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.LIST_PARTITION_LATEST_METRICS_BY_TOPIC, clusterPhyId, topic, startTime, endTime);
List<PartitionMetricPO> partitionMetricPOS = esOpClient.performRequest(
realIndex(startTime, endTime), dsl, PartitionMetricPO.class);
return filterMetrics(partitionMetricPOS, metricNames);
}
}

ReplicationMetricESDAO.java Normal file

@@ -0,0 +1,94 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ReplicationMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.VALUE;
import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.REPLICATION_INFO;
/**
* @author didi
*/
@Component
public class ReplicationMetricESDAO extends BaseMetricESDAO {
@PostConstruct
public void init() {
super.indexName = REPLICATION_INFO.getIndex();
BaseMetricESDAO.register(REPLICATION_INFO, this);
}
/**
* Get the latest replication metrics of the given broker/topic/partition in the cluster
*/
public ReplicationMetricPO getReplicationLatestMetrics(Long clusterPhyId, Integer brokerId, String topic,
Integer partitionId, List<String> metricNames){
Long endTime = getLatestMetricTime();
Long startTime = endTime - FIVE_MIN;
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_REPLICATION_LATEST_METRICS, clusterPhyId, brokerId, topic, partitionId, startTime, endTime);
ReplicationMetricPO replicationMetricPO = esOpClient.performRequestAndTakeFirst(
realIndex(startTime, endTime), dsl, ReplicationMetricPO.class);
return (null == replicationMetricPO) ? new ReplicationMetricPO(clusterPhyId, topic, brokerId, partitionId)
: filterMetrics(replicationMetricPO, metricNames);
}
/**
* For each metric, get the aggregated (avg, max) value of the given partitionId in cluster clusterPhyId over the time range [startTime, endTime]
*/
public Map<String/*metric*/, MetricPointVO> getReplicationMetricsPoint(Long clusterPhyId, String topic,
Integer brokerId, Integer partitionId, List<String> metrics,
String aggType, Long startTime, Long endTime){
//1. Resolve the index to query
String realIndex = realIndex(startTime, endTime);
//2. Build the aggregation query clause
String aggDsl = buildAggsDSL(metrics, aggType);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_REPLICATION_AGG_SINGLE_METRICS, clusterPhyId, topic, brokerId, partitionId, startTime, endTime, aggDsl);
return esOpClient.performRequestWithRouting(String.valueOf(brokerId), realIndex, dsl,
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
}
/**************************************************** private method ****************************************************/
private Map<String/*metric*/, MetricPointVO> handleSingleESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Map<String/*metric*/, MetricPointVO> metricMap = new HashMap<>();
if(null == response || null == response.getAggs()){
return metricMap;
}
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
if (null == esAggrMap) {
return metricMap;
}
for(String metric : metrics){
String value = esAggrMap.get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricMap.put(metric, metricPoint);
}
return metricMap;
}
}
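Usage note: a minimal sketch of the aggregation query above; the caller class, the metric name and the "avg" aggregation type are assumptions, the call itself follows the signature of getReplicationMetricsPoint.

package com.xiaojukeji.know.streaming.km.persistence.es.dao;

import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Arrays;
import java.util.Map;

// Hypothetical caller, not part of this commit: aggregates one replica's metric over a time range
@Service
public class ReplicationMetricUsageSketch {

    @Autowired
    private ReplicationMetricESDAO replicationMetricESDAO;

    public void show(Long clusterPhyId, String topic, Integer brokerId, Integer partitionId) {
        long endTime = System.currentTimeMillis();
        long startTime = endTime - 60 * 60 * 1000L; // the last hour

        // "LogEndOffset" and "avg" are assumed metric name / aggregation type
        Map<String, MetricPointVO> points = replicationMetricESDAO.getReplicationMetricsPoint(
                clusterPhyId, topic, brokerId, partitionId,
                Arrays.asList("LogEndOffset"), "avg", startTime, endTime);

        points.forEach((metric, point) -> System.out.println(metric + " -> " + point));
    }
}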

View File

@@ -0,0 +1,425 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchFuzzy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchShould;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchSort;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.TopicMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
import static com.xiaojukeji.know.streaming.km.common.enums.metric.KafkaMetricIndexEnum.TOPIC_INFO;
@Component
public class TopicMetricESDAO extends BaseMetricESDAO {
@PostConstruct
public void init() {
super.indexName = TOPIC_INFO.getIndex();
BaseMetricESDAO.register(TOPIC_INFO, this);
}
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("TopicMetricESDAO", 4,8, 500);
public List<TopicMetricPO> listTopicMaxMinMetrics(Long clusterPhyId, List<String> topics, String metric, boolean max, Long startTime, Long endTime){
//1. Resolve the index to query
String realIndex = realIndex(startTime, endTime);
SearchSort sort = new SearchSort(metric, max, true);
List<TopicMetricPO> ret = new ArrayList<>();
for(String topic : topics){
String sortDsl = buildSortDsl(sort, SearchSort.DEFAULT);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_MAX_OR_MIN_SINGLE_METRIC, clusterPhyId, startTime, endTime, topic, sortDsl);
TopicMetricPO topicMetricPO = esOpClient.performRequestAndTakeFirst(topic, realIndex, dsl, TopicMetricPO.class);
ret.add(topicMetricPO);
}
return ret;
}
/**
* For multiple topics in cluster clusterPhyId, get each metric's aggregated (avg, max) value over the time range [startTime, endTime]
* Note: the ES (avg, max) aggregations only return the value itself, no extra information such as the timestamp
*/
public Table<String/*topics*/, String/*metric*/, MetricPointVO> getTopicsAggsMetricsValue(Long clusterPhyId, List<String> topics, List<String> metrics,
String aggType, Long startTime, Long endTime){
//1. Resolve the index to query
String realIndex = realIndex(startTime, endTime);
//2. Build the aggregation query clause
String aggDsl = buildAggsDSL(metrics, aggType);
String shouldDsl = buildShouldDsl(new SearchShould("topic", topics, true));
StringBuilder appendQueryDsl = new StringBuilder();
if(!StringUtils.isEmpty(shouldDsl)){
appendQueryDsl.append(",").append(shouldDsl);
}
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_AGG_SINGLE_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString(), aggDsl);
return esOpClient.performRequest(realIndex, dsl,
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
}
/**
* Fetch topics with their latest metrics (used by paginated queries)
*/
public List<TopicMetricPO> listTopicWithLatestMetrics(Long clusterId, SearchSort sort, SearchFuzzy fuzzy, List<SearchShould> shoulds, List<SearchTerm> terms){
//1. Build the DSL
String sortDsl = buildSortDsl(sort, SearchSort.DEFAULT);
String shouldDsl = buildShouldDsl(shoulds);
String prefixDsl = buildPrefixDsl(fuzzy);
String termDsl = buildTermsDsl(terms);
StringBuilder appendQueryDsl = new StringBuilder();
if(!StringUtils.isEmpty(termDsl)){
appendQueryDsl.append(",").append(termDsl);
}
if(!StringUtils.isEmpty(prefixDsl)){
appendQueryDsl.append(",").append(prefixDsl);
}
if(!StringUtils.isEmpty(shouldDsl)){
appendQueryDsl.append(",").append(shouldDsl);
}
//2. Get the latest metric time point
Long latestMetricTime = getLatestMetricTime(clusterId, appendQueryDsl.toString());
if(null == latestMetricTime){
return new ArrayList<>();
}
//3. Resolve the index to query
Long startTime = latestMetricTime - ONE_HOUR;
String realIndex = realIndex(startTime, latestMetricTime);
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.LIST_TOPIC_WITH_LATEST_METRICS, clusterId, latestMetricTime, appendQueryDsl.toString(), sortDsl);
return esOpClient.performRequest(realIndex, dsl, TopicMetricPO.class);
}
/**
* Count how many documents match (or do not match) the condition; -1 means the query failed
*/
public Integer countMetricValue(Long clusterPhyId, String topic, SearchTerm term, Long startTime, Long endTime){
//1. Resolve the index to query
String realIndex = realIndex(startTime, endTime);
String termDsl = buildTermsDsl(Arrays.asList(term));
String dsl = term.isEqual()
? dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_TOPIC_METRIC_VALUE, clusterPhyId, topic, startTime, endTime, termDsl)
: dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_TOPIC_NOT_METRIC_VALUE, clusterPhyId, topic, startTime, endTime, termDsl);
return esOpClient.performRequestWithRouting(topic, realIndex, dsl,
s -> handleESQueryResponseCount(s), 3);
}
/**
* Get the latest metrics of the topic on the given broker
*/
public TopicMetricPO getTopicLatestMetricByBrokerId(Long clusterPhyId, String topic, Integer brokerId, List<String> metricNames){
Long endTime = getLatestMetricTime();
Long startTime = endTime - FIVE_MIN;
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_BROKER_LATEST_METRICS, clusterPhyId, topic, brokerId, startTime, endTime);
TopicMetricPO topicMetricPO = esOpClient.performRequestAndTakeFirst(topic, realIndex(startTime, endTime), dsl, TopicMetricPO.class);
return (null == topicMetricPO) ? new TopicMetricPO(topic, clusterPhyId) : filterMetrics(topicMetricPO, metricNames);
}
/**
* Get the latest metrics of the given topics
*/
public List<TopicMetricPO> listTopicLatestMetric(Long clusterPhyId, List<String> topics, List<String> metricNames){
Long endTime = getLatestMetricTime();
Long startTime = endTime - FIVE_MIN;
SearchShould should = new SearchShould("topic", topics);
should.setField(true);
String shouldDsl = buildShouldDsl(Arrays.asList(should));
StringBuilder appendQueryDsl = new StringBuilder();
if(!StringUtils.isEmpty(shouldDsl)){
appendQueryDsl.append(",").append(shouldDsl);
}
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_LATEST_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString());
//topicMetricPOS is already sorted by timeStamp in descending order
List<TopicMetricPO> topicMetricPOS = esOpClient.performRequest(realIndex(startTime, endTime), dsl, TopicMetricPO.class);
//just take the first TopicMetricPO of each topic
Map<String, TopicMetricPO> topicMetricMap = new HashMap<>();
for(TopicMetricPO topicMetricPO : topicMetricPOS){
topicMetricPO.setMetrics(topicMetricPO.getMetrics(metricNames));
topicMetricMap.putIfAbsent(topicMetricPO.getTopic(), topicMetricPO);
}
return new ArrayList<>(topicMetricMap.values());
}
/**
* Get the latest metrics of a single topic
*/
public TopicMetricPO getTopicLatestMetric(Long clusterPhyId, String topic, List<String> metricNames){
Long endTime = getLatestMetricTime();
Long startTime = endTime - FIVE_MIN;
SearchTerm searchTerm = new SearchTerm("topic", topic);
searchTerm.setField(true);
String termDsl = buildTermsDsl(Arrays.asList(searchTerm));
StringBuilder appendQueryDsl = new StringBuilder();
if(!StringUtils.isEmpty(termDsl)){
appendQueryDsl.append(",").append(termDsl);
}
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_LATEST_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString());
TopicMetricPO topicMetricPO = esOpClient.performRequestAndTakeFirst(topic, realIndex(startTime, endTime), dsl, TopicMetricPO.class);
return (null == topicMetricPO) ? new TopicMetricPO(topic, clusterPhyId) : filterMetrics(topicMetricPO, metricNames);
}
/**
* For each metric, get the metrics of its topN topics; if the topN topics cannot be obtained, fall back to the metrics of defaultTopics
*/
public Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> listTopicMetricsByTopN(Long clusterPhyId, List<String> defaultTopics,
List<String> metrics, String aggType, int topN,
Long startTime, Long endTime){
//1. Get the topN topics to query; each metric may have a different topN topic list
Map<String, List<String>> metricTopics = getTopNTopics(clusterPhyId, metrics, aggType, topN, startTime, endTime);
Table<String, String, List<MetricPointVO>> table = HashBasedTable.create();
for(String metric : metrics){
table.putAll(listTopicMetricsByTopics(clusterPhyId, Arrays.asList(metric),
aggType, metricTopics.getOrDefault(metric, defaultTopics), startTime, endTime));
}
return table;
}
/**
* Get the metrics of the specified topics for each metric
*/
public Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> listTopicMetricsByTopics(Long clusterPhyId, List<String> metrics,
String aggType, List<String> topics,
Long startTime, Long endTime){
//1. Resolve the index to query
String realIndex = realIndex(startTime, endTime);
//2. Determine the aggregation interval of the metric points from the size of the query time range
String interval = MetricsUtils.getInterval(endTime - startTime);
//3. Build the aggregation query clause
String aggDsl = buildAggsDSL(metrics, aggType);
final Table<String, String, List<MetricPointVO>> table = HashBasedTable.create();
//4. Build the DSL query per topic
for(String topic : topics){
try {
queryFuture.runnableTask(
String.format("class=TopicMetricESDAO||method=listTopicMetricsByTopics||ClusterPhyId=%d||topicName=%s",
clusterPhyId, topic),
3000,
() -> {
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_AGG_LIST_METRICS, clusterPhyId, topic, startTime, endTime, interval, aggDsl);
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(topic, realIndex, dsl,
s -> handleListESQueryResponse(s, metrics, aggType), 3);
synchronized (table){
for(String metric : metricMap.keySet()){
table.put(metric, topic, metricMap.get(metric));
}
}
});
}catch (Exception e){
LOGGER.error("method=listBrokerMetricsByBrokerIds||clusterPhyId={}||brokerId{}||errMsg=exception!",
clusterPhyId, topic, e);
}
}
queryFuture.waitExecute();
return table;
}
//public for test
public Map<String, List<String>> getTopNTopics(Long clusterPhyId, List<String> metrics,
String aggType, int topN,
Long startTime, Long endTime){
//1. Resolve the index to query
String realIndex = realIndex(startTime, endTime);
//2. Determine the aggregation interval of the metric points from the size of the query time range
String interval = MetricsUtils.getInterval(endTime - startTime);
//3. Build the aggregation query clause
String aggDsl = buildAggsDSL(metrics, aggType);
//4. Query ES
String dsl = dslLoaderUtil.getFormatDslByFileName(
DslsConstant.GET_TOPIC_AGG_TOP_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
return esOpClient.performRequest(realIndex, dsl,
s -> handleTopTopicESQueryResponse(s, metrics, topN), 3);
}
/**************************************************** private method ****************************************************/
private Table<String/*topic*/, String/*metric*/, MetricPointVO> handleSingleESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Table<String, String, MetricPointVO> table = HashBasedTable.create();
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
if(null == esAggrMap){return table;}
for(String metric : metrics){
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
String topic = esBucket.getUnusedMap().get(KEY).toString();
String value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setValue(value);
metricPoint.setName(metric);
table.put(topic, metric, metricPoint);
}else {
LOGGER.debug("method=handleListESQueryResponse||metric={}||errMsg=get topic is null!", metric);
}
}catch (Exception e){
LOGGER.error("method=handleListESQueryResponse||metric={}||errMsg=exception!", metric, e);
}
});
}
return table;
}
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
if(null == esAggrMap){return metricMap;}
for(String metric : metrics){
List<MetricPointVO> metricPoints = new ArrayList<>();
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
String value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE).toString();
MetricPointVO metricPoint = new MetricPointVO();
metricPoint.setAggType(aggType);
metricPoint.setTimeStamp(timestamp);
metricPoint.setValue(value);
metricPoint.setName(metric);
metricPoints.add(metricPoint);
}else {
LOGGER.info("");
}
}catch (Exception e){
LOGGER.error("method=handleListESQueryResponse||metric={}||errMsg=exception!", metric, e);
}
} );
metricMap.put(metric, metricPoints);
}
return metricMap;
}
private Map<String, List<String>> handleTopTopicESQueryResponse(ESQueryResponse response, List<String> metrics, int topN){
Map<String, List<String>> ret = new HashMap<>();
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
if(null == esAggrMap){return ret;}
Map<String, List<Tuple<String, Double>>> metricsTopicValueMap = new HashMap<>();
//1. First collect, for each metric, all of its topics and the corresponding metric values
for(String metric : metrics) {
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
try {
if (null != esBucket.getUnusedMap().get(KEY)) {
String topic = esBucket.getUnusedMap().get(KEY).toString();
Double value = Double.valueOf(esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap()
.get(metric).getUnusedMap().get(VALUE).toString());
List<Tuple<String, Double>> brokerValue = (null == metricsTopicValueMap.get(metric)) ?
new ArrayList<>() : metricsTopicValueMap.get(metric);
brokerValue.add(new Tuple<>(topic, value));
metricsTopicValueMap.put(metric, brokerValue);
}
}catch (Exception e){
LOGGER.error("method=handleTopBrokerESQueryResponse||metric={}||errMsg=exception!", metric, e);
}
} );
}
//2. For each metric, sort its topics by metric value and keep the top topN
for(String metric : metricsTopicValueMap.keySet()){
List<Tuple<String, Double>> brokerValue = metricsTopicValueMap.get(metric);
brokerValue.sort((o1, o2) -> {
if(null == o1 || null == o2){return 0;}
return o2.getV2().compareTo(o1.getV2());
} );
List<Tuple<String, Double>> temp = (brokerValue.size() > topN) ? brokerValue.subList(0, topN) : brokerValue;
List<String> topics = temp.stream().map(t -> t.getV1()).collect(Collectors.toList());
ret.put(metric, topics);
}
return ret;
}
private Map<String/*metric*/, Map<String/*topic*/, List<MetricPointVO>>> topicMetricMap2MetricTopicMap(
Map<String/*topic*/, Map<String/*metric*/, List<MetricPointVO>>> topicMetricMap){
Map<String/*metric*/, Map<String/*topic*/, List<MetricPointVO>>> ret = new HashMap<>();
for(String topic : topicMetricMap.keySet()){
Map<String/*metric*/, List<MetricPointVO>> metricMap = topicMetricMap.get(topic);
for(String metric : metricMap.keySet()){
Map<String/*topic*/, List<MetricPointVO>> brokerMap = (null == ret.get(metric)) ? new HashMap<>() : ret.get(metric);
brokerMap.put(topic, metricMap.get(metric));
ret.put(metric, brokerMap);
}
}
return ret;
}
}
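Usage note: a minimal sketch of the top-N query above; the caller class, the metric name "BytesIn", the "avg" aggregation type and the fallback topic are assumptions, while the Table layout (metric as row, topic as column) follows the method's signature.

package com.xiaojukeji.know.streaming.km.persistence.es.dao;

import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Arrays;
import java.util.List;

// Hypothetical caller, not part of this commit: top-N topics per metric with a fallback topic list
@Service
public class TopicMetricUsageSketch {

    @Autowired
    private TopicMetricESDAO topicMetricESDAO;

    public void show(Long clusterPhyId) {
        long endTime = System.currentTimeMillis();
        long startTime = endTime - 60 * 60 * 1000L;

        // "BytesIn" and "avg" are assumed metric name / aggregation type; "__consumer_offsets" is the fallback topic
        Table<String, String, List<MetricPointVO>> table = topicMetricESDAO.listTopicMetricsByTopN(
                clusterPhyId, Arrays.asList("__consumer_offsets"),
                Arrays.asList("BytesIn"), "avg", 5, startTime, endTime);

        // Row key is the metric, column key is the topic, value is the time series of metric points
        table.cellSet().forEach(cell -> System.out.println(
                cell.getRowKey() + "/" + cell.getColumnKey() + " -> " + cell.getValue().size() + " points"));
    }
}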

View File

@@ -0,0 +1,234 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dsls;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.parser.DefaultJSONParser;
import com.alibaba.fastjson.parser.Feature;
import com.alibaba.fastjson.parser.ParserConfig;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.util.List;
import java.util.Map;
/**
* @author: D10865
* @description:
* @date: Create on 2019/2/28 10:08 AM
* @modified By D10865
*
* Utility class for loading DSL query statements
*
*/
@Component
public class DslLoaderUtil {
private static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
/**
* Container of the query statements: file relative path -> DSL content
*/
private Map<String/*fileRelativePath*/, String/*dslContent*/> dslsMap = Maps.newHashMap();
@PostConstruct
public void init() {
LOGGER.info("class=DslLoaderUtil||method=init||DslLoaderUtil init start.");
List<String> dslFileNames = Lists.newLinkedList();
// Use reflection to read the values of the constants defined in DslsConstant
Field[] fields = DslsConstant.class.getDeclaredFields();
for (int i = 0; i < fields.length; ++i) {
fields[i].setAccessible(true);
try {
dslFileNames.add(fields[i].get(null).toString());
} catch (IllegalAccessException e) {
LOGGER.error("class=DslLoaderUtil||method=init||errMsg=fail to read {} error. ", fields[i].getName(),
e);
}
}
// Load the DSL files and their contents
for (String fileName : dslFileNames) {
dslsMap.put(fileName, readDslFileInJarFile(fileName));
}
// Log the loaded query statements
LOGGER.info("class=DslLoaderUtil||method=init||msg=dsl files count {}", dslsMap.size());
for (Map.Entry<String/*fileRelativePath*/, String/*dslContent*/> entry : dslsMap.entrySet()) {
LOGGER.info("class=DslLoaderUtil||method=init||msg=file name {}, dsl content {}", entry.getKey(),
entry.getValue());
}
LOGGER.info("class=DslLoaderUtil||method=init||DslLoaderUtil init finished.");
}
/**
* Get the query statement by file name
*
* @param fileName
* @return
*/
public String getDslByFileName(String fileName) {
return dslsMap.get(fileName);
}
/**
* Get the formatted query statement
*
* @param fileName
* @param args
* @return
*/
public String getFormatDslByFileName(String fileName, Object... args) {
String loadDslContent = getDslByFileName(fileName);
if (StringUtils.isBlank(loadDslContent)) {
LOGGER.error("class=DslLoaderUtil||method=getFormatDslByFileName||errMsg=dsl file {} content is empty",
fileName);
return "";
}
// Format the query statement
String dsl = trimJsonBank( String.format(loadDslContent, args));
// Print the DSL statement if this is not the online environment
if (!EnvUtil.isOnline()) {
LOGGER.info("class=DslLoaderUtil||method=getFormatDslByFileName||dsl={}", dsl);
}
return dsl;
}
public String getFormatDslForCatIndexByCondition(String fileName, String boolMustDsl, Object... args) {
String formatDslByFileName = getFormatDslByFileName(fileName, args);
return formatDslByFileName.replace("\"boolMustDsl\"", boolMustDsl);
}
public String getFormatDslByFileNameByAggParam(String fileName, String clusterPhyMetrics, String interval,
String aggType, Object... args) {
String formatDslByFileName = getFormatDslByFileName(fileName, args);
return formatDslByFileName
.replace("{interval}", interval)
.replace("{clusterPhyMetrics}", clusterPhyMetrics)
.replace("{aggType}", aggType);
}
public String getFormatDslByFileNameAndOtherParam(String fileName, String interval, String aggsDsl,
Object... args) {
String formatDslByFileName = getFormatDslByFileName(fileName, args);
return formatDslByFileName
.replace("{interval}", interval)
.replace("\"aggsDsl\":1", aggsDsl);
}
public String getDslByTopNNameInfo(String fileName, String interval, String topNameStr, String aggsDsl,
Object... args) {
String formatDslByFileName = getFormatDslByFileName(fileName, args);
return formatDslByFileName
.replace("{interval}", interval)
.replace("\"aggsDsl\":1", aggsDsl)
.replace("\"topNameListStr\"", topNameStr);
}
/**************************************************** private method ****************************************************/
/**
* Remove whitespace from the JSON
*
* @param sourceDsl
* @return
*/
private String trimJsonBank(String sourceDsl) {
List<String> dslList = Lists.newArrayList();
DefaultJSONParser parser = null;
Object obj = null;
String dsl = sourceDsl;
// Parse the JSON objects one by one until pos reaches 0
for (;;) {
try {
// Feature.OrderedField.getMask() is needed here to keep the field order
parser = new DefaultJSONParser(dsl, ParserConfig.getGlobalInstance(),
JSON.DEFAULT_PARSER_FEATURE | Feature.OrderedField.getMask());
obj = parser.parse();
} catch (Exception t) {
LOGGER.error("class=DslLoaderUtil||method=trimJsonBank||errMsg=parse json {} error. ", dsl, t);
}
if (obj == null) {
break;
}
if (obj instanceof JSONObject) {
dslList.add( JSON.toJSONString(obj, SerializerFeature.WriteMapNullValue));
int pos = parser.getLexer().pos();
if (pos <= 0) {
break;
}
dsl = dsl.substring(pos);
parser.getLexer().close();
} else {
parser.getLexer().close();
break;
}
}
// If parsing failed or there is more than one query statement, return the original statement
if (dslList.isEmpty() || dslList.size() > 1) {
return sourceDsl;
}
return dslList.get(0);
}
/**
* Read a DSL statement file from inside the jar
*
* @param fileName
* @return
*/
private String readDslFileInJarFile(String fileName) {
InputStream inputStream = this.getClass().getClassLoader()
.getResourceAsStream( String.format("dsl/%s", fileName));
if (inputStream != null) {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
String line = null;
List<String> lines = Lists.newLinkedList();
try {
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
return StringUtils.join(lines, "");
} catch (IOException e) {
LOGGER.error("class=DslLoaderUtil||method=readDslFileInJarFile||errMsg=read file {} error. ", fileName,
e);
return "";
} finally {
try {
inputStream.close();
} catch (IOException e) {
LOGGER.error(
"class=DslLoaderUtil||method=readDslFileInJarFile||errMsg=fail to close file {} error. ",
fileName, e);
}
}
} else {
LOGGER.error("class=DslLoaderUtil||method=readDslFileInJarFile||errMsg=fail to read file {} content",
fileName);
return "";
}
}
}
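Usage note: the DSL templates themselves are classpath resources and are not part of this commit, so the template content below is a hypothetical example; it only illustrates that getFormatDslByFileName applies a plain positional String.format over the loaded file content.

// Hypothetical illustration, not part of this commit: %d/%s placeholders in the loaded template
// are replaced in argument order, exactly as String.format does.
public class DslTemplateSketch {
    public static void main(String[] args) {
        // Assumed template content; the real templates are separate classpath resources under dsl/
        String template = "{\"query\":{\"bool\":{\"must\":[{\"term\":{\"clusterPhyId\":%d}},"
                + "{\"range\":{\"timestamp\":{\"gte\":%d,\"lte\":%d}}}]}}}";

        String dsl = String.format(template, 1L, 1660000000000L, 1660003600000L);
        System.out.println(dsl);
    }
}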

View File

@@ -0,0 +1,83 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dsls;
/**
* @author: D10865
* @description:
* @date: Create on 2019/2/28 10:23 AM
* @modified By D10865
*
* Constants for the query statement file names
*
* Naming rule: ClassName/methodName
*
* Under the dslFiles directory, create a folder named after the class and a file named after the method
*
*/
public class DslsConstant {
private DslsConstant() {}
/**************************************************** Base ****************************************************/
public static final String GET_LATEST_METRIC_TIME = "BaseMetricESDAO/getLatestMetricTime";
/**************************************************** Broker ****************************************************/
public static final String GET_BROKER_AGG_SINGLE_METRICS = "BrokerMetricESDAO/getAggSingleBrokerMetrics";
public static final String GET_BROKER_AGG_LIST_METRICS = "BrokerMetricESDAO/getAggListBrokerMetrics";
public static final String GET_BROKER_AGG_TOP_METRICS = "BrokerMetricESDAO/getAggTopMetricsBrokers";
public static final String GET_BROKER_LATEST_METRICS = "BrokerMetricESDAO/getBrokerLatestMetrics";
/**************************************************** Topic ****************************************************/
public static final String GET_TOPIC_AGG_LIST_METRICS = "TopicMetricESDAO/getAggListMetrics";
public static final String GET_TOPIC_AGG_SINGLE_METRICS = "TopicMetricESDAO/getAggSingleMetrics";
public static final String GET_TOPIC_MAX_OR_MIN_SINGLE_METRIC = "TopicMetricESDAO/getMaxOrMinSingleMetric";
public static final String GET_TOPIC_AGG_TOP_METRICS = "TopicMetricESDAO/getAggTopMetricsTopics";
public static final String GET_TOPIC_BROKER_LATEST_METRICS = "TopicMetricESDAO/getTopicLatestMetricByBrokerId";
public static final String GET_TOPIC_LATEST_METRICS = "TopicMetricESDAO/getTopicLatestMetric";
public static final String LIST_TOPIC_WITH_LATEST_METRICS = "TopicMetricESDAO/listTopicWithLatestMetrics";
public static final String COUNT_TOPIC_METRIC_VALUE = "TopicMetricESDAO/countTopicMetricValue";
public static final String COUNT_TOPIC_NOT_METRIC_VALUE = "TopicMetricESDAO/countTopicNotMetricValue";
/**************************************************** Cluster ****************************************************/
public static final String GET_CLUSTER_AGG_LIST_METRICS = "ClusterMetricESDAO/getAggListClusterMetrics";
public static final String GET_CLUSTER_AGG_SINGLE_METRICS = "ClusterMetricESDAO/getAggSingleClusterMetrics";
public static final String LIST_CLUSTER_WITH_LATEST_METRICS = "ClusterMetricESDAO/listClusterWithLatestMetrics";
public static final String GET_CLUSTER_LATEST_METRICS = "ClusterMetricESDAO/getClusterLatestMetrics";
/**************************************************** Partition ****************************************************/
public static final String GET_PARTITION_LATEST_METRICS = "PartitionMetricESDAO/getPartitionLatestMetrics";
public static final String LIST_PARTITION_LATEST_METRICS_BY_TOPIC = "PartitionMetricESDAO/listPartitionLatestMetricsByTopic";
/**************************************************** REPLICATION ****************************************************/
public static final String GET_REPLICATION_AGG_SINGLE_METRICS = "ReplicationMetricESDAO/getAggSingleReplicationMetrics";
public static final String GET_REPLICATION_LATEST_METRICS = "ReplicationMetricESDAO/getReplicationLatestMetrics";
/**************************************************** Group ****************************************************/
public static final String GET_GROUP_TOPIC_PARTITION = "GroupMetricESDAO/getTopicPartitionOfGroup";
public static final String LIST_GROUP_METRICS = "GroupMetricESDAO/listGroupMetrics";
public static final String LIST_GROUP_LATEST_METRICS_BY_GROUP_TOPIC = "GroupMetricESDAO/listLatestMetricsAggByGroupTopic";
public static final String LIST_GROUP_LATEST_METRICS_OF_PARTITION = "GroupMetricESDAO/listPartitionLatestMetrics";
public static final String COUNT_GROUP_METRIC_VALUE = "GroupMetricESDAO/countGroupMetricValue";
public static final String COUNT_GROUP_NOT_METRIC_VALUE = "GroupMetricESDAO/countGroupNotMetricValue";
}
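Usage note: a minimal sketch of how a constant maps to a classpath resource; the "dsl/" prefix comes from DslLoaderUtil#readDslFileInJarFile, and the only assumption is that the files ship on the classpath (typically under src/main/resources/dsl).

package com.xiaojukeji.know.streaming.km.persistence.es.dsls;

// Minimal sketch, not part of this commit: resolves the resource path of one DslsConstant entry
public class DslsConstantPathSketch {
    public static void main(String[] args) {
        String fileName = DslsConstant.GET_TOPIC_LATEST_METRICS;   // "TopicMetricESDAO/getTopicLatestMetric"
        String resourcePath = String.format("dsl/%s", fileName);   // "dsl/TopicMetricESDAO/getTopicLatestMetric"
        System.out.println(resourcePath);
    }
}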

View File

@@ -0,0 +1,16 @@
package com.xiaojukeji.know.streaming.km.persistence.jmx;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import javax.management.ObjectName;
/**
* Service interface for fetching data via JMX
* @author tukun, zengqiao
* @date 2015/11/11.
*/
public interface JmxDAO {
Object getJmxValue(String jmxHost, Integer jmxPort, JmxConfig jmxConfig, ObjectName objectName, String attribute);
Object getJmxValue(Long clusterPhyId, Integer brokerId, String jmxHost, Integer jmxPort, JmxConfig jmxConfig, ObjectName objectName, String attribute);
}

View File

@@ -0,0 +1,48 @@
package com.xiaojukeji.know.streaming.km.persistence.jmx.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO;
import org.springframework.stereotype.Repository;
import javax.management.*;
/**
* @author tukun, zengqiao
* @date 2015/11/11.
*/
@Repository
public class JmxDAOImpl implements JmxDAO {
private static final ILog log = LogFactory.getLog(JmxDAOImpl.class);
@Override
public Object getJmxValue(String jmxHost, Integer jmxPort, JmxConfig jmxConfig, ObjectName objectName, String attribute) {
return this.getJmxValue(null, null, jmxHost, jmxPort, jmxConfig, objectName, attribute);
}
@Override
public Object getJmxValue(Long clusterPhyId, Integer brokerId, String jmxHost, Integer jmxPort, JmxConfig jmxConfig, ObjectName objectName, String attribute) {
JmxConnectorWrap jmxConnectorWrap = null;
try {
jmxConnectorWrap = new JmxConnectorWrap(clusterPhyId, brokerId, null, jmxHost, jmxPort, jmxConfig);
if (!jmxConnectorWrap.checkJmxConnectionAndInitIfNeed()) {
log.error("method=getJmxValue||clusterPhyId={}||brokerId={}||jmxHost={}||jmxPort={}||jmxConfig={}||errMgs=create jmx client failed",
clusterPhyId, brokerId, jmxHost, jmxPort, jmxConfig);
return null;
}
return jmxConnectorWrap.getAttribute(objectName, attribute);
} catch (Exception e) {
log.error("method=getJmxValue||clusterPhyId={}||brokerId={}||jmxHost={}||jmxPort={}||jmxConfig={}||objectName={}||attribute={}||msg=get attribute failed||errMsg={}",
clusterPhyId, brokerId, jmxHost, jmxPort, jmxConfig, objectName, attribute, e);
} finally {
if (jmxConnectorWrap != null) {
jmxConnectorWrap.close();
}
}
return null;
}
}
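Usage note: a minimal sketch of reading one broker attribute through the DAO above; the caller class is hypothetical, the MBean name and attribute are standard Kafka broker metrics, and the default JmxConfig is assumed to mean an open (non-authenticated) JMX port.

package com.xiaojukeji.know.streaming.km.persistence.jmx.impl;

import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import com.xiaojukeji.know.streaming.km.persistence.jmx.JmxDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

// Hypothetical caller, not part of this commit: reads one broker-side JMX attribute
@Service
public class JmxUsageSketch {

    @Autowired
    private JmxDAO jmxDAO;

    public Object readMessagesInRate(String jmxHost, Integer jmxPort) throws MalformedObjectNameException {
        // Standard Kafka broker MBean and attribute
        ObjectName objectName = new ObjectName("kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec");
        return jmxDAO.getJmxValue(jmxHost, jmxPort, new JmxConfig(), objectName, "OneMinuteRate");
    }
}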

View File

@@ -0,0 +1,116 @@
package com.xiaojukeji.know.streaming.km.persistence.kafka;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.persistence.AbstractClusterLoadedChangedHandler;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.springframework.stereotype.Component;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
@Slf4j
@Component
public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
private static final Map<Long, AdminClient> KAFKA_ADMIN_CLIENT_MAP = new ConcurrentHashMap<>();
public AdminClient getClient(Long clusterPhyId) throws NotExistException {
AdminClient adminClient = KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
if (adminClient != null) {
return adminClient;
}
adminClient = this.createKafkaAdminClient(clusterPhyId);
if (adminClient == null) {
throw new NotExistException("kafka admin-client not exist due to create failed");
}
return adminClient;
}
/**************************************************** private method ****************************************************/
@Override
protected void add(ClusterPhy clusterPhy) {
// ignore: clients are created lazily on demand, so nothing needs to be done here
}
@Override
protected void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy) {
if (newClusterPhy.getBootstrapServers().equals(oldClusterPhy.getBootstrapServers())
&& newClusterPhy.getClientProperties().equals(oldClusterPhy.getClientProperties())) {
// The cluster info changed, but the bootstrap servers and client properties did not, so just return
return;
}
// Drop the old client; a new one will be created on demand
this.remove(newClusterPhy);
}
@Override
protected void remove(ClusterPhy clusterPhy) {
this.closeKafkaAdminClient(clusterPhy.getId());
}
private void closeKafkaAdminClient(Long clusterPhyId) {
try {
modifyClientMapLock.lock();
AdminClient adminClient = KAFKA_ADMIN_CLIENT_MAP.remove(clusterPhyId);
if (adminClient == null) {
return;
}
log.info("close kafka AdminClient starting, clusterPhyId:{}", clusterPhyId);
adminClient.close();
log.info("close kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
} catch (Exception e) {
log.error("close kafka AdminClient failed, clusterPhyId:{}", clusterPhyId, e);
} finally {
modifyClientMapLock.unlock();
}
}
private AdminClient createKafkaAdminClient(Long clusterPhyId) throws NotExistException {
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(clusterPhyId);
if (clusterPhy == null) {
throw new NotExistException(String.format("clusterPhyId:%d not exist", clusterPhyId));
}
return this.createKafkaAdminClient(clusterPhyId, clusterPhy.getBootstrapServers(), ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
}
private AdminClient createKafkaAdminClient(Long clusterPhyId, String bootstrapServers, Properties props) {
try {
modifyClientMapLock.lock();
AdminClient adminClient = KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
if (adminClient != null) {
return adminClient;
}
log.debug("create kafka AdminClient starting, clusterPhyId:{} props:{}", clusterPhyId, props);
if (props == null) {
props = new Properties();
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
KAFKA_ADMIN_CLIENT_MAP.put(clusterPhyId, AdminClient.create(props));
log.info("create kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
} catch (Exception e) {
log.error("create kafka AdminClient failed, clusterPhyId:{} props:{}", clusterPhyId, props, e);
} finally {
modifyClientMapLock.unlock();
}
return KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
}
}
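Usage note: a minimal sketch of using the cached AdminClient; the caller class is hypothetical, and since clients are cached per cluster the caller should not close them.

package com.xiaojukeji.know.streaming.km.persistence.kafka;

import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import org.apache.kafka.clients.admin.AdminClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Set;
import java.util.concurrent.ExecutionException;

// Hypothetical caller, not part of this commit: the AdminClient is cached per cluster, do not close it here
@Service
public class AdminClientUsageSketch {

    @Autowired
    private KafkaAdminClient kafkaAdminClient;

    public Set<String> listTopicNames(Long clusterPhyId)
            throws NotExistException, ExecutionException, InterruptedException {
        AdminClient adminClient = kafkaAdminClient.getClient(clusterPhyId);
        return adminClient.listTopics().names().get();
    }
}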

View File

@@ -0,0 +1,158 @@
package com.xiaojukeji.know.streaming.km.persistence.kafka;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.persistence.AbstractClusterLoadedChangedHandler;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import kafka.zk.AdminZkClient;
import kafka.zk.KafkaZkClient;
import org.apache.kafka.common.utils.Time;
import org.apache.zookeeper.client.ZKClientConfig;
import org.springframework.stereotype.Component;
import scala.Option;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@Component
public class KafkaAdminZKClient extends AbstractClusterLoadedChangedHandler implements KafkaClient<KafkaZkClient> {
private static final ILog log = LogFactory.getLog(KafkaAdminZKClient.class);
/**
* KafkaZkClient provided by Kafka
*/
private static final Map<Long, KafkaZkClient> KAFKA_ZK_CLIENT_MAP = new ConcurrentHashMap<>();
/**
* Most recent creation time of the zk-client
*/
private static final Map<Long, Long> KAFKA_ZK_CLIENT_CREATE_TIME = new ConcurrentHashMap<>();
@Override
public KafkaZkClient getClient(Long clusterPhyId) throws NotExistException {
KafkaZkClient kafkaZkClient = KAFKA_ZK_CLIENT_MAP.get(clusterPhyId);
if (kafkaZkClient != null) {
return kafkaZkClient;
}
kafkaZkClient = this.createZKClient(clusterPhyId);
if (kafkaZkClient == null) {
throw new NotExistException("kafka kafka-zk-client not exist due to create failed");
}
return kafkaZkClient;
}
public AdminZkClient getKafkaZKWrapClient(Long clusterPhyId) throws NotExistException {
return new AdminZkClient(this.getClient(clusterPhyId));
}
public Long getZKClientCreateTime(Long clusterPhyId) {
return KAFKA_ZK_CLIENT_CREATE_TIME.get(clusterPhyId);
}
/**************************************************** private method ****************************************************/
@Override
protected void add(ClusterPhy clusterPhy) {
// ignore: clients are created lazily on demand, so nothing needs to be done here
}
@Override
protected void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy) {
if (newClusterPhy.getZookeeper().equals(oldClusterPhy.getZookeeper())) {
// The cluster's ZK address was not changed, just return
return;
}
// Drop the old client; a new one will be created on demand
this.remove(newClusterPhy);
}
@Override
protected void remove(ClusterPhy clusterPhy) {
// Close the ZK client
this.closeZKClient(clusterPhy.getId());
// Remove the creation time
KAFKA_ZK_CLIENT_CREATE_TIME.remove(clusterPhy.getId());
}
private void closeZKClient(Long clusterPhyId) {
try {
modifyClientMapLock.lock();
KafkaZkClient kafkaZkClient = KAFKA_ZK_CLIENT_MAP.remove(clusterPhyId);
if (kafkaZkClient == null) {
return;
}
log.info("close ZK Client starting, clusterPhyId:{}", clusterPhyId);
kafkaZkClient.close();
log.info("close ZK Client success, clusterPhyId:{}", clusterPhyId);
} catch (Exception e) {
log.error("close ZK Client failed, clusterPhyId:{}", clusterPhyId, e);
} finally {
modifyClientMapLock.unlock();
}
}
private KafkaZkClient createZKClient(Long clusterPhyId) throws NotExistException {
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(clusterPhyId);
if (clusterPhy == null) {
log.warn("create ZK Client failed, cluster not exist, clusterPhyId:{}", clusterPhyId);
throw new NotExistException(MsgConstant.getClusterPhyNotExist(clusterPhyId));
}
if (ValidateUtils.isBlank(clusterPhy.getZookeeper())) {
log.warn("create ZK Client failed, zookeeper not exist, clusterPhyId:{}", clusterPhyId);
return null;
}
return this.createZKClient(clusterPhyId, clusterPhy.getZookeeper());
}
private KafkaZkClient createZKClient(Long clusterPhyId, String zookeeperAddress) {
try {
modifyClientMapLock.lock();
KafkaZkClient kafkaZkClient = KAFKA_ZK_CLIENT_MAP.get(clusterPhyId);
if (kafkaZkClient != null) {
return kafkaZkClient;
}
log.debug("create ZK Client starting, clusterPhyId:{} zookeeperAddress:{}", clusterPhyId, zookeeperAddress);
kafkaZkClient = KafkaZkClient.apply(
zookeeperAddress,
false,
Constant.DEFAULT_SESSION_TIMEOUT_UNIT_MS,
Constant.DEFAULT_SESSION_TIMEOUT_UNIT_MS,
5,
Time.SYSTEM,
"KnowStreaming-clusterPhyId-" + clusterPhyId,
"SessionExpireListener",
Option.apply("KnowStreaming-clusterPhyId-" + clusterPhyId),
Option.apply(new ZKClientConfig())
);
KAFKA_ZK_CLIENT_MAP.put(clusterPhyId, kafkaZkClient);
KAFKA_ZK_CLIENT_CREATE_TIME.put(clusterPhyId, System.currentTimeMillis());
log.info("create ZK Client success, clusterPhyId:{}", clusterPhyId);
} catch (Exception e) {
log.error("create ZK Client failed, clusterPhyId:{} zookeeperAddress:{}", clusterPhyId, zookeeperAddress, e);
} finally {
modifyClientMapLock.unlock();
}
return KAFKA_ZK_CLIENT_MAP.get(clusterPhyId);
}
}

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.persistence.kafka;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
public interface KafkaClient<T> {
/**
* Get the Kafka client
* @param clusterPhyId physical cluster ID
* @return
*/
T getClient(Long clusterPhyId) throws NotExistException;
}

View File

@@ -0,0 +1,135 @@
package com.xiaojukeji.know.streaming.km.persistence.kafka;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.KafkaConsumerFactory;
import com.xiaojukeji.know.streaming.km.persistence.AbstractClusterLoadedChangedHandler;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;
@Component
public class KafkaConsumerClient extends AbstractClusterLoadedChangedHandler implements KafkaClient<KafkaConsumer<String, String>> {
private static final ILog log = LogFactory.getLog(KafkaConsumerClient.class);
@Value(value = "${client-pool.kafka-consumer.min-idle-client-num:24}")
private Integer kafkaConsumerMinIdleClientNum;
@Value(value = "${client-pool.kafka-consumer.max-idle-client-num:24}")
private Integer kafkaConsumerMaxIdleClientNum;
@Value(value = "${client-pool.kafka-consumer.max-total-client-num:24}")
private Integer kafkaConsumerMaxTotalClientNum;
@Value(value = "${client-pool.kafka-consumer.borrow-timeout-unit-ms:3000}")
private Integer kafkaConsumerBorrowTimeoutUnitMs;
private static final Map<Long, GenericObjectPool<KafkaConsumer<String, String>>> KAFKA_CONSUMER_POOL = new ConcurrentHashMap<>();
private static final ReentrantLock modifyPoolLock = new ReentrantLock();
@Override
public KafkaConsumer<String, String> getClient(Long clusterPhyId) throws NotExistException {
KafkaConsumer<String, String> kafkaConsumer = this.borrowClientTryInitIfNotExist(clusterPhyId);
if (kafkaConsumer == null) {
throw new NotExistException(String.format("clusterPhyId:%d kafkaConsumer is null", clusterPhyId));
}
return kafkaConsumer;
}
public void returnClient(Long clusterPhyId, KafkaConsumer<String, String> kafkaConsumer) {
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.get(clusterPhyId);
if (objectPool == null) {
return;
}
try {
objectPool.returnObject(kafkaConsumer);
} catch (Exception e) {
log.error("method=returnClient||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
}
}
/**************************************************** private method ****************************************************/
@Override
protected void add(ClusterPhy clusterPhy) {
// ignore: clients are created lazily on demand, so nothing needs to be done here
}
@Override
protected void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy) {
if (newClusterPhy.getBootstrapServers().equals(oldClusterPhy.getBootstrapServers())
&& newClusterPhy.getClientProperties().equals(oldClusterPhy.getClientProperties())) {
// The cluster info changed, but the bootstrap servers and client properties did not, so just return
return;
}
// Drop the old pool; a new one will be created on demand
this.remove(newClusterPhy);
}
@Override
protected void remove(ClusterPhy clusterPhy) {
GenericObjectPool<KafkaConsumer<String, String>> genericObjectPool = KAFKA_CONSUMER_POOL.remove(clusterPhy.getId());
if (genericObjectPool == null) {
return;
}
genericObjectPool.close();
}
private KafkaConsumer<String, String> borrowClientTryInitIfNotExist(Long clusterPhyId) {
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.get(clusterPhyId);
if (objectPool == null) {
initKafkaConsumerPool(LoadedClusterPhyCache.getByPhyId(clusterPhyId));
objectPool = KAFKA_CONSUMER_POOL.get(clusterPhyId);
}
if (objectPool == null) {
return null;
}
try {
return objectPool.borrowObject(kafkaConsumerBorrowTimeoutUnitMs);
} catch (Exception e) {
log.error("method=borrowClientTryInitIfNotExist||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
}
return null;
}
private void initKafkaConsumerPool(ClusterPhy clusterPhy) {
modifyPoolLock.lock();
try {
if (clusterPhy == null) {
return;
}
GenericObjectPool<KafkaConsumer<String, String>> objectPool = KAFKA_CONSUMER_POOL.get(clusterPhy.getId());
if (objectPool != null) {
return;
}
GenericObjectPoolConfig<KafkaConsumer<String, String>> config = new GenericObjectPoolConfig<>();
config.setMaxIdle(kafkaConsumerMaxIdleClientNum);
config.setMinIdle(kafkaConsumerMinIdleClientNum);
config.setMaxTotal(kafkaConsumerMaxTotalClientNum);
KAFKA_CONSUMER_POOL.put(clusterPhy.getId(), new GenericObjectPool<>(new KafkaConsumerFactory(clusterPhy), config));
} catch (Exception e) {
log.error("method=initKafkaConsumerPool||clusterPhy={}||errMsg=exception!", clusterPhy, e);
} finally {
modifyPoolLock.unlock();
}
}
}
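Usage note: a minimal sketch of the borrow/return contract of the pooled consumers; the caller class is hypothetical, while getClient and returnClient come from the file above.

package com.xiaojukeji.know.streaming.km.persistence.kafka;

import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;

// Hypothetical caller, not part of this commit: consumers are pooled, so every borrowed client must be returned
@Service
public class ConsumerPoolUsageSketch {

    @Autowired
    private KafkaConsumerClient kafkaConsumerClient;

    public List<PartitionInfo> partitionsOf(Long clusterPhyId, String topic) throws NotExistException {
        KafkaConsumer<String, String> consumer = kafkaConsumerClient.getClient(clusterPhyId);
        try {
            return consumer.partitionsFor(topic);
        } finally {
            // Return the consumer to the pool instead of closing it
            kafkaConsumerClient.returnClient(clusterPhyId, consumer);
        }
    }
}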

View File

@@ -0,0 +1,196 @@
package com.xiaojukeji.know.streaming.km.persistence.kafka;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.persistence.AbstractClusterLoadedChangedHandler;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
@Component
public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
private static final ILog log = LogFactory.getLog(KafkaJMXClient.class);
@Autowired
private BrokerDAO brokerDAO;
private static final Map<Long, Map<Integer, JmxConnectorWrap>> JMX_MAP = new ConcurrentHashMap<>();
public JmxConnectorWrap getClientWithCheck(Long clusterPhyId, Integer brokerId){
JmxConnectorWrap jmxConnectorWrap = this.getClient(clusterPhyId, brokerId);
if (ValidateUtils.isNull(jmxConnectorWrap) || !jmxConnectorWrap.checkJmxConnectionAndInitIfNeed()) {
log.error("method=getClientWithCheck||clusterPhyId={}||brokerId={}||msg=get jmx connector failed!", clusterPhyId, brokerId);
return null;
}
return jmxConnectorWrap;
}
public JmxConnectorWrap getClient(Long clusterPhyId, Integer brokerId) {
Map<Integer, JmxConnectorWrap> jmxMap = JMX_MAP.getOrDefault(clusterPhyId, new ConcurrentHashMap<>());
if (jmxMap == null) {
// 集群不存在, 直接返回null
return null;
}
JmxConnectorWrap jmxConnectorWrap = jmxMap.get(brokerId);
if (jmxConnectorWrap != null) {
// Already created, return it directly
return jmxConnectorWrap;
}
// Not created yet, create it now
return this.createJmxConnectorWrap(clusterPhyId, brokerId);
}
public void checkAndRemoveIfIllegal(Long clusterPhyId, List<Broker> allAliveBrokerList) {
Map<Integer, JmxConnectorWrap> jmxMap = JMX_MAP.get(clusterPhyId);
if (jmxMap == null) {
return;
}
// Convert to a map of brokerId -> start time
Map<Integer, Long> brokerIdAndStartTimeMap = allAliveBrokerList.stream().collect(Collectors.toMap(Broker::getBrokerId, Broker::getStartTimestamp));
// Find the brokerIds whose JMX clients are no longer valid
Set<Integer> illegalBrokerIdSet = jmxMap.entrySet()
.stream()
.filter(entry -> {
Long startTime = brokerIdAndStartTimeMap.get(entry.getKey());
if (startTime == null) {
// The broker no longer exists
return true;
}
return entry.getValue().brokerChanged(startTime);
})
.map(elem -> elem.getKey())
.collect(Collectors.toSet());
for (Integer brokerId: illegalBrokerIdSet) {
log.warn("method=checkAndRemoveIfIllegal||clusterPhyId={}||brokerId={}||msg=remove jmx-client", clusterPhyId, brokerId);
JmxConnectorWrap jmxConnectorWrap = jmxMap.remove(brokerId);
if (jmxConnectorWrap == null) {
continue;
}
jmxConnectorWrap.close();
}
}
/**************************************************** private method ****************************************************/
@Override
protected void add(ClusterPhy clusterPhy) {
JMX_MAP.putIfAbsent(clusterPhy.getId(), new ConcurrentHashMap<>());
}
@Override
protected void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy) {
if (newClusterPhy.getClientProperties().equals(oldClusterPhy.getClientProperties())
&& newClusterPhy.getZookeeper().equals(oldClusterPhy.getZookeeper())
&& newClusterPhy.getBootstrapServers().equals(oldClusterPhy.getBootstrapServers())) {
// The cluster info changed, but the relevant fields did not, so just return
return;
}
// When there is a change, remove the old entry first and then re-add it
this.remove(newClusterPhy);
this.add(newClusterPhy);
}
@Override
protected void remove(ClusterPhy clusterPhy) {
Map<Integer, JmxConnectorWrap> jmxMap = JMX_MAP.remove(clusterPhy.getId());
if (jmxMap == null) {
return;
}
for (JmxConnectorWrap jmxConnectorWrap: jmxMap.values()) {
jmxConnectorWrap.close();
}
}
private JmxConnectorWrap createJmxConnectorWrap(Long clusterPhyId, Integer brokerId) {
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(clusterPhyId);
if (clusterPhy == null) {
// The cluster does not exist
return null;
}
return this.createJmxConnectorWrap(clusterPhy, brokerId);
}
private JmxConnectorWrap createJmxConnectorWrap(ClusterPhy clusterPhy, Integer brokerId) {
Broker broker = this.getBrokerFromDB(clusterPhy.getId(), brokerId);
if (broker == null) {
return null;
}
try {
modifyClientMapLock.lock();
JmxConnectorWrap jmxMap = JMX_MAP.getOrDefault(clusterPhy.getId(), new ConcurrentHashMap<>()).get(brokerId);
if (jmxMap != null) {
return jmxMap;
}
log.debug("method=createJmxConnectorWrap||clusterPhyId={}||brokerId={}||msg=create JmxConnectorWrap starting", clusterPhy.getId(), brokerId);
JmxConfig jmxConfig = ConvertUtil.str2ObjByJson(clusterPhy.getJmxProperties(), JmxConfig.class);
if (jmxConfig == null) {
jmxConfig = new JmxConfig();
}
JmxConnectorWrap jmxConnectorWrap = new JmxConnectorWrap(
clusterPhy.getId(),
brokerId,
broker.getStartTimestamp(),
broker.getHost(),
broker.getJmxPort() != null? broker.getJmxPort(): jmxConfig.getJmxPort(),
jmxConfig
);
JMX_MAP.get(clusterPhy.getId()).put(brokerId, jmxConnectorWrap);
log.info("method=createJmxConnectorWrap||clusterPhyId={}||brokerId={}||msg=create JmxConnectorWrap success", clusterPhy.getId(), brokerId);
return jmxConnectorWrap;
} catch (Exception e) {
log.error("method=createJmxConnectorWrap||clusterPhyId={}||brokerId={}msg=create JmxConnectorWrap failed||errMsg=exception||", clusterPhy.getId(), brokerId, e);
} finally {
modifyClientMapLock.unlock();
}
return null;
}
private Broker getBrokerFromDB(Long clusterPhyId, Integer brokerId) {
LambdaQueryWrapper<BrokerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(BrokerPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.eq(BrokerPO::getBrokerId, brokerId);
lambdaQueryWrapper.eq(BrokerPO::getStatus, Constant.ALIVE);
BrokerPO brokerPO = brokerDAO.selectOne(lambdaQueryWrapper);
return ConvertUtil.obj2Obj(brokerPO, Broker.class);
}
}
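Usage note: a minimal sketch of reading a broker metric through the cached JMX connector; the caller class, MBean name and attribute are assumptions, while getClientWithCheck and getAttribute come from the code above.

package com.xiaojukeji.know.streaming.km.persistence.kafka;

import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.management.ObjectName;

// Hypothetical caller, not part of this commit: per-broker JMX connectors are cached and health-checked
@Service
public class BrokerJmxUsageSketch {

    @Autowired
    private KafkaJMXClient kafkaJMXClient;

    public Object readBytesInRate(Long clusterPhyId, Integer brokerId) {
        JmxConnectorWrap jmxConnectorWrap = kafkaJMXClient.getClientWithCheck(clusterPhyId, brokerId);
        if (jmxConnectorWrap == null) {
            // The connector could not be created or failed its health check
            return null;
        }
        try {
            // Standard Kafka broker MBean and attribute
            ObjectName objectName = new ObjectName("kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec");
            return jmxConnectorWrap.getAttribute(objectName, "OneMinuteRate");
        } catch (Exception e) {
            return null;
        }
    }
}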

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql;
import com.xiaojukeji.know.streaming.km.common.bean.po.ControllerChangeLogPO;
import org.springframework.stereotype.Repository;
import java.util.List;
import java.util.Map;
@Repository
public interface ControllerChangeLogDAO {
int insert(ControllerChangeLogPO controllerChangeLogPO);
List<ControllerChangeLogPO> paginationQuery(Map<String, Object> params);
Long queryCount(Map<String, Object> params);
}

View File

@@ -0,0 +1,9 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import org.springframework.stereotype.Repository;
@Repository
public interface KafkaAclDAO extends BaseMapper<KafkaAclPO> {
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaUserPO;
import org.springframework.stereotype.Repository;
@Repository
public interface KafkaUserDAO extends BaseMapper<KafkaUserPO> {
int replace(KafkaUserPO kafkaPrincipalPO);
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.broker;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerConfigPO;
import org.springframework.stereotype.Repository;
@Repository
public interface BrokerConfigDAO extends BaseMapper<BrokerConfigPO> {
int replace(BrokerConfigPO po);
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.broker;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO;
import org.springframework.stereotype.Repository;
@Repository
public interface BrokerDAO extends BaseMapper<BrokerPO> {
int replace(BrokerPO brokerPO);
}

View File

@@ -0,0 +1,9 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.changerecord;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.changerecord.KafkaChangeRecordPO;
import org.springframework.stereotype.Repository;
@Repository
public interface KafkaChangeRecordDAO extends BaseMapper<KafkaChangeRecordPO> {
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.cluster;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO;
import org.springframework.stereotype.Repository;
@Repository
public interface ClusterPhyDAO extends BaseMapper<ClusterPhyPO> {
int addAndSetId(ClusterPhyPO clusterPhyPO);
}

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.config;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.config.PlatformClusterConfigPO;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface PlatformClusterConfigDAO extends BaseMapper<PlatformClusterConfigPO> {
int batchReplace(List<PlatformClusterConfigPO> poList);
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.group;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import org.springframework.stereotype.Repository;
@Repository
public interface GroupMemberDAO extends BaseMapper<GroupMemberPO> {
int replace(GroupMemberPO po);
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.health;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import org.springframework.stereotype.Repository;
@Repository
public interface HealthCheckResultDAO extends BaseMapper<HealthCheckResultPO> {
int replace(HealthCheckResultPO healthCheckResultPO);
}

View File

@@ -0,0 +1,12 @@
/*
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
*/
package com.xiaojukeji.know.streaming.km.persistence.mysql.job;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.job.JobPO;
import org.springframework.stereotype.Repository;
@Repository
public interface JobDAO extends BaseMapper<JobPO> {
}

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.kafkacontroller;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.kafkacontrollr.KafkaControllerPO;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface KafkaControllerDAO extends BaseMapper<KafkaControllerPO> {
List<KafkaControllerPO> listAllLatest();
}

View File

@@ -0,0 +1,9 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.km;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.km.KmNodePO;
import org.springframework.stereotype.Repository;
@Repository
public interface KmNodeDAO extends BaseMapper<KmNodePO> {
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.partition;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.partition.PartitionPO;
import org.springframework.stereotype.Repository;
@Repository
public interface PartitionDAO extends BaseMapper<PartitionPO> {
int replace(PartitionPO po);
}

View File

@@ -0,0 +1,10 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.reassign;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.reassign.ReassignJobPO;
import org.springframework.stereotype.Repository;
@Repository
public interface ReassignJobDAO extends BaseMapper<ReassignJobPO> {
int addAndSetId(ReassignJobPO po);
}

View File

@@ -0,0 +1,9 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.reassign;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.reassign.ReassignSubJobPO;
import org.springframework.stereotype.Repository;
@Repository
public interface ReassignSubJobDAO extends BaseMapper<ReassignSubJobPO> {
}

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.persistence.mysql.topic;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO;
import org.springframework.stereotype.Repository;
@Repository
public interface TopicDAO extends BaseMapper<TopicPO> {
int replaceAll(TopicPO topicPO);
int updateConfig(TopicPO topicPO);
}

View File

@@ -0,0 +1,107 @@
package com.xiaojukeji.know.streaming.km.persistence.schedule;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyLoadChangedEvent;
import com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.mysql.cluster.ClusterPhyDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Function;
import java.util.stream.Collectors;
@Component
public class ScheduleFlushClusterTask {
private static final ILog log = LogFactory.getLog(ScheduleFlushClusterTask.class);
@Autowired
private ClusterPhyDAO clusterPhyDAO;
private final BlockingQueue<ClusterPhyLoadChangedEvent> eventQueue = new LinkedBlockingQueue<>(2000);
private final Thread handleEventThread = new Thread(() -> handleEvent(), "ScheduleFlushClusterTaskThread");
@PostConstruct
public void init() {
// start the event-handling thread
handleEventThread.start();
// load the clusters immediately
flush();
}
@Scheduled(cron="0/10 * * * * ?")
public void flush() {
// query cluster data from DB
List<ClusterPhyPO> poList = clusterPhyDAO.selectList(null);
// convert to a map keyed by cluster ID
Map<Long, ClusterPhy> inDBClusterMap = ConvertUtil.list2List(poList, ClusterPhy.class).stream().collect(Collectors.toMap(ClusterPhy::getId, Function.identity(), (key1, key2) -> key2));
// newly added clusters
for (ClusterPhy inDBClusterPhy: inDBClusterMap.values()) {
if (LoadedClusterPhyCache.containsByPhyId(inDBClusterPhy.getId())) {
// already loaded
continue;
}
LoadedClusterPhyCache.replace(inDBClusterPhy);
this.put2Queue(new ClusterPhyLoadChangedEvent(this, inDBClusterPhy, null, OperationEnum.ADD));
}
// removed clusters
for (ClusterPhy inCacheClusterPhy: LoadedClusterPhyCache.listAll().values()) {
if (inDBClusterMap.containsKey(inCacheClusterPhy.getId())) {
// still exists in DB
continue;
}
LoadedClusterPhyCache.remove(inCacheClusterPhy.getId());
this.put2Queue(new ClusterPhyLoadChangedEvent(this, null, inCacheClusterPhy, OperationEnum.DELETE));
}
// clusters whose configuration was modified
for (ClusterPhy inDBClusterPhy: inDBClusterMap.values()) {
ClusterPhy inCacheClusterPhy = LoadedClusterPhyCache.getByPhyId(inDBClusterPhy.getId());
if (inCacheClusterPhy == null || inDBClusterPhy.equals(inCacheClusterPhy)) {
// not loaded || unchanged
continue;
}
LoadedClusterPhyCache.replace(inDBClusterPhy);
this.put2Queue(new ClusterPhyLoadChangedEvent(this, inDBClusterPhy, inCacheClusterPhy, OperationEnum.EDIT));
}
}
private void put2Queue(ClusterPhyLoadChangedEvent event) {
try {
eventQueue.put(event);
} catch (Exception e) {
log.error("method=put2Queue||event={}||errMsg=exception", event, e);
}
}
private void handleEvent() {
while (true) {
try {
ClusterPhyLoadChangedEvent event = eventQueue.take();
SpringTool.publish(event);
} catch (Exception e) {
log.error("method=handleEvent||errMsg=exception", e);
}
}
}
}
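For context, each event queued above is published through SpringTool and consumed by Spring listeners elsewhere in this module. The following is a minimal, hypothetical listener sketch, only to illustrate the consumer side of ClusterPhyLoadChangedEvent; the package, class name and log message are made up and are not part of this commit.
package com.xiaojukeji.know.streaming.km.persistence.example;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyLoadChangedEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;
@Component
public class DemoClusterChangeListener implements ApplicationListener<ClusterPhyLoadChangedEvent> {
    private static final ILog log = LogFactory.getLog(DemoClusterChangeListener.class);
    @Override
    public void onApplicationEvent(ClusterPhyLoadChangedEvent event) {
        // a real handler would inspect which cluster was added, modified or removed;
        // this sketch only records that a change event arrived
        log.info("method=onApplicationEvent||event={}||msg=received cluster load changed event", event);
    }
}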

View File

@@ -0,0 +1,87 @@
package com.xiaojukeji.know.streaming.km.persistence.zk;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import java.util.List;
/**
 * Service for reading Kafka metadata stored in ZooKeeper
 * @author zengqiao
 * @date 22/03/08
 */
public interface KafkaZKDAO {
/**
 * Get broker metadata directly from a ZK address
 * @param zkAddress ZooKeeper address
 * @return metadata of one registered broker, or null if no broker is registered
 * @throws KeeperException.NoNodeException ZK node-not-found exception
 * @throws AdminOperateException operation exception
 */
Broker getBrokerMetadata(String zkAddress) throws KeeperException.NoNodeException, AdminOperateException;
/**
 * Get broker metadata
 * @param clusterPhyId physical cluster ID
 * @param brokerId broker ID
 * @return broker metadata
 * @throws NotExistException not-exist exception
 * @throws KeeperException ZK exception
 * @throws AdminOperateException operation exception
 */
Broker getBrokerMetadata(Long clusterPhyId, Integer brokerId) throws NotExistException, KeeperException, AdminOperateException;
/**
 * Get topic metadata
 * @param clusterPhyId physical cluster ID
 * @param topicName topic name
 * @return topic metadata
 * @throws NotExistException not-exist exception
 * @throws KeeperException ZK exception
 * @throws AdminOperateException operation exception
 */
Topic getTopicMetadata(Long clusterPhyId, String topicName) throws NotExistException, KeeperException, AdminOperateException;
/**
 * Get metadata of all topics
 * @param clusterPhyId physical cluster ID
 * @param addWatch whether to register a ZK watch
 * @return list of topic metadata
 * @throws NotExistException not-exist exception
 * @throws KeeperException ZK exception
 * @throws AdminOperateException operation exception
 */
List<Topic> getAllTopicMetadata(Long clusterPhyId, boolean addWatch) throws NotExistException, KeeperException, AdminOperateException;
/**
 * Get KafkaController information
 * @param clusterPhyId physical cluster ID
 * @param addWatch whether to register a ZK watch
 * @return controller information, or null if the controller node does not exist
 * @throws NotExistException not-exist exception
 * @throws KeeperException ZK exception
 * @throws AdminOperateException operation exception
 */
KafkaController getKafkaController(Long clusterPhyId, boolean addWatch) throws NotExistException, KeeperException, AdminOperateException;
List<String> getChildren(Long clusterPhyId, String path, boolean addWatch) throws NotExistException, KeeperException, AdminOperateException;
List<String> getChildren(ZooKeeper zooKeeper, String path) throws KeeperException, InterruptedException;
Tuple<byte[], Stat> getDataAndStat(Long clusterPhyId, String path) throws NotExistException, KeeperException, AdminOperateException;
<T> T getData(Long clusterPhyId, String path, Class<T> clazz) throws NotExistException, KeeperException, AdminOperateException;
/**
 * Create a config-change notification node, using the v1 payload structure
 * @param clusterPhyId cluster ID
 * @param entityType entity type
 * @param entityName entity name
 * @throws NotExistException besides this exception, ZK itself may also throw its own exceptions
 */
void createConfigChangeNotificationVersionOne(Long clusterPhyId, String entityType, String entityName) throws NotExistException;
}
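A rough usage sketch of this interface, assuming it is injected as a Spring bean; the surrounding class, method name and printed output are illustrative only, and the calls shown are just the ones declared above.
package com.xiaojukeji.know.streaming.km.persistence.example;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;
@Component
public class DemoZkReader {
    @Autowired
    private KafkaZKDAO kafkaZKDAO;
    public void printClusterSnapshot(Long clusterPhyId) throws Exception {
        // controller may be null when the controller znode does not exist
        KafkaController controller = kafkaZKDAO.getKafkaController(clusterPhyId, false);
        // read every topic of the cluster, without registering ZK watches
        List<Topic> topicList = kafkaZKDAO.getAllTopicMetadata(clusterPhyId, false);
        System.out.println("hasController=" + (controller != null) + ", topicCount=" + topicList.size());
    }
}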

View File

@@ -0,0 +1,272 @@
package com.xiaojukeji.know.streaming.km.persistence.zk.impl;
import com.alibaba.fastjson.JSON;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.enums.topic.TopicTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.ControllerData;
import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.know.streaming.km.common.zookeeper.znode.brokers.PartitionMap;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.zk.KafkaZKDAO;
import kafka.utils.Json;
import kafka.zk.*;
import kafka.zookeeper.AsyncResponse;
import kafka.zookeeper.CreateRequest;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import scala.Option;
import java.util.*;
/**
 * Service for reading Kafka metadata stored in ZooKeeper
 * @author zengqiao
 * @date 22/03/08
 */
@Repository
public class KafkaZKDAOImpl implements KafkaZKDAO {
private static final ILog logger = LogFactory.getLog(KafkaZKDAOImpl.class);
@Autowired
private KafkaAdminZKClient kafkaAdminZKClient;
@Override
public Broker getBrokerMetadata(String zkAddress) throws KeeperException.NoNodeException, AdminOperateException {
ZooKeeper zooKeeper = null;
try {
zooKeeper = new ZooKeeper(zkAddress, 1000, watchedEvent -> logger.info(" receive event : " + watchedEvent.getType().name()));
List<String> brokerIdList = this.getChildren(zooKeeper, BrokerIdsZNode.path());
if (brokerIdList == null || brokerIdList.isEmpty()) {
return null;
}
BrokerMetadata brokerMetadata = this.getData(zooKeeper, BrokerIdZNode.path(Integer.parseInt(brokerIdList.get(0))), false, BrokerMetadata.class);
return Broker.buildFrom(null, Integer.valueOf(brokerIdList.get(0)), brokerMetadata);
} catch (KeeperException.NoNodeException nne) {
logger.warn("method=getBrokerMetadata||zkAddress={}||errMsg=exception", zkAddress, nne);
throw nne;
} catch (Exception e) {
logger.error("method=getBrokerMetadata||zkAddress={}||errMsg=exception", zkAddress, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
} finally {
try {
if (zooKeeper != null) {
zooKeeper.close();
}
} catch (Exception e) {
logger.error("method=getBrokerMetadata||zkAddress={}||msg=close failed||errMsg=exception", zkAddress, e);
}
}
}
@Override
public Broker getBrokerMetadata(Long clusterPhyId, Integer brokerId) throws NotExistException, KeeperException, AdminOperateException {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
try {
BrokerMetadata metadata = this.getData(kafkaZkClient.currentZooKeeper(), BrokerIdZNode.path(brokerId), false, BrokerMetadata.class);
BrokerMetadata.parseAndUpdateBrokerMetadata(metadata);
return Broker.buildFrom(clusterPhyId, brokerId, metadata);
} catch (KeeperException ke) {
logger.error("method=getBrokerMetadata||clusterPhyId={}||brokerId={}||errMsg=exception", clusterPhyId, brokerId, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getBrokerMetadata||clusterPhyId={}||brokerId={}||errMsg=exception", clusterPhyId, brokerId, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public Topic getTopicMetadata(Long clusterPhyId, String topicName) throws NotExistException, KeeperException, AdminOperateException {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
try {
Stat stat = new Stat();
PartitionMap partitionMap = this.getData(kafkaZkClient.currentZooKeeper(), TopicZNode.path(topicName), false, PartitionMap.class, stat);
Topic topic = new Topic();
topic.setClusterPhyId(clusterPhyId);
topic.setTopicName(topicName);
topic.setBrokerIdSet(new HashSet<>());
topic.setCreateTime(stat.getCtime());
topic.setUpdateTime(stat.getMtime());
topic.setPartitionMap(partitionMap.getPartitions());
topic.setReplicaNum(partitionMap.getPartitions().values().iterator().next().size());
topic.setPartitionNum(partitionMap.getPartitions().size());
topic.setType(TopicTypeEnum.getTopicTypeCode(topicName));
Set<Integer> brokerIdSet = new HashSet<>();
Map<Integer, List<Integer>> topicBrokers = partitionMap.getPartitions();
for (Map.Entry<Integer, List<Integer>> entry : topicBrokers.entrySet()) {
brokerIdSet.addAll(entry.getValue());
}
topic.setBrokerIdSet(brokerIdSet);
return topic;
} catch (KeeperException ke) {
logger.error("method=getTopicMetadata||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhyId, topicName, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getTopicMetadata||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhyId, topicName, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public List<Topic> getAllTopicMetadata(Long clusterPhyId, boolean addWatch) throws NotExistException, KeeperException, AdminOperateException {
List<Topic> topicList = new ArrayList<>();
try {
List<String> topicNameList = this.getChildren(clusterPhyId, TopicsZNode.path(), addWatch);
for (String topicName: topicNameList) {
topicList.add(this.getTopicMetadata(clusterPhyId, topicName));
}
return topicList;
} catch (KeeperException ke) {
logger.error("method=getAllTopicMetadata||clusterPhyId={}||errMsg=exception", clusterPhyId, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getAllTopicMetadata||clusterPhyId={}||errMsg=exception", clusterPhyId, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public KafkaController getKafkaController(Long clusterPhyId, boolean addWatch) throws NotExistException, KeeperException, AdminOperateException {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
try {
ControllerData controllerData = this.getData(kafkaZkClient.currentZooKeeper(), ControllerZNode.path(), addWatch, ControllerData.class);
KafkaController kafkaController = new KafkaController();
kafkaController.setClusterPhyId(clusterPhyId);
kafkaController.setBrokerId(controllerData.getBrokerid());
kafkaController.setTimestamp(controllerData.getTimestamp());
return kafkaController;
} catch (KeeperException.NoNodeException nne) {
// the controller node does not exist, return null directly
return null;
} catch (KeeperException ke) {
logger.error("method=getKafkaController||clusterPhyId={}||errMsg=exception", clusterPhyId, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getKafkaController||clusterPhyId={}||errMsg=exception", clusterPhyId, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public List<String> getChildren(Long clusterPhyId, String path, boolean addWatch) throws NotExistException, KeeperException, AdminOperateException {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
try {
List<String> children = kafkaZkClient.currentZooKeeper().getChildren(path, addWatch);
return children;
} catch (KeeperException ke) {
logger.error("method=getChildren||clusterPhyId={}||path={}||errMsg=exception", clusterPhyId, path, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getChildren||clusterPhyId={}||path={}||errMsg=exception", clusterPhyId, path, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public List<String> getChildren(ZooKeeper zooKeeper, String path) throws KeeperException, InterruptedException {
return zooKeeper.getChildren(path, false);
}
@Override
public Tuple<byte[], Stat> getDataAndStat(Long clusterPhyId, String path) throws NotExistException, KeeperException, AdminOperateException {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
try {
Stat stat = new Stat();
byte[] data = kafkaZkClient.currentZooKeeper().getData(path, false, stat);
return new Tuple<>(data, stat);
} catch (KeeperException ke) {
logger.error("method=getDataAndStat||clusterPhyId={}||path={}||errMsg=exception", clusterPhyId, path, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getDataAndStat||clusterPhyId={}||path={}||errMsg=exception", clusterPhyId, path, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public <T> T getData(Long clusterPhyId, String path, Class<T> clazz) throws NotExistException, KeeperException, AdminOperateException {
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
try {
return this.getData(kafkaZkClient.currentZooKeeper(), path, false, clazz);
} catch (KeeperException ke) {
logger.error("method=getData||clusterPhyId={}||path={}||errMsg=exception", clusterPhyId, path, ke);
throw ke;
} catch (Exception e) {
logger.error("method=getData||clusterPhyId={}||path={}||errMsg=exception", clusterPhyId, path, e);
throw new AdminOperateException("read zk failed", e, ResultStatus.ZK_OPERATE_FAILED);
}
}
@Override
public void createConfigChangeNotificationVersionOne(Long clusterPhyId, String entityType, String entityName) throws NotExistException {
// follows the implementation of KafkaZkClient.createConfigChangeNotification(), adjusted only when assembling the payload
KafkaZkClient kafkaZkClient = kafkaAdminZKClient.getClient(clusterPhyId);
kafkaZkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path());
String notificationPath = ConfigEntityChangeNotificationSequenceZNode.createPath();
Map<String, Object> dataMap = new HashMap<>();
dataMap.put("version", 1);
dataMap.put("entity_type", entityType);
dataMap.put("entity_name", entityName);
CreateRequest createRequest = new CreateRequest(
notificationPath,
Json.encodeAsBytes(dataMap),
kafkaZkClient.defaultAcls(notificationPath),
CreateMode.PERSISTENT_SEQUENTIAL,
Option.apply(null)
);
AsyncResponse createResponse = kafkaZkClient.retryRequestUntilConnected(createRequest, ZkVersion.MatchAnyVersion());
createResponse.maybeThrow();
}
/**************************************************** private method ****************************************************/
private <T> T getData(ZooKeeper zooKeeper, String path, boolean addWatch, Class<T> clazz, Stat stat) throws KeeperException, InterruptedException {
byte[] bytes = zooKeeper.getData(path, addWatch, stat);
return JSON.parseObject(bytes, clazz);
}
private <T> T getData(ZooKeeper zooKeeper, String path, boolean addWatch, Class<T> clazz) throws KeeperException, InterruptedException {
byte[] bytes = zooKeeper.getData(path, addWatch, null);
return JSON.parseObject(bytes, clazz);
}
}

View File

@@ -0,0 +1,26 @@
{
"_source": "timestamp",
"size": 1,
"query": {
"bool": {
"must": [
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
%s
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}
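The %d and %s tokens in DSL templates like the one above are printf-style placeholders; presumably the ES DAO layer loads the file and fills them with String.format before issuing the search. A hypothetical sketch of that step (the variable names and the optional extra clause are made up for illustration):
// hypothetical filling of the template above: gte/lte timestamps plus an optional extra must clause
String template = "...";                        // the JSON template above, typically loaded from the resource file (loading code omitted)
long endTime = System.currentTimeMillis();
long startTime = endTime - 60 * 60 * 1000L;     // e.g. the last hour
String extraMustClause = "";                    // may be empty, or e.g. ",{\"term\":{\"clusterPhyId\":{\"value\":1}}}"
String dsl = String.format(template, startTime, endTime, extraMustClause);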

View File

@@ -0,0 +1,44 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
"hist": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "%s",
"time_zone": "Asia/Shanghai",
"min_doc_count": 0
},
"aggs": {
%s
}
}
}
}

View File

@@ -0,0 +1,34 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
%s
}
}

View File

@@ -0,0 +1,45 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
"hist": {
"terms": {
"field": "brokerId",
"collect_mode": "breadth_first"
},
"aggs": {
"hist": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "%s",
"time_zone": "Asia/Shanghai",
"min_doc_count": 0
},
"aggs": {
%s
}
}
}
}
}
}

View File

@@ -0,0 +1,38 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,37 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
"hist": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "%s",
"time_zone": "Asia/Shanghai",
"min_doc_count": 0
},
"aggs": {
%s
}
}
}
}

View File

@@ -0,0 +1,27 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
%s
}
}

View File

@@ -0,0 +1,31 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,17 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"timestamp": {
"value": %d
}
}
}
%s
]
}
},
"sort":[%s]
}

View File

@@ -0,0 +1,38 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"group": {
"value": "%s"
}
}
},
{
"term": {
"groupMetric": {
"value": "1"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
},
%s
]
}
}
}

View File

@@ -0,0 +1,40 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"group": {
"value": "%s"
}
}
},
{
"term": {
"groupMetric": {
"value": "1"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
],
"must_not": [
%s
]
}
}
}

View File

@@ -0,0 +1,45 @@
{
"size": 1000,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"group": {
"value": "%s"
}
}
},
{
"term": {
"groupMetric": {
"value": "0"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,71 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"group": {
"value": "%s"
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"partitionId": {
"value": %d
}
}
},
{
"term": {
"groupMetric": {
"value": "0"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
],
"aggs": {
"hist": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "%s",
"time_zone": "Asia/Shanghai",
"min_doc_count": 0
},
"aggs": {
%s
}
}
}
}

View File

@@ -0,0 +1,55 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"group": {
"value": "%s"
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"groupMetric": {
"value": "0"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
%s
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,51 @@
{
"size": 1000,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"group": {
"value": "%s"
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"groupMetric": {
"value": "0"
}
}
},
{
"term": {
"timestamp": {
"value": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,52 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"term": {
"partitionId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,37 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,34 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
%s
}
}

View File

@@ -0,0 +1,52 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"partitionId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,38 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"brokerAgg": {
"value": "1"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
},
%s
]
}
}
}

View File

@@ -0,0 +1,40 @@
{
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"brokerAgg": {
"value": "1"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
],
"must_not": [
%s
]
}
}
}

View File

@@ -0,0 +1,44 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
"hist": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "%s",
"time_zone": "Asia/Shanghai",
"min_doc_count": 0
},
"aggs": {
%s
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
%s
]
}
},
"aggs": {
"hist": {
"terms": {
"field": "topic",
"collect_mode": "breadth_first"
},
"aggs": {
%s
}
}
}
}

View File

@@ -0,0 +1,45 @@
{
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"aggs": {
"hist": {
"terms": {
"field": "topic",
"collect_mode": "breadth_first"
},
"aggs": {
"hist": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "%s",
"time_zone": "Asia/Shanghai",
"min_doc_count": 0
},
"aggs": {
%s
}
}
}
}
}
}

View File

@@ -0,0 +1,32 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
}
]
}
},
"sort":[%s]
}

View File

@@ -0,0 +1,39 @@
{
"size":1000,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"brokerAgg": {
"value": "1"
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
%s
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,45 @@
{
"size": 1,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"topic": {
"value": "%s"
}
}
},
{
"term": {
"brokerId": {
"value": %d
}
}
},
{
"range": {
"timestamp": {
"gte": %d,
"lte": %d
}
}
}
]
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}

View File

@@ -0,0 +1,32 @@
{
"size": 5000,
"query": {
"bool": {
"must": [
{
"term": {
"clusterPhyId": {
"value": %d
}
}
},
{
"term": {
"timestamp": {
"value": %d
}
}
},
{
"term": {
"brokerAgg": {
"value": "1"
}
}
}
%s
]
}
},
"sort":[%s]
}

View File

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerConfigDAO">
<resultMap id="BrokerConfigMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerConfigPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="broker_id" property="brokerId" />
<result column="config_name" property="configName" />
<result column="config_value" property="configValue" />
<result column="diff_type" property="diffType" />
</resultMap>
<insert id="replace" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerConfigPO">
REPLACE ks_km_broker_config
(cluster_phy_id, broker_id, config_name, config_value, diff_type, update_time)
VALUES
(#{clusterPhyId}, #{brokerId}, #{configName}, #{configValue}, #{diffType}, #{updateTime})
</insert>
</mapper>

View File

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.broker.BrokerDAO">
<resultMap id="BrokerMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="broker_id" property="brokerId" />
<result column="host" property="host" />
<result column="port" property="port" />
<result column="jmx_port" property="jmxPort" />
<result column="start_timestamp" property="startTimestamp" />
<result column="status" property="status" />
</resultMap>
<insert id="replace" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO">
REPLACE ks_km_broker
(cluster_phy_id, broker_id, host, port, jmx_port, start_timestamp, status, update_time)
VALUES
(#{clusterPhyId}, #{brokerId}, #{host}, #{port}, #{jmxPort}, #{startTimestamp}, #{status}, #{updateTime})
</insert>
</mapper>

View File

@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.cluster.ClusterPhyDAO">
<resultMap id="ClusterPhyMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="name" property="name" />
<result column="zookeeper" property="zookeeper" />
<result column="bootstrap_servers" property="bootstrapServers" />
<result column="kafka_version" property="kafkaVersion" />
<result column="client_properties" property="clientProperties" />
<result column="jmx_properties" property="jmxProperties" />
<result column="auth_type" property="authType" />
<result column="run_state" property="runState" />
<result column="description" property="description" />
</resultMap>
<insert id="addAndSetId"
parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO"
useGeneratedKeys="true"
keyProperty="id">
INSERT INTO ks_km_physical_cluster
(name, zookeeper, bootstrap_servers, kafka_version, client_properties, jmx_properties, description, auth_type, run_state)
VALUES
(#{name}, #{zookeeper}, #{bootstrapServers}, #{kafkaVersion}, #{clientProperties}, #{jmxProperties}, #{description}, #{authType}, #{runState})
</insert>
</mapper>
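Because addAndSetId is declared with useGeneratedKeys="true" and keyProperty="id", MyBatis writes the auto-increment primary key back into the PO after the INSERT. A hypothetical usage sketch (the setter/getter names follow the columns mapped above, and the method is assumed to be declared on ClusterPhyDAO):
// hypothetical caller: insert a cluster row and read back the generated id
ClusterPhyPO clusterPhyPO = new ClusterPhyPO();
clusterPhyPO.setName("demo-cluster");           // other columns omitted for brevity
clusterPhyDAO.addAndSetId(clusterPhyPO);        // executes the INSERT above
Long newClusterId = clusterPhyPO.getId();       // populated thanks to useGeneratedKeys="true" keyProperty="id"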

View File

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupMemberDAO">
<resultMap id="GroupMemberMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="topic_name" property="topicName" />
<result column="group_name" property="groupName" />
<result column="state" property="state" />
<result column="member_count" property="memberCount" />
</resultMap>
<insert id="replace" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO">
REPLACE ks_km_group_member
(cluster_phy_id, topic_name, group_name, `state`, member_count, update_time)
VALUES
(#{clusterPhyId}, #{topicName}, #{groupName}, #{state}, #{memberCount}, #{updateTime})
</insert>
</mapper>

View File

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.health.HealthCheckResultDAO">
<resultMap id="ClusterPhyMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="dimension" property="dimension" />
<result column="config_name" property="configName" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="res_name" property="resName" />
<result column="passed" property="passed" />
</resultMap>
<insert id="replace" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO">
REPLACE ks_km_health_check_result
(dimension, config_name, cluster_phy_id, res_name, passed)
VALUES
(#{dimension}, #{configName}, #{clusterPhyId}, #{resName}, #{passed})
</insert>
</mapper>

View File

@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.KafkaAclDAO">
<resultMap id="ClusterPhyMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="principal" property="principal" />
<result column="operation" property="operation" />
<result column="permission_type" property="permissionType" />
<result column="host" property="host" />
<result column="resource_type" property="resourceType" />
<result column="resource_name" property="resourceName" />
<result column="pattern_type" property="patternType" />
<result column="unique_field" property="uniqueField" />
</resultMap>
</mapper>

View File

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.kafkacontroller.KafkaControllerDAO">
<resultMap id="KafkaControllerMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.kafkacontrollr.KafkaControllerPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="broker_id" property="brokerId" />
<result column="broker_host" property="brokerHost" />
<result column="broker_rack" property="brokerRack" />
<result column="timestamp" property="timestamp" />
</resultMap>
<select id="listAllLatest" resultMap="KafkaControllerMap">
SELECT Table_B.* FROM
(SELECT max(`id`) as id FROM `ks_km_kafka_controller` GROUP BY `cluster_phy_id`) AS Table_A
JOIN `ks_km_kafka_controller` AS Table_B ON
Table_A.id=Table_B.id;
</select>
</mapper>

View File

@@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.KafkaUserDAO">
<resultMap id="ClusterPhyMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.KafkaUserPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="name" property="name" />
<result column="token" property="token" />
</resultMap>
<insert id="replace" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.KafkaUserPO">
REPLACE ks_km_kafka_user
(cluster_phy_id, `name`, token, update_time)
VALUES
(#{clusterPhyId}, #{name}, #{token}, #{updateTime})
</insert>
</mapper>

View File

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.partition.PartitionDAO">
<resultMap id="PartitionMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.partition.PartitionPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="topic_name" property="topicName" />
<result column="partition_id" property="partitionId" />
<result column="leader_broker_id" property="leaderBrokerId" />
<result column="in_sync_replicas" property="inSyncReplicas" />
<result column="assign_replicas" property="assignReplicas" />
</resultMap>
<insert id="replace" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.partition.PartitionPO">
REPLACE ks_km_partition
(cluster_phy_id, topic_name, partition_id, leader_broker_id, in_sync_replicas, assign_replicas)
VALUES
(#{clusterPhyId}, #{topicName}, #{partitionId}, #{leaderBrokerId}, #{inSyncReplicas}, #{assignReplicas})
</insert>
</mapper>

View File

@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.config.PlatformClusterConfigDAO">
<resultMap id="PlatformClusterConfigMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.config.PlatformClusterConfigPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_id" property="clusterId" />
<result column="value_group" property="valueGroup" />
<result column="value_name" property="valueName" />
<result column="value" property="value" />
<result column="description" property="description" />
<result column="operator" property="operator" />
</resultMap>
<insert id="batchReplace" parameterType="java.util.List">
REPLACE INTO ks_km_platform_cluster_config (
cluster_id,
`value_group`,
`value_name`,
`value`,
description,
operator
) VALUES
<foreach collection="list" item="item" index="index" separator=",">
(
#{item.clusterId},
#{item.valueGroup},
#{item.valueName},
#{item.value},
#{item.description},
#{item.operator}
)
</foreach>
</insert>
</mapper>

View File

@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.reassign.ReassignJobDAO">
<resultMap id="ReassignJobMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.reassign.ReassignJobPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="reassignment_json" property="reassignmentJson" />
<result column="description" property="description" />
<result column="throttle_unit_byte" property="throttleUnitByte" />
<result column="start_time" property="startTime" />
<result column="finished_time" property="finishedTime" />
<result column="creator" property="creator" />
<result column="status" property="status" />
</resultMap>
<insert id="addAndSetId" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.reassign.ReassignJobPO">
INSERT INTO ks_km_reassign_job
(id, cluster_phy_id, reassignment_json, description, throttle_unit_byte, start_time, finished_time, creator, status)
VALUES
(#{id}, #{clusterPhyId}, #{reassignmentJson}, #{description}, #{throttleUnitByte}, #{startTime}, #{finishedTime}, #{creator}, #{status})
</insert>
</mapper>

View File

@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.xiaojukeji.know.streaming.km.persistence.mysql.topic.TopicDAO">
<resultMap id="TopicMap" type="com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO">
<id column="id" property="id" />
<result column="create_time" property="createTime" />
<result column="update_time" property="updateTime" />
<result column="cluster_phy_id" property="clusterPhyId" />
<result column="topic_name" property="topicName" />
<result column="replica_num" property="replicaNum" />
<result column="partition_num" property="partitionNum" />
<result column="broker_ids" property="brokerIds" />
<result column="partition_map" property="partitionMap" />
<result column="retention_ms" property="retentionMs" />
<result column="type" property="type" />
<result column="description" property="description" />
</resultMap>
<insert id="replaceAll" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO">
REPLACE ks_km_topic
(cluster_phy_id, topic_name, replica_num, partition_num, broker_ids, partition_map, retention_ms, `type`, description)
VALUES
(#{clusterPhyId}, #{topicName}, #{replicaNum}, #{partitionNum}, #{brokerIds}, #{partitionMap}, #{retentionMs}, #{type}, #{description})
</insert>
<update id="updateConfig" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO">
UPDATE ks_km_topic SET retention_ms = #{retentionMs} WHERE cluster_phy_id = #{clusterPhyId} AND topic_name = #{topicName}
</update>
</mapper>