mirror of
https://github.com/didi/KnowStreaming.git
synced 2026-01-02 18:32:08 +08:00
Merge 3.3.0 branch
@@ -0,0 +1,127 @@
package com.xiaojukeji.know.streaming.km.persistence.cache;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.po.health.HealthCheckResultPO;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

@Component
public class DataBaseDataLocalCache {
    @Value(value = "${cache.metric.topic-size:2000}")
    private Long topicLatestMetricsCacheSize;

    @Value(value = "${cache.metric.cluster-size:2000}")
    private Long clusterLatestMetricsCacheSize;

    @Value(value = "${cache.metadata.partition-size:2000}")
    private Long partitionsCacheSize;

    @Value(value = "${cache.metadata.health-check-result-size:10000}")
    private Long healthCheckResultCacheSize;

    @Value(value = "${cache.metadata.ha-topic-size:10000}")
    private Long haTopicCacheSize;

    private static Cache<Long, Map<String, TopicMetrics>> topicLatestMetricsCache;

    private static Cache<Long, ClusterMetrics> clusterLatestMetricsCache;

    private static Cache<Long, Map<String, List<Partition>>> partitionsCache;

    private static Cache<Long, Map<String, List<HealthCheckResultPO>>> healthCheckResultCache;

    private static Cache<String, Boolean> haTopicCache;

    @PostConstruct
    private void init() {
        topicLatestMetricsCache = Caffeine.newBuilder()
                .expireAfterWrite(360, TimeUnit.SECONDS)
                .maximumSize(topicLatestMetricsCacheSize)
                .build();

        clusterLatestMetricsCache = Caffeine.newBuilder()
                .expireAfterWrite(180, TimeUnit.SECONDS)
                .maximumSize(clusterLatestMetricsCacheSize)
                .build();

        partitionsCache = Caffeine.newBuilder()
                .expireAfterWrite(60, TimeUnit.SECONDS)
                .maximumSize(partitionsCacheSize)
                .build();

        healthCheckResultCache = Caffeine.newBuilder()
                .expireAfterWrite(90, TimeUnit.SECONDS)
                .maximumSize(healthCheckResultCacheSize)
                .build();

        haTopicCache = Caffeine.newBuilder()
                .expireAfterWrite(90, TimeUnit.SECONDS)
                .maximumSize(haTopicCacheSize)
                .build();
    }

    public static Map<String, TopicMetrics> getTopicMetrics(Long clusterPhyId) {
        return topicLatestMetricsCache.getIfPresent(clusterPhyId);
    }

    public static void putTopicMetrics(Long clusterPhyId, Map<String, TopicMetrics> metricsMap) {
        topicLatestMetricsCache.put(clusterPhyId, metricsMap);
    }

    public static ClusterMetrics getClusterLatestMetrics(Long clusterPhyId) {
        return clusterLatestMetricsCache.getIfPresent(clusterPhyId);
    }

    public static void putClusterLatestMetrics(Long clusterPhyId, ClusterMetrics metrics) {
        clusterLatestMetricsCache.put(clusterPhyId, metrics);
    }

    public static Map<String, List<Partition>> getPartitions(Long clusterPhyId) {
        return partitionsCache.getIfPresent(clusterPhyId);
    }

    public static void putPartitions(Long clusterPhyId, Map<String, List<Partition>> partitionMap) {
        partitionsCache.put(clusterPhyId, partitionMap);
    }

    public static Map<String, List<HealthCheckResultPO>> getHealthCheckResults(Long clusterId, HealthCheckDimensionEnum dimensionEnum) {
        return healthCheckResultCache.getIfPresent(getHealthCheckCacheKey(clusterId, dimensionEnum.getDimension()));
    }

    public static void putHealthCheckResults(Long cacheKey, Map<String, List<HealthCheckResultPO>> poMap) {
        healthCheckResultCache.put(cacheKey, poMap);
    }

    public static void putHealthCheckResults(Long clusterId, HealthCheckDimensionEnum dimensionEnum, Map<String, List<HealthCheckResultPO>> poMap) {
        healthCheckResultCache.put(getHealthCheckCacheKey(clusterId, dimensionEnum.getDimension()), poMap);
    }

    public static Long getHealthCheckCacheKey(Long clusterId, Integer dimensionCode) {
        return clusterId * HealthCheckDimensionEnum.MAX_VAL.getDimension() + dimensionCode;
    }

    public static void putHaTopic(Long clusterPhyId, String topicName) {
        String key = clusterPhyId + "@" + topicName;
        haTopicCache.put(key, true);
    }

    public static boolean isHaTopic(Long clusterPhyId, String topicName) {
        String key = clusterPhyId + "@" + topicName;
        return haTopicCache.getIfPresent(key) != null;
    }

    /**************************************************** private method ****************************************************/

    private DataBaseDataLocalCache() {
    }
}
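For orientation, a minimal usage sketch of the cache above; the caller, the cluster id and the way the map gets filled are illustrative, not part of this commit:

    // Hypothetical caller: publish a fresh per-cluster topic-metrics snapshot, then read it back.
    Long clusterPhyId = 1L;
    Map<String, TopicMetrics> metricsMap = new HashMap<>();  // filled by a metrics-collection job
    DataBaseDataLocalCache.putTopicMetrics(clusterPhyId, metricsMap);

    // Entries expire 360 seconds after the write, so a null result means "reload from the DB".
    Map<String, TopicMetrics> cached = DataBaseDataLocalCache.getTopicMetrics(clusterPhyId);
    if (cached == null) {
        // fall back to the database and repopulate the cache
    }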
@@ -0,0 +1,44 @@
package com.xiaojukeji.know.streaming.km.persistence.connect;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ConnectClusterLoadChangedEvent;
import org.springframework.context.ApplicationListener;

import java.util.concurrent.locks.ReentrantLock;

/**
 * @author wyb
 * @date 2022/11/7
 */
public abstract class AbstractConnectClusterChangeHandler implements ApplicationListener<ConnectClusterLoadChangedEvent> {

    private static final ILog log = LogFactory.getLog(AbstractConnectClusterChangeHandler.class);

    protected final ReentrantLock modifyClientMapLock = new ReentrantLock();

    protected abstract void add(ConnectCluster connectCluster);

    protected abstract void modify(ConnectCluster newConnectCluster, ConnectCluster oldConnectCluster);

    protected abstract void remove(ConnectCluster connectCluster);


    @Override
    public void onApplicationEvent(ConnectClusterLoadChangedEvent event) {
        switch (event.getOperationEnum()) {
            case ADD:
                this.add(event.getInDBConnectCluster());
                break;
            case EDIT:
                this.modify(event.getInDBConnectCluster(), event.getInCacheConnectCluster());
                break;
            case DELETE:
                this.remove(event.getInCacheConnectCluster());
                break;
            default:
                log.error("method=onApplicationEvent||event={}||msg=illegal event", event);
        }
    }
}
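A hedged sketch of what a concrete subscriber to these events can look like; the class below is hypothetical (ConnectJMXClient, next in this commit, is the real implementation) and assumes the usual Spring and java.util.concurrent imports:

    @Component
    public class ExampleConnectClusterHandler extends AbstractConnectClusterChangeHandler {
        // Tracks which Connect clusters are currently loaded; purely illustrative state.
        private final Map<Long, ConnectCluster> loadedMap = new ConcurrentHashMap<>();

        @Override
        protected void add(ConnectCluster connectCluster) {
            loadedMap.put(connectCluster.getId(), connectCluster);
        }

        @Override
        protected void modify(ConnectCluster newConnectCluster, ConnectCluster oldConnectCluster) {
            loadedMap.put(newConnectCluster.getId(), newConnectCluster);
        }

        @Override
        protected void remove(ConnectCluster connectCluster) {
            loadedMap.remove(connectCluster.getId());
        }
    }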
@@ -0,0 +1,146 @@
package com.xiaojukeji.know.streaming.km.persistence.connect;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectWorkerPO;
import com.xiaojukeji.know.streaming.km.common.jmx.JmxConnectorWrap;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectWorkerDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * @author wyb
 * @date 2022/10/31
 */
@Component
public class ConnectJMXClient extends AbstractConnectClusterChangeHandler {
    private static final ILog log = LogFactory.getLog(ConnectJMXClient.class);

    private static final Map<Long, Map<String, JmxConnectorWrap>> JMX_MAP = new ConcurrentHashMap<>();

    @Autowired
    private ConnectWorkerDAO connectWorkerDAO;


    public JmxConnectorWrap getClientWithCheck(Long connectClusterId, String workerId) {
        JmxConnectorWrap jmxConnectorWrap = this.getClient(connectClusterId, workerId);

        if (ValidateUtils.isNull(jmxConnectorWrap) || !jmxConnectorWrap.checkJmxConnectionAndInitIfNeed()) {
            log.error("method=getClientWithCheck||connectClusterId={}||workerId={}||msg=get jmx connector failed!", connectClusterId, workerId);
            return null;
        }

        return jmxConnectorWrap;
    }

    public JmxConnectorWrap getClient(Long connectorClusterId, String workerId) {
        Map<String, JmxConnectorWrap> jmxMap = JMX_MAP.getOrDefault(connectorClusterId, new ConcurrentHashMap<>());

        JmxConnectorWrap jmxConnectorWrap = jmxMap.get(workerId);
        if (jmxConnectorWrap != null) {
            // Already created, return it directly
            return jmxConnectorWrap;
        }

        // Not created yet, so create it
        return this.createJmxConnectorWrap(connectorClusterId, workerId);
    }

    private JmxConnectorWrap createJmxConnectorWrap(Long connectorClusterId, String workerId) {
        ConnectCluster connectCluster = LoadedConnectClusterCache.getByPhyId(connectorClusterId);
        if (connectCluster == null) {
            return null;
        }
        return this.createJmxConnectorWrap(connectCluster, workerId);
    }

    private JmxConnectorWrap createJmxConnectorWrap(ConnectCluster connectCluster, String workerId) {
        ConnectWorker connectWorker = this.getConnectWorkerFromDB(connectCluster.getId(), workerId);
        if (connectWorker == null) {
            return null;
        }

        try {
            modifyClientMapLock.lock();

            JmxConnectorWrap jmxConnectorWrap = JMX_MAP.getOrDefault(connectCluster.getId(), new ConcurrentHashMap<>()).get(workerId);
            if (jmxConnectorWrap != null) {
                return jmxConnectorWrap;
            }

            log.debug("method=createJmxConnectorWrap||connectClusterId={}||workerId={}||msg=create JmxConnectorWrap starting", connectCluster.getId(), workerId);

            JmxConfig jmxConfig = ConvertUtil.str2ObjByJson(connectCluster.getJmxProperties(), JmxConfig.class);
            if (jmxConfig == null) {
                jmxConfig = new JmxConfig();
            }


            jmxConnectorWrap = new JmxConnectorWrap(
                    "connectClusterId: " + connectCluster.getId() + " workerId: " + workerId,
                    null,
                    connectWorker.getHost(),
                    connectWorker.getJmxPort() != null ? connectWorker.getJmxPort() : jmxConfig.getJmxPort(),
                    jmxConfig
            );

            Map<String, JmxConnectorWrap> workerMap = JMX_MAP.getOrDefault(connectCluster.getId(), new ConcurrentHashMap<>());
            workerMap.put(workerId, jmxConnectorWrap);
            JMX_MAP.put(connectCluster.getId(), workerMap);
            return jmxConnectorWrap;
        } catch (Exception e) {
            log.debug("method=createJmxConnectorWrap||connectClusterId={}||workerId={}||msg=create JmxConnectorWrap failed||errMsg=exception||", connectCluster.getId(), workerId, e);
        } finally {
            modifyClientMapLock.unlock();
        }
        return null;
    }


    private ConnectWorker getConnectWorkerFromDB(Long connectorClusterId, String workerId) {
        LambdaQueryWrapper<ConnectWorkerPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
        lambdaQueryWrapper.eq(ConnectWorkerPO::getConnectClusterId, connectorClusterId);
        lambdaQueryWrapper.eq(ConnectWorkerPO::getWorkerId, workerId);
        ConnectWorkerPO connectWorkerPO = connectWorkerDAO.selectOne(lambdaQueryWrapper);
        if (connectWorkerPO == null) {
            return null;
        }
        return ConvertUtil.obj2Obj(connectWorkerPO, ConnectWorker.class);
    }


    @Override
    protected void add(ConnectCluster connectCluster) {
        JMX_MAP.putIfAbsent(connectCluster.getId(), new ConcurrentHashMap<>());
    }

    @Override
    protected void modify(ConnectCluster newConnectCluster, ConnectCluster oldConnectCluster) {
        if (newConnectCluster.getJmxProperties().equals(oldConnectCluster.getJmxProperties())) {
            return;
        }
        this.remove(newConnectCluster);
        this.add(newConnectCluster);
    }

    @Override
    protected void remove(ConnectCluster connectCluster) {
        Map<String, JmxConnectorWrap> jmxMap = JMX_MAP.remove(connectCluster.getId());
        if (jmxMap == null) {
            return;
        }
        for (JmxConnectorWrap jmxConnectorWrap : jmxMap.values()) {
            jmxConnectorWrap.close();
        }
    }
}
@@ -0,0 +1,37 @@
package com.xiaojukeji.know.streaming.km.persistence.connect.cache;

import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * @author wyb
 * @date 2022/11/7
 */
public class LoadedConnectClusterCache {
    private static final Map<Long, ConnectCluster> CONNECT_CLUSTER_MAP = new ConcurrentHashMap<>();

    public static boolean containsByPhyId(Long connectClusterId) {
        return CONNECT_CLUSTER_MAP.containsKey(connectClusterId);
    }

    public static ConnectCluster getByPhyId(Long connectClusterId) {
        return CONNECT_CLUSTER_MAP.get(connectClusterId);
    }

    public static ConnectCluster remove(Long connectClusterId) {
        return CONNECT_CLUSTER_MAP.remove(connectClusterId);
    }

    public static void replace(ConnectCluster connectCluster) {
        CONNECT_CLUSTER_MAP.put(connectCluster.getId(), connectCluster);
    }

    public static Map<Long, ConnectCluster> listAll() {
        return CONNECT_CLUSTER_MAP;
    }

}
@@ -0,0 +1,104 @@
package com.xiaojukeji.know.streaming.km.persistence.connect.schedule;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ConnectClusterLoadChangedEvent;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectClusterDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * @author wyb
 * @date 2022/11/7
 */
@Component
public class ScheduleFlushConnectClusterTask {
    private static final ILog log = LogFactory.getLog(ScheduleFlushConnectClusterTask.class);

    @Autowired
    private ConnectClusterDAO connectClusterDAO;

    private final BlockingQueue<ConnectClusterLoadChangedEvent> eventQueue = new LinkedBlockingQueue<>(2000);

    private final Thread handleEventThread = new Thread(() -> handleEvent(), "ScheduleFlushConnectClusterTask");

    @PostConstruct
    public void init() {
        // Start the event-handling thread
        handleEventThread.start();

        // Load the clusters immediately
        flush();
    }

    @Scheduled(cron="0/10 * * * * ?")
    public void flush() {
        List<ConnectCluster> inDBConnectClusterList = ConvertUtil.list2List(connectClusterDAO.selectList(null), ConnectCluster.class);
        Map<Long, ConnectCluster> inDBConnectClusterMap = inDBConnectClusterList.stream().collect(Collectors.toMap(ConnectCluster::getId, Function.identity()));

        // Check for newly added or changed clusters
        for (ConnectCluster inDBConnectCluster : inDBConnectClusterList) {
            ConnectCluster inCacheConnectCluster = LoadedConnectClusterCache.getByPhyId(inDBConnectCluster.getId());
            // Already present: check whether it needs to be replaced
            if (inCacheConnectCluster != null) {
                if (inCacheConnectCluster.equals(inDBConnectCluster)) {
                    continue;
                }
                LoadedConnectClusterCache.replace(inCacheConnectCluster);
                this.put2Queue(new ConnectClusterLoadChangedEvent(this, inDBConnectCluster, inCacheConnectCluster, OperationEnum.EDIT));

            } else {
                LoadedConnectClusterCache.replace(inDBConnectCluster);
                this.put2Queue(new ConnectClusterLoadChangedEvent(this, inDBConnectCluster, null, OperationEnum.ADD));
            }

        }

        // Check for deleted clusters
        for (ConnectCluster inCacheConnectCluster : LoadedConnectClusterCache.listAll().values()) {
            if (inDBConnectClusterMap.containsKey(inCacheConnectCluster.getId())) {
                continue;
            }
            LoadedConnectClusterCache.remove(inCacheConnectCluster.getId());
            this.put2Queue(new ConnectClusterLoadChangedEvent(this, null, inCacheConnectCluster, OperationEnum.DELETE));
        }

    }


    private void put2Queue(ConnectClusterLoadChangedEvent event) {
        try {
            eventQueue.put(event);
        } catch (Exception e) {
            log.error("method=put2Queue||event={}||errMsg=exception", event, e);
        }
    }


    private void handleEvent() {
        while (true) {
            try {
                ConnectClusterLoadChangedEvent event = eventQueue.take();
                SpringTool.publish(event);
            } catch (Exception e) {
                log.error("method=handleEvent||errMsg=exception", e);
            }
        }
    }
}
@@ -1,7 +1,7 @@
package com.xiaojukeji.know.streaming.km.persistence.es;

import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.utils.LoggerUtil;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslLoaderUtil;
import org.springframework.beans.factory.annotation.Autowired;

@@ -9,7 +9,7 @@ import org.springframework.beans.factory.annotation.Autowired;
 * DAO that operates directly on the ES cluster
 */
public abstract class BaseESDAO {
    protected static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
    protected static final ILog LOGGER = LoggerUtil.getESLogger();

    /**
     * Utility class for loading query statements

@@ -0,0 +1,155 @@
package com.xiaojukeji.know.streaming.km.persistence.es;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.parser.DefaultJSONParser;
import com.alibaba.fastjson.parser.Feature;
import com.alibaba.fastjson.parser.ParserConfig;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.didiglobal.logi.log.ILog;
import com.google.common.collect.Lists;
import com.xiaojukeji.know.streaming.km.common.utils.LoggerUtil;
import org.apache.commons.lang3.StringUtils;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * @author didi
 */
public class ESFileLoader {
    private static final ILog LOGGER = LoggerUtil.getESLogger();

    public Map<String, String> loaderFileContext(String filePath, Field[] fields) {
        LOGGER.info("method=loaderFileContext||DslLoaderUtil init start.");
        List<String> dslFileNames = Lists.newLinkedList();
        Map<String, String> fileContextMap = new HashMap<>();

        if(null == fields || 0 == fields.length){
            return fileContextMap;
        }

        // Use reflection to read the values of the constants defined on the interface
        for (int i = 0; i < fields.length; ++i) {
            fields[i].setAccessible(true);
            try {
                dslFileNames.add(fields[i].get(null).toString());
            } catch (IllegalAccessException e) {
                LOGGER.error("method=loaderFileContext||errMsg=fail to read {} error. ", fields[i].getName(),
                        e);
            }
        }

        // Load the DSL files and their contents
        for (String fileName : dslFileNames) {
            fileContextMap.put(fileName, readEsFileInJarFile(filePath, fileName));
        }

        // Log the loaded query statements
        LOGGER.info("method=loaderFileContext||msg=dsl files count {}", fileContextMap.size());
        for (Map.Entry<String/*fileRelativePath*/, String/*dslContent*/> entry : fileContextMap.entrySet()) {
            LOGGER.info("method=loaderFileContext||msg=file name {}, dsl content {}", entry.getKey(),
                    entry.getValue());
        }

        LOGGER.info("method=loaderFileContext||DslLoaderUtil init finished.");

        return fileContextMap;
    }

    /**
     * Strip whitespace from the JSON
     *
     * @param sourceDsl
     * @return
     */
    public String trimJsonBank(String sourceDsl) {
        List<String> dslList = Lists.newArrayList();

        DefaultJSONParser parser = null;
        Object obj = null;
        String dsl = sourceDsl;

        // Parse JSON objects one after another until pos reaches 0
        for (;;) {
            try {
                // Feature.OrderedField.getMask() is required here to keep field order
                parser = new DefaultJSONParser(dsl, ParserConfig.getGlobalInstance(),
                        JSON.DEFAULT_PARSER_FEATURE | Feature.OrderedField.getMask());
                obj = parser.parse();
            } catch (Exception t) {
                LOGGER.error("method=trimJsonBank||errMsg=parse json {} error. ", dsl, t);
            }
            if (obj == null) {
                break;
            }

            if (obj instanceof JSONObject) {
                dslList.add( JSON.toJSONString(obj, SerializerFeature.WriteMapNullValue));
                int pos = parser.getLexer().pos();
                if (pos <= 0) {
                    break;
                }
                dsl = dsl.substring(pos);
                parser.getLexer().close();
            } else {
                parser.getLexer().close();
                break;
            }
        }

        // If parsing failed or there is more than one query statement, return the original statement
        if (dslList.isEmpty() || dslList.size() > 1) {
            return sourceDsl;
        }

        return dslList.get(0);
    }

    /**
     * Read an ES statement file from inside the jar
     *
     * @param fileName
     * @return
     */
    private String readEsFileInJarFile(String filePath, String fileName) {
        InputStream inputStream = this.getClass().getClassLoader()
                .getResourceAsStream( filePath + fileName);

        if (inputStream != null) {
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
            String line = null;
            List<String> lines = Lists.newLinkedList();
            try {
                while ((line = bufferedReader.readLine()) != null) {
                    lines.add(line);
                }
                return StringUtils.join(lines, "");

            } catch (IOException e) {
                LOGGER.error("method=readDslFileInJarFile||errMsg=read file {} error. ", fileName,
                        e);

                return "";
            } finally {
                try {
                    inputStream.close();
                } catch (IOException e) {
                    LOGGER.error(
                            "method=readDslFileInJarFile||errMsg=fail to close file {} error. ",
                            fileName, e);
                }
            }
        } else {
            LOGGER.error("method=readDslFileInJarFile||errMsg=fail to read file {} content",
                    fileName);
            return "";
        }
    }
}
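A minimal sketch of how loaderFileContext is intended to be used; the resource directory "es/dsl/" is an assumption, while DslConstant and GET_LATEST_METRIC_TIME are the constants referenced elsewhere in this commit:

    // Hypothetical caller: load every DSL file whose name is declared as a constant on DslConstant.
    ESFileLoader loader = new ESFileLoader();
    Map<String, String> dslByFileName = loader.loaderFileContext("es/dsl/", DslConstant.class.getFields());

    // Each entry maps a constant's value (the file name) to the file content read from the jar;
    // trimJsonBank can then normalize a single-object DSL string.
    String dsl = loader.trimJsonBank(dslByFileName.get(DslConstant.GET_LATEST_METRIC_TIME));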
@@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON;
import com.didiglobal.logi.elasticsearch.client.ESClient;
import com.didiglobal.logi.elasticsearch.client.gateway.document.ESIndexRequest;
import com.didiglobal.logi.elasticsearch.client.gateway.document.ESIndexResponse;
import com.didiglobal.logi.elasticsearch.client.model.exception.ESIndexNotFoundException;
import com.didiglobal.logi.elasticsearch.client.model.type.ESVersion;
import com.didiglobal.logi.elasticsearch.client.request.batch.BatchNode;
import com.didiglobal.logi.elasticsearch.client.request.batch.BatchType;
@@ -11,16 +12,20 @@ import com.didiglobal.logi.elasticsearch.client.request.batch.ESBatchRequest;
import com.didiglobal.logi.elasticsearch.client.request.query.query.ESQueryRequest;
import com.didiglobal.logi.elasticsearch.client.response.batch.ESBatchResponse;
import com.didiglobal.logi.elasticsearch.client.response.batch.IndexResultItemNode;
import com.didiglobal.logi.elasticsearch.client.response.indices.catindices.CatIndexResult;
import com.didiglobal.logi.elasticsearch.client.response.indices.catindices.ESIndicesCatIndicesResponse;
import com.didiglobal.logi.elasticsearch.client.response.indices.deleteindex.ESIndicesDeleteIndexResponse;
import com.didiglobal.logi.elasticsearch.client.response.indices.gettemplate.ESIndicesGetTemplateResponse;
import com.didiglobal.logi.elasticsearch.client.response.indices.putindex.ESIndicesPutIndexResponse;
import com.didiglobal.logi.elasticsearch.client.response.indices.puttemplate.ESIndicesPutTemplateResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.setting.template.TemplateConfig;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Lists;
import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.LoggerUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpStatus;
@@ -33,14 +38,15 @@ import javax.annotation.Nullable;
import javax.annotation.PostConstruct;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;

@Component
public class ESOpClient {
    private static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
    private static final ILog LOGGER = LoggerUtil.getESLogger();

    /**
     * ES address
@@ -57,7 +63,7 @@ public class ESOpClient {
    /**
     * Number of clients
     */
    @Value("${es.client.client-cnt:10}")
    @Value("${es.client.client-cnt:2}")
    private Integer clientCnt;

    /**
@@ -75,72 +81,39 @@ public class ESOpClient {
    /**
     * Queue of client connections used to update ES data
     */
    private LinkedBlockingQueue<ESClient> esClientPool;
    private List<ESClient> esClientPool;

    private static final Integer ES_OPERATE_TIMEOUT = 30;

    @PostConstruct
    public void init(){
        esClientPool = new LinkedBlockingQueue<>( clientCnt );
        esClientPool = new ArrayList<>(clientCnt);

        for (int i = 0; i < clientCnt; ++i) {
            ESClient esClient = this.buildEsClient(esAddress, esPass, "", "");
            if (esClient != null) {
                this.esClientPool.add(esClient);
                LOGGER.info("class=ESOpClient||method=init||msg=add new es client {}", esAddress);
                LOGGER.info("method=init||esAddress={}||msg=add new es client", esAddress);
            }
        }
    }

    /**
     * Take a client from the ES HTTP client pool used for updates
     *
     * @return
     * Get an ES client
     */
    public ESClient getESClientFromPool() {
        ESClient esClient = null;
        int retryCount = 0;

        // Keep trying while esClient is null and fewer than 5 retries have been made
        while (esClient == null && retryCount < 5) {
            try {
                ++retryCount;
                esClient = esClientPool.poll(3, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        if (ValidateUtils.isEmptyList(esClientPool)) {
            return null;
        }

        if (esClient == null) {
            LOGGER.error( "class=ESOpClient||method=getESClientFromPool||errMsg=fail to get es client from pool");
        }

        return esClient;
    }

    /**
     * Return the client to the ES HTTP client pool
     *
     * @param esClient
     */
    public void returnESClientToPool(ESClient esClient) {
        try {
            this.esClientPool.put(esClient);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        return esClientPool.get((int)(System.currentTimeMillis() % clientCnt));
    }

    /**
     * Query and return the first element
     *
     * @param indexName
     * @param queryDsl
     * @param clzz
     * @param <T>
     * @return
     */
    public <T> T performRequestAndTakeFirst(String indexName, String queryDsl, Class<T> clzz) {
        List<T> hits = performRequest(indexName, queryDsl, clzz);

    public <T> T performRequestAndTakeFirst(String indexName, String queryDsl, Class<T> clazz) {
        List<T> hits = this.performRequest(indexName, queryDsl, clazz);
        if (CollectionUtils.isEmpty(hits)) {
            return null;
        }
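The hunk above swaps the blocking LinkedBlockingQueue pool (poll with retries, put to return) for a fixed List of long-lived clients picked by the current time modulo the client count. A standalone sketch of that selection idea, with placeholder strings standing in for ESClient instances:

    import java.util.Arrays;
    import java.util.List;

    public class ClientPickSketch {
        public static void main(String[] args) {
            List<String> clients = Arrays.asList("client-0", "client-1"); // stands in for the ESClient list
            int clientCnt = clients.size();
            // Pick one client per call; clients are shared and never returned to a pool.
            String picked = clients.get((int) (System.currentTimeMillis() % clientCnt));
            System.out.println("picked " + picked);
        }
    }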
@@ -150,31 +123,20 @@ public class ESOpClient {

    /**
     * Query and return the first element
     *
     * @param indexName
     * @param queryDsl
     * @param clazz
     * @param <T>
     * @return
     */
    public <T> T performRequestAndTakeFirst(String routingValue, String indexName,
                                            String queryDsl, Class<T> clazz) {
        List<T> hits = performRequestWithRouting(routingValue, indexName, queryDsl, clazz);

        if (CollectionUtils.isEmpty(hits)) {return null;}
    public <T> T performRequestAndTakeFirst(String routingValue, String indexName, String queryDsl, Class<T> clazz) {
        List<T> hits = this.performRequestWithRouting(routingValue, indexName, queryDsl, clazz);
        if (CollectionUtils.isEmpty(hits)) {
            return null;
        }

        return hits.get(0);
    }

    /**
     * Execute the query
     *
     * @param indexName
     * @param queryDsl
     * @return
     * @throws IOException
     */
    public ESQueryResponse performRequest(String indexName,String queryDsl) {
    public ESQueryResponse performRequest(String indexName, String queryDsl) {
        return doQuery(new ESQueryRequest().indices(indexName).source(queryDsl));
    }

@@ -187,9 +149,8 @@ public class ESOpClient {
        return func.apply(esQueryResponse);
    }

    public <T> List<T> performRequest(String indexName, String queryDsl, Class<T> clzz) {
        ESQueryResponse esQueryResponse = doQuery(
                new ESQueryRequest().indices(indexName).source(queryDsl).clazz(clzz));
    public <T> List<T> performRequest(String indexName, String queryDsl, Class<T> clzz) {
        ESQueryResponse esQueryResponse = this.doQuery(new ESQueryRequest().indices(indexName).source(queryDsl).clazz(clzz));
        if (esQueryResponse == null) {
            return new ArrayList<>();
        }
@@ -227,8 +188,7 @@ public class ESOpClient {
        return hits;
    }

    public <R> R performRequestWithRouting(String routingValue, String indexName,
                                           String queryDsl, Function<ESQueryResponse, R> func, int tryTimes) {
    public <R> R performRequestWithRouting(String routingValue, String indexName, String queryDsl, Function<ESQueryResponse, R> func, int tryTimes) {
        ESQueryResponse esQueryResponse;
        do {
            esQueryResponse = doQuery(new ESQueryRequest().routing(routingValue).indices(indexName).source(queryDsl));
@@ -239,16 +199,12 @@ public class ESOpClient {

    /**
     * Write a single document
     *
     * @param source
     * @return
     */
    public boolean index(String indexName, String id, String source) {
        ESClient esClient = null;
        ESIndexResponse response = null;

        try {
            esClient = getESClientFromPool();
            ESClient esClient = this.getESClientFromPool();
            if (esClient == null) {
                return false;
            }
@@ -267,20 +223,11 @@ public class ESOpClient {
                return response.getRestStatus().getStatus() == HttpStatus.SC_OK
                        || response.getRestStatus().getStatus() == HttpStatus.SC_CREATED;
            }

        } catch (Exception e) {
            LOGGER.warn(
                    "class=ESOpClient||method=index||indexName={}||id={}||source={}||errMsg=index doc error. ",
                    indexName, id, source, e);
            if (response != null) {
                LOGGER.warn(
                        "class=ESOpClient||method=index||indexName={}||id={}||source={}||errMsg=response {}",
                        indexName, id, source, JSON.toJSONString(response));
            }
        } finally {
            if (esClient != null) {
                returnESClientToPool(esClient);
            }
            LOGGER.error(
                    "method=index||indexName={}||id={}||source={}||response={}||errMsg=index failed",
                    indexName, id, source, ConvertUtil.obj2Json(response), e
            );
        }

        return false;
@@ -288,19 +235,15 @@ public class ESOpClient {

    /**
     * Batch write
     *
     * @param indexName
     * @return
     */
    public boolean batchInsert(String indexName, List<? extends BaseESPO> pos) {
        if (CollectionUtils.isEmpty(pos)) {
            return true;
        }

        ESClient esClient = null;
        ESBatchResponse response = null;
        try {
            esClient = getESClientFromPool();
            ESClient esClient = this.getESClientFromPool();
            if (esClient == null) {
                return false;
            }
@@ -329,16 +272,10 @@ public class ESOpClient {
                return response.getRestStatus().getStatus() == HttpStatus.SC_OK && !response.getErrors();
            }
        } catch (Exception e) {
            LOGGER.warn(
                    "method=batchInsert||indexName={}||errMsg=batch insert error. ", indexName, e);
            if (response != null) {
                LOGGER.warn("method=batchInsert||indexName={}||errMsg=response {}", indexName, JSON.toJSONString(response));
            }

        } finally {
            if (esClient != null) {
                returnESClientToPool(esClient);
            }
            LOGGER.error(
                    "method=batchInsert||indexName={}||response={}||errMsg=batch index failed",
                    indexName, ConvertUtil.obj2Json(response), e
            );
        }

        return false;
@@ -348,9 +285,8 @@ public class ESOpClient {
     * Check by expression whether the index already exists
     */
    public boolean indexExist(String indexName) {
        ESClient esClient = null;
        try {
            esClient = this.getESClientFromPool();
            ESClient esClient = this.getESClientFromPool();
            if (esClient == null) {
                return false;
            }
@@ -358,11 +294,7 @@ public class ESOpClient {
            // Check whether the index exists
            return esClient.admin().indices().prepareExists(indexName).execute().actionGet(30, TimeUnit.SECONDS).isExists();
        } catch (Exception e){
            LOGGER.warn("class=ESOpClient||method=indexExist||indexName={}||msg=exception!", indexName, e);
        } finally {
            if (esClient != null) {
                returnESClientToPool(esClient);
            }
            LOGGER.error("method=indexExist||indexName={}||msg=exception!", indexName, e);
        }

        return false;
@@ -372,48 +304,45 @@ public class ESOpClient {
     * Create the index
     */
    public boolean createIndex(String indexName) {
        if (indexExist(indexName)) {
        if (this.indexExist(indexName)) {
            return true;
        }

        ESClient client = getESClientFromPool();
        if (client != null) {
            try {
                ESIndicesPutIndexResponse response = client.admin().indices().preparePutIndex(indexName).execute()
                        .actionGet(30, TimeUnit.SECONDS);
                return response.getAcknowledged();
            } catch (Exception e){
                LOGGER.warn( "msg=create index fail||indexName={}", indexName, e);
            } finally {
                returnESClientToPool(client);
            }
        ESClient client = this.getESClientFromPool();
        try {
            ESIndicesPutIndexResponse response = client
                    .admin()
                    .indices()
                    .preparePutIndex(indexName)
                    .execute()
                    .actionGet(ES_OPERATE_TIMEOUT, TimeUnit.SECONDS);

            return response.getAcknowledged();
        } catch (Exception e){
            LOGGER.error( "method=createIndex||indexName={}||errMsg=exception!", indexName, e);
        }

        return false;
    }

    public boolean templateExist(String indexTemplateName){
        ESClient esClient = null;

        try {
            esClient = this.getESClientFromPool();
            ESClient esClient = this.getESClientFromPool();

            // Fetch the existing index template configuration from ES
            ESIndicesGetTemplateResponse getTemplateResponse =
                    esClient.admin().indices().prepareGetTemplate( indexTemplateName ).execute().actionGet( 30, TimeUnit.SECONDS );
            ESIndicesGetTemplateResponse getTemplateResponse = esClient
                    .admin()
                    .indices()
                    .prepareGetTemplate(indexTemplateName)
                    .execute()
                    .actionGet( ES_OPERATE_TIMEOUT, TimeUnit.SECONDS );

            TemplateConfig templateConfig = getTemplateResponse.getMultiTemplatesConfig().getSingleConfig();

            if (null != templateConfig) {
                return true;
            }
        } catch (Exception e) {
            LOGGER.warn( "method=templateExist||indexTemplateName={}||msg=exception!",
                    indexTemplateName, e);
        } finally {
            if (esClient != null) {
                this.returnESClientToPool(esClient);
            }
            LOGGER.error( "method=templateExist||indexTemplateName={}||msg=exception!", indexTemplateName, e);
        }

        return false;
@@ -423,27 +352,77 @@ public class ESOpClient {
     * Create the index template
     */
    public boolean createIndexTemplateIfNotExist(String indexTemplateName, String config) {
        ESClient esClient = null;

        try {
            esClient = this.getESClientFromPool();
            ESClient esClient = this.getESClientFromPool();

            // Return if the template already exists, otherwise create it
            if(templateExist(indexTemplateName)){return true;}
            // Return if the template already exists, otherwise create it
            if(this.templateExist(indexTemplateName)) {
                return true;
            }

            // Create the new template
            ESIndicesPutTemplateResponse response = esClient.admin().indices().preparePutTemplate( indexTemplateName )
                    .setTemplateConfig( config ).execute().actionGet( 30, TimeUnit.SECONDS );
            ESIndicesPutTemplateResponse response = esClient
                    .admin()
                    .indices()
                    .preparePutTemplate( indexTemplateName )
                    .setTemplateConfig(config)
                    .execute()
                    .actionGet(ES_OPERATE_TIMEOUT, TimeUnit.SECONDS);

            return response.getAcknowledged();
        } catch (Exception e) {
            LOGGER.warn( "method=createIndexTemplateIfNotExist||indexTemplateName={}||config={}||msg=exception!",
            LOGGER.error(
                    "method=createIndexTemplateIfNotExist||indexTemplateName={}||config={}||msg=exception!",
                    indexTemplateName, config, e
            );
        } finally {
            if (esClient != null) {
                this.returnESClientToPool(esClient);
            }

        return false;
    }

    /**
     * List all indices that match the index template
     */
    public List<String> listIndexByName(String indexName) {
        try {
            ESClient esClient = this.getESClientFromPool();

            ESIndicesCatIndicesResponse response = esClient
                    .admin()
                    .indices()
                    .prepareCatIndices(indexName + "*")
                    .execute()
                    .actionGet(ES_OPERATE_TIMEOUT, TimeUnit.SECONDS);
            if(null != response) {
                return response.getCatIndexResults().stream().map(CatIndexResult::getIndex).collect(Collectors.toList());
            }
        } catch (Exception e) {
            LOGGER.error( "method=listIndexByName||indexName={}||msg=exception!", indexName, e);
        }

        return Collections.emptyList();
    }

    /**
     * Delete the index
     */
    public boolean delIndexByName(String indexRealName){
        try {
            ESClient esClient = this.getESClientFromPool();

            ESIndicesDeleteIndexResponse response = esClient
                    .admin()
                    .indices()
                    .prepareDeleteIndex(indexRealName)
                    .execute()
                    .actionGet(ES_OPERATE_TIMEOUT, TimeUnit.SECONDS);

            return response.getAcknowledged();
        } catch (ESIndexNotFoundException nfe) {
            // If the index does not exist, only log it at debug level
            LOGGER.debug( "method=delIndexByName||indexRealName={}||errMsg=exception!", indexRealName, nfe);
        } catch (Exception e) {
            LOGGER.error( "method=delIndexByName||indexRealName={}||errMsg=exception!", indexRealName, e);
        }

        return false;
@@ -453,61 +432,60 @@ public class ESOpClient {

    /**
     * Execute the query
     * @param request
     * @return
     */
    @Nullable
    private ESQueryResponse doQuery(ESQueryRequest request) {
        ESClient esClient = null;
        try {
            esClient = getESClientFromPool();
            ESClient esClient = this.getESClientFromPool();
            ESQueryResponse response = esClient.query(request).actionGet(120, TimeUnit.SECONDS);

            if(!EnvUtil.isOnline()){
                LOGGER.info("method=doQuery||indexName={}||queryDsl={}||ret={}",
                        request.indices(), bytesReferenceConvertDsl(request.source()), JSON.toJSONString(response));
            }
            LOGGER.debug(
                    "method=doQuery||indexName={}||queryDsl={}||ret={}",
                    request.indices(), bytesReferenceConvertDsl(request.source()), JSON.toJSONString(response)
            );

            return response;
        } catch (Exception e) {
            LOGGER.error( "method=doQuery||indexName={}||queryDsl={}||errMsg=query error. ",
                    request.indices(), bytesReferenceConvertDsl(request.source()), e);
            LOGGER.error(
                    "method=doQuery||indexName={}||queryDsl={}||errMsg=query error. ",
                    request.indices(),
                    bytesReferenceConvertDsl(request.source()),
                    e
            );

            return null;
        }finally {
            if (esClient != null) {
                returnESClientToPool(esClient);
            }
        }
    }

    private boolean handleErrorResponse(String indexName, List<? extends BaseESPO> pos, ESBatchResponse response) {
        if (response.getErrors().booleanValue()) {
            int errorItemIndex = 0;

            if (CollectionUtils.isNotEmpty(response.getItems())) {
                for (IndexResultItemNode item : response.getItems()) {
                    recordErrorResponseItem(indexName, pos, errorItemIndex++, item);
                }
            }

            return true;
        if (response.getErrors()) {
            return false;
        }

        return false;
        int errorItemIndex = 0;

        if (CollectionUtils.isNotEmpty(response.getItems())) {
            for (IndexResultItemNode item : response.getItems()) {
                recordErrorResponseItem(indexName, pos, errorItemIndex++, item);
            }
        }

        return true;
    }

    private void recordErrorResponseItem(String indexName, List<? extends BaseESPO> pos, int errorItemIndex, IndexResultItemNode item) {
        if (item.getIndex() != null && item.getIndex().getShards() != null
        if (item.getIndex() != null
                && item.getIndex().getShards() != null
                && CollectionUtils.isNotEmpty(item.getIndex().getShards().getFailures())) {
            LOGGER.warn(
                    "class=ESOpClient||method=batchInsert||indexName={}||errMsg=Failures: {}, content: {}",
                    "method=batchInsert||indexName={}||errMsg=Failures: {}, content: {}",
                    indexName, item.getIndex().getShards().getFailures().toString(),
                    JSON.toJSONString(pos.get(errorItemIndex)));
        }

        if (item.getIndex() != null && item.getIndex().getError() != null) {
            LOGGER.warn(
                    "class=ESOpClient||method=batchInsert||indexName={}||errMsg=Error: {}, content: {}",
                    "method=batchInsert||indexName={}||errMsg=Error: {}, content: {}",
                    indexName, item.getIndex().getError().getReason(),
                    JSON.toJSONString(pos.get(errorItemIndex)));
        }
@@ -515,21 +493,18 @@ public class ESOpClient {

    /**
     * Convert the DSL statement
     *
     * @param bytes
     * @return
     */
    private String bytesReferenceConvertDsl(BytesReference bytes) {
        try {
            return XContentHelper.convertToJson(bytes, false);
        } catch (IOException e) {
            LOGGER.warn("class=ESOpClient||method=bytesReferenceConvertDsl||errMsg=fail to covert", e);
            LOGGER.warn("method=bytesReferenceConvertDsl||errMsg=fail to covert", e);
        }

        return "";
    }

    private ESClient buildEsClient(String address,String password,String clusterName, String version) {
    private ESClient buildEsClient(String address, String password,String clusterName, String version) {
        if (StringUtils.isBlank(address)) {
            return null;
        }
@@ -564,7 +539,7 @@ public class ESOpClient {
                // ignore
            }

            LOGGER.error("class=ESESOpClient||method=buildEsClient||errMsg={}||address={}", e.getMessage(), address, e);
            LOGGER.error("method=buildEsClient||address={}||errMsg=exception", address, e);
            return null;
        }
    }

@@ -0,0 +1,41 @@
package com.xiaojukeji.know.streaming.km.persistence.es;

import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import lombok.NoArgsConstructor;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;

/**
 * Thread pool for handling ES requests
 */
@Service
@NoArgsConstructor
public class ESTPService {
    @Value("${thread-pool.es.search.thread-num:10}")
    private Integer esSearchThreadCnt;

    @Value("${thread-pool.es.search.queue-size:5000}")
    private Integer esSearchThreadQueueSize;

    private FutureWaitUtil<Object> searchESTP;

    @PostConstruct
    private void init() {
        searchESTP = FutureWaitUtil.init(
                "SearchESTP",
                esSearchThreadCnt,
                esSearchThreadCnt,
                esSearchThreadQueueSize
        );
    }

    public void submitSearchTask(String taskName, Integer timeoutUnisMs, Runnable runnable) {
        searchESTP.runnableTask(taskName, timeoutUnisMs, runnable);
    }

    public void waitExecute() {
        searchESTP.waitExecute();
    }
}
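A hedged usage sketch of the thread-pool service above, mirroring the submit-then-wait pattern that BrokerMetricESDAO adopts later in this commit; the task name, timeout and brokerIds collection are illustrative:

    for (Long brokerId : brokerIds) {
        esTPService.submitSearchTask(
                "example-broker-metrics-" + brokerId,   // illustrative task name
                5000,                                   // per-task timeout in milliseconds
                () -> {
                    // run one ES query and merge its result under a shared lock
                }
        );
    }
    // Block until every submitted task has finished or timed out.
    esTPService.waitExecute();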
@@ -10,10 +10,16 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.BaseESPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BaseMetricESPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
import com.xiaojukeji.know.streaming.km.common.utils.IndexNameUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.persistence.es.BaseESDAO;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import com.xiaojukeji.know.streaming.km.persistence.es.ESTPService;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
import com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateLoaderUtil;
import lombok.NoArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.util.CollectionUtils;

@@ -27,8 +33,7 @@ public class BaseMetricESDAO extends BaseESDAO {
    /**
     * Name of the index being operated on
     */
    protected String indexName;
    protected String indexTemplate;
    protected String indexName;

    protected static final Long ONE_MIN = 60 * 1000L;
    protected static final Long FIVE_MIN = 5 * ONE_MIN;
@@ -42,12 +47,25 @@ public class BaseMetricESDAO extends BaseESDAO {
     */
    private static Map<String, BaseMetricESDAO> ariusStatsEsDaoMap = Maps.newConcurrentMap();

    @Autowired
    private TemplateLoaderUtil templateLoaderUtil;

    @Autowired
    protected ESTPService esTPService;

    /**
     * ES address
     */
    @Value("${es.index.expire:60}")
    private int indexExpireDays;

    /**
     * Check whether the ES index exists, and create it if it does not
     */
    @Scheduled(cron = "0 3/5 * * * ?")
    public void checkCurrentDayIndexExist(){
        try {
            String indexTemplate = templateLoaderUtil.getContextByFileName(indexName);
            esOpClient.createIndexTemplateIfNotExist(indexName, indexTemplate);

            // Check whether the indices for the last 7 days exist
@@ -57,11 +75,30 @@ public class BaseMetricESDAO extends BaseESDAO {

            esOpClient.createIndex(realIndex);
            }
        }catch (Exception e){
        } catch (Exception e) {
            LOGGER.error("method=checkCurrentDayIndexExist||errMsg=exception!", e);
        }
    }

    @Scheduled(cron = "0 30/45 * * * ?")
    public void delExpireIndex(){
        List<String> indexList = esOpClient.listIndexByName(indexName);
        if(CollectionUtils.isEmpty(indexList)){return;}

        indexList.sort((o1, o2) -> -o1.compareTo(o2));

        int size = indexList.size();
        if(size > indexExpireDays){
            if(!EnvUtil.isOnline()){
                LOGGER.info("method=delExpireIndex||indexExpireDays={}||delIndex={}",
                        indexExpireDays, indexList.subList(indexExpireDays, size));
            }

            indexList.subList(indexExpireDays, size).stream().forEach(
                    s -> esOpClient.delIndexByName(s));
        }
    }

    public static BaseMetricESDAO getByStatsType(String statsType) {
        return ariusStatsEsDaoMap.get(statsType);
    }
@@ -76,6 +113,15 @@ public class BaseMetricESDAO extends BaseESDAO {
        ariusStatsEsDaoMap.put(statsType, baseAriusStatsEsDao);
    }

    /**
     * Register the ES DAO class that handles data of a given dimension
     *
     * @param baseAriusStatsEsDao
     */
    public void register(BaseMetricESDAO baseAriusStatsEsDao) {
        BaseMetricESDAO.register(indexName, baseAriusStatsEsDao);
    }

    /**
     * Batch-insert index statistics

@@ -323,21 +369,16 @@ public class BaseMetricESDAO extends BaseESDAO {
        sb.append(str, 1, str.length() - 1);
    }

    protected Map<String, ESAggr> checkBucketsAndHitsOfResponseAggs(ESQueryResponse response){
        if(null == response || null == response.getAggs()){
    protected Map<String, ESAggr> checkBucketsAndHitsOfResponseAggs(ESQueryResponse response) {
        if(null == response
                || null == response.getAggs()
                || null == response.getAggs().getEsAggrMap()
                || null == response.getAggs().getEsAggrMap().get(HIST)
                || ValidateUtils.isEmptyList(response.getAggs().getEsAggrMap().get(HIST).getBucketList())) {
            return null;
        }

        Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
        if (null == esAggrMap || null == esAggrMap.get(HIST)) {
            return null;
        }

        if(CollectionUtils.isEmpty(esAggrMap.get(HIST).getBucketList())){
            return null;
        }

        return esAggrMap;
        return response.getAggs().getEsAggrMap();
    }

    protected int handleESQueryResponseCount(ESQueryResponse response){
@@ -389,7 +430,7 @@ public class BaseMetricESDAO extends BaseESDAO {
        Long endTime = System.currentTimeMillis();
        Long startTime = endTime - 12 * ONE_HOUR;

        String dsl = dslLoaderUtil.getFormatDslByFileName(DslsConstant.GET_LATEST_METRIC_TIME, startTime, endTime, appendQueryDsl);
        String dsl = dslLoaderUtil.getFormatDslByFileName( DslConstant.GET_LATEST_METRIC_TIME, startTime, endTime, appendQueryDsl);
        String realIndexName = IndexNameUtils.genDailyIndexName(indexName, startTime, endTime);

        return esOpClient.performRequest(

@@ -6,32 +6,27 @@ import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.BrokerMetricPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;

import javax.annotation.PostConstruct;
import java.util.*;
import java.util.stream.Collectors;

import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.BROKER_INDEX;

@Component
public class BrokerMetricESDAO extends BaseMetricESDAO {
    @PostConstruct
    public void init() {
        super.indexName = BROKER_INDEX;
        super.indexTemplate = BROKER_TEMPLATE;
        super.indexName = BROKER_INDEX;
        checkCurrentDayIndexExist();
        BaseMetricESDAO.register(indexName, this);
        register( this);
    }

    protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("BrokerMetricESDAO", 4,8, 500);

    /**
     * Get the latest metrics of brokerId in cluster clusterId
     */
@@ -40,7 +35,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
        Long startTime = endTime - FIVE_MIN;

        String dsl = dslLoaderUtil.getFormatDslByFileName(
                DslsConstant.GET_BROKER_LATEST_METRICS, clusterId, brokerId, startTime, endTime);
                DslConstant.GET_BROKER_LATEST_METRICS, clusterId, brokerId, startTime, endTime);

        BrokerMetricPO brokerMetricPO = esOpClient.performRequestAndTakeFirst(
                brokerId.toString(),
@@ -68,7 +63,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
        String aggDsl = buildAggsDSL(metrics, aggType);

        String dsl = dslLoaderUtil.getFormatDslByFileName(
                DslsConstant.GET_BROKER_AGG_SINGLE_METRICS, clusterPhyId, brokerId, startTime, endTime, aggDsl);
                DslConstant.GET_BROKER_AGG_SINGLE_METRICS, clusterPhyId, brokerId, startTime, endTime, aggDsl);

        return esOpClient.performRequestWithRouting(
                String.valueOf(brokerId),
@@ -132,7 +127,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
        for(Long brokerId : brokerIds){
            try {
                String dsl = dslLoaderUtil.getFormatDslByFileName(
                        DslsConstant.GET_BROKER_AGG_LIST_METRICS,
                        DslConstant.GET_BROKER_AGG_LIST_METRICS,
                        clusterPhyId,
                        brokerId,
                        startTime,
@@ -141,7 +136,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
|
||||
aggDsl
|
||||
);
|
||||
|
||||
queryFuture.runnableTask(
|
||||
esTPService.submitSearchTask(
|
||||
String.format("class=BrokerMetricESDAO||method=listBrokerMetricsByBrokerIds||ClusterPhyId=%d", clusterPhyId),
|
||||
5000,
|
||||
() -> {
|
||||
@@ -154,8 +149,8 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
|
||||
);
|
||||
|
||||
synchronized (table) {
|
||||
for(String metric : metricMap.keySet()){
|
||||
table.put(metric, brokerId, metricMap.get(metric));
|
||||
for(Map.Entry<String, List<MetricPointVO>> entry: metricMap.entrySet()){
|
||||
table.put(entry.getKey(), brokerId, entry.getValue());
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -164,7 +159,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
|
||||
}
|
||||
}
|
||||
|
||||
queryFuture.waitExecute();
|
||||
esTPService.waitExecute();
|
||||
|
||||
return table;
|
||||
}
|
||||
@@ -187,7 +182,7 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
//4. Query ES
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_BROKER_AGG_TOP_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
|
||||
DslConstant.GET_BROKER_AGG_TOP_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
|
||||
|
||||
return esOpClient.performRequest(realIndex, dsl,
|
||||
s -> handleTopBrokerESQueryResponse(s, metrics, topN), 3);
|
||||
@@ -221,106 +216,86 @@ public class BrokerMetricESDAO extends BaseMetricESDAO {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metricNameList, String aggType){
|
||||
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
|
||||
|
||||
if(null == response || null == response.getAggs()){
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if (esAggrMap == null) {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
|
||||
if (null == esAggrMap || null == esAggrMap.get(HIST)) {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
if(CollectionUtils.isEmpty(esAggrMap.get(HIST).getBucketList())){
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
for(String metric : metrics){
|
||||
for(String metricName : metricNameList){
|
||||
List<MetricPointVO> metricPoints = new ArrayList<>();
|
||||
|
||||
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
|
||||
esAggrMap.get(HIST).getBucketList().forEach(esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE);
|
||||
if(null == value){return;}
|
||||
|
||||
MetricPointVO metricPoint = new MetricPointVO();
|
||||
metricPoint.setAggType(aggType);
|
||||
metricPoint.setTimeStamp(timestamp);
|
||||
metricPoint.setValue(value.toString());
|
||||
metricPoint.setName(metric);
|
||||
|
||||
metricPoints.add(metricPoint);
|
||||
}else {
|
||||
LOGGER.info("");
|
||||
if (null == esBucket.getUnusedMap().get(KEY)) {
|
||||
return;
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("metric={}||errMsg=exception!", metric, e);
|
||||
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metricName).getUnusedMap().get(VALUE);
|
||||
if(null == value) {
|
||||
return;
|
||||
}
|
||||
|
||||
metricPoints.add(new MetricPointVO(metricName, timestamp, value.toString(), aggType));
|
||||
} catch (Exception e){
|
||||
LOGGER.error("method=handleListESQueryResponse||metricName={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
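// Note: the early "return" statements inside the forEach lambda above only skip the current bucket;
// they do not exit handleListESQueryResponse itself.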
|
||||
|
||||
metricMap.put(metric, optimizeMetricPoints(metricPoints));
|
||||
metricMap.put(metricName, optimizeMetricPoints(metricPoints));
|
||||
}
|
||||
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
private Map<String, List<Long>> handleTopBrokerESQueryResponse(ESQueryResponse response, List<String> metrics, int topN){
|
||||
private Map<String, List<Long>> handleTopBrokerESQueryResponse(ESQueryResponse response, List<String> metricNameList, int topN) {
|
||||
Map<String, List<Long>> ret = new HashMap<>();
|
||||
|
||||
if(null == response || null == response.getAggs()){
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if (esAggrMap == null) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
|
||||
if (null == esAggrMap || null == esAggrMap.get(HIST)) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if(CollectionUtils.isEmpty(esAggrMap.get(HIST).getBucketList())){
|
||||
return ret;
|
||||
}
|
||||
|
||||
Map<String, List<Tuple<Long, Double>>> metricBrokerValueMap = new HashMap<>();
|
||||
Map<String, List<Tuple<Long, Double>>> metricNameBrokerValueMap = new HashMap<>();
|
||||
|
||||
//1. First collect, for each metric, all brokerIds and the corresponding metric values
|
||||
for(String metric : metrics) {
|
||||
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
|
||||
for(String metricName : metricNameList) {
|
||||
esAggrMap.get(HIST).getBucketList().forEach(esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
Long brokerId = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap()
|
||||
.get(metric).getUnusedMap().get(VALUE);
|
||||
if(null == value){return;}
|
||||
|
||||
List<Tuple<Long, Double>> brokerValue = (null == metricBrokerValueMap.get(metric)) ?
|
||||
new ArrayList<>() : metricBrokerValueMap.get(metric);
|
||||
|
||||
brokerValue.add(new Tuple<>(brokerId, Double.valueOf(value.toString())));
|
||||
metricBrokerValueMap.put(metric, brokerValue);
|
||||
if (null == esBucket.getUnusedMap().get(KEY)) {
|
||||
return;
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("metrice={}||errMsg=exception!", metric, e);
|
||||
|
||||
Long brokerId = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap().get(metricName).getUnusedMap().get(VALUE);
|
||||
if(null == value) {
|
||||
return;
|
||||
}
|
||||
|
||||
metricNameBrokerValueMap.putIfAbsent(metricName, new ArrayList<>());
|
||||
metricNameBrokerValueMap.get(metricName).add(new Tuple<>(brokerId, Double.valueOf(value.toString())));
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("method=handleTopBrokerESQueryResponse||metric={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
|
||||
});
|
||||
}
|
||||
|
||||
//2. For each metric, sort the brokers by metric value and keep the first topN brokerIds
|
||||
for(String metric : metricBrokerValueMap.keySet()){
|
||||
List<Tuple<Long, Double>> brokerValue = metricBrokerValueMap.get(metric);
|
||||
for(Map.Entry<String, List<Tuple<Long, Double>>> entry : metricNameBrokerValueMap.entrySet()){
|
||||
entry.getValue().sort((o1, o2) -> {
|
||||
if(null == o1 || null == o2){
|
||||
return 0;
|
||||
}
|
||||
|
||||
brokerValue.sort((o1, o2) -> {
|
||||
if(null == o1 || null == o2){return 0;}
|
||||
return o2.getV2().compareTo(o1.getV2());
|
||||
} );
|
||||
|
||||
List<Tuple<Long, Double>> temp = (brokerValue.size() > topN) ? brokerValue.subList(0, topN) : brokerValue;
|
||||
List<Long> brokerIds = temp.stream().map(t -> t.getV1()).collect( Collectors.toList());
|
||||
|
||||
ret.put(metric, brokerIds);
|
||||
// Take the topN brokers
|
||||
List<Long> brokerIdList = entry.getValue().subList(0, Math.min(topN, entry.getValue().size())).stream().map(elem -> elem.getV1()).collect(Collectors.toList());
|
||||
ret.put(entry.getKey(), brokerIdList);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
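For readers skimming the diff, the top-N selection rewritten in handleTopBrokerESQueryResponse above boils down to the pattern below. This is a minimal, self-contained sketch with made-up sample data, not code from this commit:

import java.util.*;
import java.util.stream.Collectors;

class TopNSelectionSketch {
    public static void main(String[] args) {
        // Hypothetical (brokerId, metricValue) pairs collected for one metric.
        List<Map.Entry<Long, Double>> brokerValues = new ArrayList<>(List.of(
                Map.entry(1L, 3.5), Map.entry(2L, 9.1), Map.entry(3L, 7.2)));

        int topN = 2;

        // Sort descending by metric value, then keep the first min(topN, size) broker ids.
        brokerValues.sort((o1, o2) -> o2.getValue().compareTo(o1.getValue()));
        List<Long> topBrokerIds = brokerValues.subList(0, Math.min(topN, brokerValues.size()))
                .stream().map(Map.Entry::getKey).collect(Collectors.toList());

        System.out.println(topBrokerIds); // prints [2, 3]
    }
}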
@@ -10,9 +10,8 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchRange;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchSort;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ClusterMetricPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
@@ -23,21 +22,18 @@ import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.CLUSTER_INDEX;
|
||||
|
||||
@Component
|
||||
public class ClusterMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = CLUSTER_INDEX;
|
||||
super.indexTemplate = CLUSTER_TEMPLATE;
|
||||
super.indexName = CLUSTER_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
BaseMetricESDAO.register(indexName, this);
|
||||
register(this);
|
||||
}
|
||||
|
||||
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("ClusterMetricESDAO", 4,8, 500);
|
||||
|
||||
/**
|
||||
* Get the latest metrics of cluster clusterId.
|
||||
*/
|
||||
@@ -46,7 +42,7 @@ public class ClusterMetricESDAO extends BaseMetricESDAO {
|
||||
Long startTime = endTime - FIVE_MIN;
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_CLUSTER_LATEST_METRICS, clusterId, startTime, endTime);
|
||||
DslConstant.GET_CLUSTER_LATEST_METRICS, clusterId, startTime, endTime);
|
||||
|
||||
ClusterMetricPO clusterMetricPO = esOpClient.performRequestAndTakeFirst(
|
||||
clusterId.toString(), realIndex(startTime, endTime), dsl, ClusterMetricPO.class);
|
||||
@@ -67,7 +63,7 @@ public class ClusterMetricESDAO extends BaseMetricESDAO {
|
||||
String aggDsl = buildAggsDSL(metrics, aggType);
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_CLUSTER_AGG_SINGLE_METRICS, clusterPhyId, startTime, endTime, aggDsl);
|
||||
DslConstant.GET_CLUSTER_AGG_SINGLE_METRICS, clusterPhyId, startTime, endTime, aggDsl);
|
||||
|
||||
return esOpClient.performRequestWithRouting(String.valueOf(clusterPhyId), realIndex, dsl,
|
||||
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
|
||||
@@ -103,7 +99,7 @@ public class ClusterMetricESDAO extends BaseMetricESDAO {
|
||||
}
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.LIST_CLUSTER_WITH_LATEST_METRICS, latestMetricTime, appendQueryDsl.toString(), sortDsl);
|
||||
DslConstant.LIST_CLUSTER_WITH_LATEST_METRICS, latestMetricTime, appendQueryDsl.toString(), sortDsl);
|
||||
|
||||
return esOpClient.performRequest(realIndex, dsl, ClusterMetricPO.class);
|
||||
}
|
||||
@@ -128,30 +124,39 @@ public class ClusterMetricESDAO extends BaseMetricESDAO {
|
||||
//4. Build the DSL query condition and run the query
|
||||
for(Long clusterPhyId : clusterPhyIds){
|
||||
try {
|
||||
queryFuture.runnableTask(
|
||||
esTPService.submitSearchTask(
|
||||
String.format("class=ClusterMetricESDAO||method=listClusterMetricsByClusterIds||ClusterPhyId=%d", clusterPhyId),
|
||||
5000,
|
||||
() -> {
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_CLUSTER_AGG_LIST_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
|
||||
DslConstant.GET_CLUSTER_AGG_LIST_METRICS,
|
||||
clusterPhyId,
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(
|
||||
String.valueOf(clusterPhyId), realIndex, dsl,
|
||||
s -> handleListESQueryResponse(s, metrics, aggType), 3);
|
||||
String.valueOf(clusterPhyId),
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleListESQueryResponse(s, metrics, aggType),
|
||||
DEFAULT_RETRY_TIME
|
||||
);
|
||||
|
||||
synchronized (table){
|
||||
for(String metric : metricMap.keySet()){
|
||||
table.put(metric, clusterPhyId, metricMap.get(metric));
|
||||
for(Map.Entry<String/*metric*/, List<MetricPointVO>> entry : metricMap.entrySet()){
|
||||
table.put(entry.getKey(), clusterPhyId, entry.getValue());
|
||||
}
|
||||
}
|
||||
});
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=listClusterMetricsByClusterIds||clusterPhyId={}||errMsg=exception!",
|
||||
clusterPhyId, e);
|
||||
LOGGER.error("method=listClusterMetricsByClusterIds||clusterPhyId={}||errMsg=exception!", clusterPhyId, e);
|
||||
}
|
||||
}
|
||||
|
||||
queryFuture.waitExecute();
|
||||
esTPService.waitExecute();
|
||||
return table;
|
||||
}
|
||||
|
||||
@@ -183,35 +188,33 @@ public class ClusterMetricESDAO extends BaseMetricESDAO {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
|
||||
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap){return new HashMap<>();}
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metricNameList, String aggType){
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
|
||||
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
|
||||
for(String metric : metrics){
|
||||
for(String metricName : metricNameList) {
|
||||
List<MetricPointVO> metricPoints = new ArrayList<>();
|
||||
|
||||
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE);
|
||||
if(null == value){return;}
|
||||
Object value = esBucket.getAggrMap().get(metricName).getUnusedMap().get(VALUE);
|
||||
if(null == value) {
|
||||
return;
|
||||
}
|
||||
|
||||
MetricPointVO metricPoint = new MetricPointVO();
|
||||
metricPoint.setAggType(aggType);
|
||||
metricPoint.setTimeStamp(timestamp);
|
||||
metricPoint.setValue(value.toString());
|
||||
metricPoint.setName(metric);
|
||||
|
||||
metricPoints.add(metricPoint);
|
||||
metricPoints.add(new MetricPointVO(metricName, timestamp, value.toString(), aggType));
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleESQueryResponse||metric={}||errMsg=exception!", metric, e);
|
||||
} catch (Exception e){
|
||||
LOGGER.error("method=handleListESQueryResponse||metricName={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
|
||||
|
||||
metricMap.put(metric, optimizeMetricPoints(metricPoints));
|
||||
metricMap.put(metricName, optimizeMetricPoints(metricPoints));
|
||||
}
|
||||
|
||||
return metricMap;
|
||||
|
||||
@@ -10,9 +10,8 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicPartitionK
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.GroupMetricPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
@@ -23,21 +22,18 @@ import java.util.stream.Collectors;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.Constant.ZERO;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.GROUP_INDEX;
|
||||
|
||||
@Component
|
||||
public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = GROUP_INDEX;
|
||||
super.indexTemplate = GROUP_TEMPLATE;
|
||||
super.indexName = GROUP_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
BaseMetricESDAO.register(indexName, this);
|
||||
register(this);
|
||||
}
|
||||
|
||||
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("GroupMetricESDAO", 4,8, 500);
|
||||
|
||||
public List<GroupMetricPO> listLatestMetricsAggByGroupTopic(Long clusterPhyId, List<GroupTopic> groupTopicList, List<String> metrics, AggTypeEnum aggType){
|
||||
Long latestTime = getLatestMetricTime();
|
||||
Long startTime = latestTime - FIVE_MIN;
|
||||
@@ -50,7 +46,7 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
List<GroupMetricPO> groupMetricPOS = new CopyOnWriteArrayList<>();
|
||||
for(GroupTopic groupTopic : groupTopicList){
|
||||
queryFuture.runnableTask(
|
||||
esTPService.submitSearchTask(
|
||||
String.format("class=GroupMetricESDAO||method=listLatestMetricsAggByGroupTopic||ClusterPhyId=%d||groupName=%s||topicName=%s",
|
||||
clusterPhyId, groupTopic.getGroupName(), groupTopic.getTopicName()),
|
||||
5000,
|
||||
@@ -59,7 +55,7 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
String topic = groupTopic.getTopicName();
|
||||
try {
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.LIST_GROUP_LATEST_METRICS_BY_GROUP_TOPIC, clusterPhyId, group, topic,
|
||||
DslConstant.LIST_GROUP_LATEST_METRICS_BY_GROUP_TOPIC, clusterPhyId, group, topic,
|
||||
startTime, latestTime, aggDsl);
|
||||
|
||||
String routing = routing(clusterPhyId, group);
|
||||
@@ -74,7 +70,7 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
});
|
||||
}
|
||||
|
||||
queryFuture.waitExecute();
|
||||
esTPService.waitExecute();
|
||||
return groupMetricPOS;
|
||||
}
|
||||
|
||||
@@ -86,7 +82,7 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
String realIndex = realIndex(startTime, latestTime);
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.LIST_GROUP_LATEST_METRICS_OF_PARTITION, clusterPhyId, group, topic, latestTime);
|
||||
DslConstant.LIST_GROUP_LATEST_METRICS_OF_PARTITION, clusterPhyId, group, topic, latestTime);
|
||||
|
||||
List<GroupMetricPO> groupMetricPOS = esOpClient.performRequest(realIndex, dsl, GroupMetricPO.class);
|
||||
return filterMetrics(groupMetricPOS, metrics);
|
||||
@@ -101,8 +97,8 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
String matchDsl = buildTermsDsl(Arrays.asList(match));
|
||||
|
||||
String dsl = match.isEqual()
|
||||
? dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_GROUP_METRIC_VALUE, clusterPhyId, groupName, startTime, endTime, matchDsl)
|
||||
: dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_GROUP_NOT_METRIC_VALUE, clusterPhyId, groupName, startTime, endTime, matchDsl);
|
||||
? dslLoaderUtil.getFormatDslByFileName( DslConstant.COUNT_GROUP_METRIC_VALUE, clusterPhyId, groupName, startTime, endTime, matchDsl)
|
||||
: dslLoaderUtil.getFormatDslByFileName( DslConstant.COUNT_GROUP_NOT_METRIC_VALUE, clusterPhyId, groupName, startTime, endTime, matchDsl);
|
||||
|
||||
return esOpClient.performRequestWithRouting(clusterPhyId.toString() + "@" + groupName, realIndex, dsl,
|
||||
s -> handleESQueryResponseCount(s), 3);
|
||||
@@ -127,13 +123,26 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
Integer partition = tp.getPartition();
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.LIST_GROUP_METRICS, clusterId, groupName, topic, partition, startTime, endTime, interval, aggDsl);
|
||||
DslConstant.LIST_GROUP_METRICS,
|
||||
clusterId,
|
||||
groupName,
|
||||
topic,
|
||||
partition,
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequest(realIndex, dsl,
|
||||
s -> handleGroupMetrics(s, aggType, metrics), 3);
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequest(
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleGroupMetrics(s, aggType, metrics),
|
||||
DEFAULT_RETRY_TIME
|
||||
);
|
||||
|
||||
for(String metric : metricMap.keySet()){
|
||||
table.put(metric, topic + "&" + partition, metricMap.get(metric));
|
||||
for(Map.Entry<String/*metric*/, List<MetricPointVO>> entry: metricMap.entrySet()){
|
||||
table.put(entry.getKey(), topic + "&" + partition, entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,7 +157,7 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_GROUP_TOPIC_PARTITION, clusterPhyId, groupName, startTime, endTime);
|
||||
DslConstant.GET_GROUP_TOPIC_PARTITION, clusterPhyId, groupName, startTime, endTime);
|
||||
|
||||
List<GroupMetricPO> groupMetricPOS = esOpClient.performRequestWithRouting(routing(clusterPhyId, groupName), realIndex, dsl, GroupMetricPO.class);
|
||||
return groupMetricPOS.stream().map(g -> new TopicPartitionKS(g.getTopic(), g.getPartitionId().intValue())).collect( Collectors.toSet());
|
||||
@@ -189,23 +198,27 @@ public class GroupMetricESDAO extends BaseMetricESDAO {
|
||||
for(String metric : metrics){
|
||||
List<MetricPointVO> metricPoints = new ArrayList<>();
|
||||
|
||||
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
|
||||
esAggrMap.get(HIST).getBucketList().forEach(esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE);
|
||||
if(value == null){return;}
|
||||
|
||||
MetricPointVO metricPoint = new MetricPointVO();
|
||||
metricPoint.setAggType(aggType);
|
||||
metricPoint.setTimeStamp(timestamp);
|
||||
metricPoint.setValue(value.toString());
|
||||
metricPoint.setName(metric);
|
||||
|
||||
metricPoints.add(metricPoint);
|
||||
if (null == esBucket.getUnusedMap().get(KEY)) {
|
||||
return;
|
||||
}
|
||||
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE);
|
||||
if(value == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
MetricPointVO metricPoint = new MetricPointVO();
|
||||
metricPoint.setAggType(aggType);
|
||||
metricPoint.setTimeStamp(timestamp);
|
||||
metricPoint.setValue(value.toString());
|
||||
metricPoint.setName(metric);
|
||||
|
||||
metricPoints.add(metricPoint);
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleESQueryResponse||metric={}||errMsg=exception!", metric, e);
|
||||
LOGGER.error("method=handleGroupMetrics||metric={}||errMsg=exception!", metric, e);
|
||||
}
|
||||
} );
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.PartitionMetricPO;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.PARTITION_INDEX;
|
||||
|
||||
/**
|
||||
* @author didi
|
||||
@@ -18,10 +18,9 @@ public class PartitionMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = PARTITION_INDEX;
|
||||
super.indexTemplate = PARTITION_TEMPLATE;
|
||||
super.indexName = PARTITION_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
BaseMetricESDAO.register(indexName, this);
|
||||
register(this);
|
||||
}
|
||||
|
||||
public PartitionMetricPO getPartitionLatestMetrics(Long clusterPhyId, String topic,
|
||||
@@ -31,7 +30,7 @@ public class PartitionMetricESDAO extends BaseMetricESDAO {
|
||||
Long startTime = endTime - FIVE_MIN;
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_PARTITION_LATEST_METRICS, clusterPhyId, topic, brokerId, partitionId, startTime, endTime);
|
||||
DslConstant.GET_PARTITION_LATEST_METRICS, clusterPhyId, topic, brokerId, partitionId, startTime, endTime);
|
||||
|
||||
PartitionMetricPO partitionMetricPO = esOpClient.performRequestAndTakeFirst(
|
||||
partitionId.toString(), realIndex(startTime, endTime), dsl, PartitionMetricPO.class);
|
||||
@@ -45,7 +44,7 @@ public class PartitionMetricESDAO extends BaseMetricESDAO {
|
||||
Long startTime = endTime - FIVE_MIN;
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.LIST_PARTITION_LATEST_METRICS_BY_TOPIC, clusterPhyId, topic, startTime, endTime);
|
||||
DslConstant.LIST_PARTITION_LATEST_METRICS_BY_TOPIC, clusterPhyId, topic, startTime, endTime);
|
||||
|
||||
List<PartitionMetricPO> partitionMetricPOS = esOpClient.performRequest(
|
||||
realIndex(startTime, endTime), dsl, PartitionMetricPO.class);
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dao;
|
||||
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.ReplicationMetricPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.VALUE;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
|
||||
|
||||
/**
|
||||
* @author didi
|
||||
*/
|
||||
@Component
|
||||
public class ReplicationMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = REPLICATION_INDEX;
|
||||
super.indexTemplate = REPLICATION_TEMPLATE;
|
||||
checkCurrentDayIndexExist();
|
||||
BaseMetricESDAO.register(indexName, this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the latest metrics of broker brokerId in cluster clusterId.
|
||||
*/
|
||||
public ReplicationMetricPO getReplicationLatestMetrics(Long clusterPhyId, Integer brokerId, String topic,
|
||||
Integer partitionId, List<String> metricNames){
|
||||
Long endTime = getLatestMetricTime();
|
||||
Long startTime = endTime - FIVE_MIN;
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_REPLICATION_LATEST_METRICS, clusterPhyId, brokerId, topic, partitionId, startTime, endTime);
|
||||
|
||||
ReplicationMetricPO replicationMetricPO = esOpClient.performRequestAndTakeFirst(
|
||||
realIndex(startTime, endTime), dsl, ReplicationMetricPO.class);
|
||||
|
||||
return (null == replicationMetricPO) ? new ReplicationMetricPO(clusterPhyId, topic, brokerId, partitionId)
|
||||
: filterMetrics(replicationMetricPO, metricNames);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get, for cluster clusterPhyId, the aggregated (avg, max) value of each metric for the given partitionId within the time range [startTime, endTime].
|
||||
*/
|
||||
public Map<String/*metric*/, MetricPointVO> getReplicationMetricsPoint(Long clusterPhyId, String topic,
|
||||
Integer brokerId, Integer partitionId, List<String> metrics,
|
||||
String aggType, Long startTime, Long endTime){
|
||||
//1. Resolve the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
//2. Build the aggregation query condition
|
||||
String aggDsl = buildAggsDSL(metrics, aggType);
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_REPLICATION_AGG_SINGLE_METRICS, clusterPhyId, topic, brokerId, partitionId, startTime, endTime, aggDsl);
|
||||
|
||||
return esOpClient.performRequestWithRouting(String.valueOf(brokerId), realIndex, dsl,
|
||||
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
|
||||
}
|
||||
|
||||
/**************************************************** private method ****************************************************/
|
||||
private Map<String/*metric*/, MetricPointVO> handleSingleESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
|
||||
Map<String/*metric*/, MetricPointVO> metricMap = new HashMap<>();
|
||||
|
||||
if(null == response || null == response.getAggs()){
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
Map<String, ESAggr> esAggrMap = response.getAggs().getEsAggrMap();
|
||||
if (null == esAggrMap) {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
for(String metric : metrics){
|
||||
String value = esAggrMap.get(metric).getUnusedMap().get(VALUE).toString();
|
||||
|
||||
MetricPointVO metricPoint = new MetricPointVO();
|
||||
metricPoint.setAggType(aggType);
|
||||
metricPoint.setValue(value);
|
||||
metricPoint.setName(metric);
|
||||
|
||||
metricMap.put(metric, metricPoint);
|
||||
}
|
||||
|
||||
return metricMap;
|
||||
}
|
||||
}
|
||||
@@ -4,16 +4,20 @@ import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResp
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
|
||||
import com.google.common.collect.HashBasedTable;
|
||||
import com.google.common.collect.Table;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchFuzzy;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchShould;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchSort;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.TopicMetricPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.FutureWaitUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.PaginationMetricsUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.cache.DataBaseDataLocalCache;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
@@ -22,21 +26,17 @@ import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.TOPIC_INDEX;
|
||||
|
||||
@Component
|
||||
public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = TOPIC_INDEX;
|
||||
super.indexTemplate = TOPIC_TEMPLATE;
|
||||
super.indexName = TOPIC_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
BaseMetricESDAO.register(indexName, this);
|
||||
register(this);
|
||||
}
|
||||
|
||||
protected FutureWaitUtil<Void> queryFuture = FutureWaitUtil.init("TopicMetricESDAO", 4,8, 500);
|
||||
|
||||
public List<TopicMetricPO> listTopicMaxMinMetrics(Long clusterPhyId, List<String> topics, String metric, boolean max, Long startTime, Long endTime){
|
||||
//1. Resolve the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
@@ -47,7 +47,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
String sortDsl = buildSortDsl(sort, SearchSort.DEFAULT);
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_MAX_OR_MIN_SINGLE_METRIC, clusterPhyId, startTime, endTime, topic, sortDsl);
|
||||
DslConstant.GET_TOPIC_MAX_OR_MIN_SINGLE_METRIC, clusterPhyId, startTime, endTime, topic, sortDsl);
|
||||
TopicMetricPO topicMetricPO = esOpClient.performRequestAndTakeFirst(topic, realIndex, dsl, TopicMetricPO.class);
|
||||
ret.add(topicMetricPO);
|
||||
}
|
||||
@@ -74,7 +74,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
}
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_AGG_SINGLE_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString(), aggDsl);
|
||||
DslConstant.GET_TOPIC_AGG_SINGLE_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString(), aggDsl);
|
||||
|
||||
return esOpClient.performRequest(realIndex, dsl,
|
||||
s -> handleSingleESQueryResponse(s, metrics, aggType), 3);
|
||||
@@ -112,7 +112,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
String realIndex = realIndex(startTime, latestMetricTime);
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.LIST_TOPIC_WITH_LATEST_METRICS, clusterId, latestMetricTime, appendQueryDsl.toString(), sortDsl);
|
||||
DslConstant.LIST_TOPIC_WITH_LATEST_METRICS, clusterId, latestMetricTime, appendQueryDsl.toString(), sortDsl);
|
||||
|
||||
return esOpClient.performRequest(realIndex, dsl, TopicMetricPO.class);
|
||||
}
|
||||
@@ -126,11 +126,16 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
String termDsl = buildTermsDsl(Arrays.asList(term));
|
||||
|
||||
String dsl = term.isEqual()
|
||||
? dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_TOPIC_METRIC_VALUE, clusterPhyId, topic, startTime, endTime, termDsl)
|
||||
: dslLoaderUtil.getFormatDslByFileName(DslsConstant.COUNT_TOPIC_NOT_METRIC_VALUE, clusterPhyId, topic, startTime, endTime, termDsl);
|
||||
? dslLoaderUtil.getFormatDslByFileName( DslConstant.COUNT_TOPIC_METRIC_VALUE, clusterPhyId, topic, startTime, endTime, termDsl)
|
||||
: dslLoaderUtil.getFormatDslByFileName( DslConstant.COUNT_TOPIC_NOT_METRIC_VALUE, clusterPhyId, topic, startTime, endTime, termDsl);
|
||||
|
||||
return esOpClient.performRequestWithRouting(topic, realIndex, dsl,
|
||||
s -> handleESQueryResponseCount(s), 3);
|
||||
return esOpClient.performRequestWithRouting(
|
||||
topic,
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleESQueryResponseCount(s),
|
||||
DEFAULT_RETRY_TIME
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -141,7 +146,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
Long startTime = endTime - FIVE_MIN;
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_BROKER_LATEST_METRICS, clusterPhyId, topic, brokerId, startTime, endTime);
|
||||
DslConstant.GET_TOPIC_BROKER_LATEST_METRICS, clusterPhyId, topic, brokerId, startTime, endTime);
|
||||
|
||||
TopicMetricPO topicMetricPO = esOpClient.performRequestAndTakeFirst(topic, realIndex(startTime, endTime), dsl, TopicMetricPO.class);
|
||||
|
||||
@@ -165,7 +170,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
}
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_LATEST_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString());
|
||||
DslConstant.GET_TOPIC_LATEST_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString());
|
||||
|
||||
//topicMetricPOS is already sorted by timeStamp in descending order
|
||||
List<TopicMetricPO> topicMetricPOS = esOpClient.performRequest(realIndex(startTime, endTime), dsl, TopicMetricPO.class);
|
||||
@@ -197,7 +202,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
}
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_LATEST_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString());
|
||||
DslConstant.GET_TOPIC_LATEST_METRICS, clusterPhyId, startTime, endTime, appendQueryDsl.toString());
|
||||
|
||||
TopicMetricPO topicMetricPO = esOpClient.performRequestAndTakeFirst(topic, realIndex(startTime, endTime), dsl, TopicMetricPO.class);
|
||||
|
||||
@@ -207,28 +212,55 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
/**
|
||||
* Get the metrics of the topN topics for each metric; if the topN topics cannot be obtained, the metrics of defaultTopics are returned instead.
|
||||
*/
|
||||
public Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> listTopicMetricsByTopN(Long clusterPhyId, List<String> defaultTopics,
|
||||
List<String> metrics, String aggType, int topN,
|
||||
Long startTime, Long endTime){
|
||||
public Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> listTopicMetricsByTopN(Long clusterPhyId,
|
||||
List<String> defaultTopicNameList,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
int topN,
|
||||
Long startTime,
|
||||
Long endTime){
|
||||
//1. Get the topN topics to query; the topN topics may differ for each metric
|
||||
Map<String, List<String>> metricTopics = getTopNTopics(clusterPhyId, metrics, aggType, topN, startTime, endTime);
|
||||
Map<String, List<String>> metricTopicsMap = this.getTopNTopics(clusterPhyId, metricNameList, aggType, topN, startTime, endTime);
|
||||
|
||||
Table<String, String, List<MetricPointVO>> table = HashBasedTable.create();
|
||||
//2. Build the full topic list
|
||||
Set<String> topicNameSet = new HashSet<>(defaultTopicNameList);
|
||||
metricTopicsMap.values().forEach(elem -> topicNameSet.addAll(elem));
|
||||
|
||||
for(String metric : metrics){
|
||||
table.putAll(listTopicMetricsByTopics(clusterPhyId, Arrays.asList(metric),
|
||||
aggType, metricTopics.getOrDefault(metric, defaultTopics), startTime, endTime));
|
||||
//3. Fetch the metrics in one batch
|
||||
Table<String, String, List<MetricPointVO>> allMetricsTable = this.listTopicMetricsByTopics(
|
||||
clusterPhyId,
|
||||
metricNameList,
|
||||
aggType,
|
||||
new ArrayList<>(topicNameSet),
|
||||
startTime,
|
||||
endTime
|
||||
);
|
||||
|
||||
//4. Keep only the top-N entries for each metric
|
||||
Table<String, String, List<MetricPointVO>> metricsTable = HashBasedTable.create();
|
||||
for(String metricName: metricNameList) {
|
||||
for (String topicName: metricTopicsMap.getOrDefault(metricName, defaultTopicNameList)) {
|
||||
List<MetricPointVO> voList = allMetricsTable.get(metricName, topicName);
|
||||
if (voList == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
metricsTable.put(metricName, topicName, voList);
|
||||
}
|
||||
}
|
||||
|
||||
return table;
|
||||
return metricsTable;
|
||||
}
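// The rewritten listTopicMetricsByTopN above first unions the per-metric top-N topics with the
// default topics, fetches all of them in a single listTopicMetricsByTopics call, and then re-splits
// the combined table per metric before returning it.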
|
||||
|
||||
/**
|
||||
* Get the metrics of the specified topics for each metric.
|
||||
*/
|
||||
public Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> listTopicMetricsByTopics(Long clusterPhyId, List<String> metrics,
|
||||
String aggType, List<String> topics,
|
||||
Long startTime, Long endTime){
|
||||
public Table<String/*metric*/, String/*topics*/, List<MetricPointVO>> listTopicMetricsByTopics(Long clusterPhyId,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
List<String> topicNameList,
|
||||
Long startTime,
|
||||
Long endTime){
|
||||
//1. Resolve the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
@@ -236,60 +268,77 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
String interval = MetricsUtils.getInterval(endTime - startTime);
|
||||
|
||||
//3. Build the aggregation query condition
|
||||
String aggDsl = buildAggsDSL(metrics, aggType);
|
||||
String aggDsl = buildAggsDSL(metricNameList, aggType);
|
||||
|
||||
final Table<String, String, List<MetricPointVO>> table = HashBasedTable.create();
|
||||
|
||||
//4. Build the DSL query condition
|
||||
for(String topic : topics){
|
||||
for(String topicName : topicNameList){
|
||||
try {
|
||||
queryFuture.runnableTask(
|
||||
String.format("class=TopicMetricESDAO||method=listTopicMetricsByTopics||ClusterPhyId=%d||topicName=%s",
|
||||
clusterPhyId, topic),
|
||||
esTPService.submitSearchTask(
|
||||
String.format("class=TopicMetricESDAO||method=listTopicMetricsByTopics||ClusterPhyId=%d||topicName=%s", clusterPhyId, topicName),
|
||||
3000,
|
||||
() -> {
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_AGG_LIST_METRICS, clusterPhyId, topic, startTime, endTime, interval, aggDsl);
|
||||
DslConstant.GET_TOPIC_AGG_LIST_METRICS,
|
||||
clusterPhyId,
|
||||
topicName,
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(topic, realIndex, dsl,
|
||||
s -> handleListESQueryResponse(s, metrics, aggType), 3);
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(
|
||||
topicName,
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleListESQueryResponse(s, metricNameList, aggType),
|
||||
DEFAULT_RETRY_TIME
|
||||
);
|
||||
|
||||
synchronized (table){
|
||||
for(String metric : metricMap.keySet()){
|
||||
table.put(metric, topic, metricMap.get(metric));
|
||||
for(Map.Entry<String/*metric*/, List<MetricPointVO>> entry: metricMap.entrySet()){
|
||||
table.put(entry.getKey(), topicName, entry.getValue());
|
||||
}
|
||||
}
|
||||
});
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=listBrokerMetricsByBrokerIds||clusterPhyId={}||brokerId{}||errMsg=exception!",
|
||||
clusterPhyId, topic, e);
|
||||
LOGGER.error("method=listTopicMetricsByTopics||clusterPhyId={}||topicName={}||errMsg=exception!", clusterPhyId, topicName, e);
|
||||
}
|
||||
}
|
||||
|
||||
queryFuture.waitExecute();
|
||||
esTPService.waitExecute();
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
//public for test
|
||||
public Map<String, List<String>> getTopNTopics(Long clusterPhyId, List<String> metrics,
|
||||
String aggType, int topN,
|
||||
Long startTime, Long endTime){
|
||||
//1. Resolve the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
public Map<String, List<String>> getTopNTopics(Long clusterPhyId,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
int topN,
|
||||
Long startTime,
|
||||
Long endTime) {
|
||||
Map<String, TopicMetrics> metricsMap = DataBaseDataLocalCache.getTopicMetrics(clusterPhyId);
|
||||
if (metricsMap == null) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
|
||||
//2. Choose the aggregation interval for the metric points based on the size of the queried time range
|
||||
String interval = MetricsUtils.getInterval(endTime - startTime);
|
||||
List<TopicMetrics> metricsList = new ArrayList<>(metricsMap.values());
|
||||
|
||||
//3. Build the aggregation query condition
|
||||
String aggDsl = buildAggsDSL(metrics, aggType);
|
||||
Map<String, List<String>> resultMap = new HashMap<>();
|
||||
for (String metricName: metricNameList) {
|
||||
metricsList = PaginationMetricsUtil.sortMetrics(
|
||||
metricsList.stream().map(elem -> (BaseMetrics)elem).collect(Collectors.toList()),
|
||||
metricName,
|
||||
"topic",
|
||||
SortTypeEnum.DESC.getSortType()
|
||||
).stream().map(elem -> (TopicMetrics)elem).collect(Collectors.toList());
|
||||
|
||||
//4. Query ES
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_TOPIC_AGG_TOP_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
|
||||
resultMap.put(metricName, metricsList.subList(0, Math.min(topN, metricsList.size())).stream().map(elem -> elem.getTopic()).collect(Collectors.toList()));
|
||||
}
|
||||
|
||||
return esOpClient.performRequest(realIndex, dsl,
|
||||
s -> handleTopTopicESQueryResponse(s, metrics, topN), 3);
|
||||
return resultMap;
|
||||
}
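// Note: getTopNTopics now ranks topics from the locally cached latest TopicMetrics
// (DataBaseDataLocalCache plus PaginationMetricsUtil.sortMetrics) rather than running a
// top-N aggregation query against ES, which is what the removed lines in this hunk did.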
|
||||
|
||||
/**************************************************** private method ****************************************************/
|
||||
@@ -327,7 +376,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
|
||||
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
|
||||
|
||||
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap){return metricMap;}
|
||||
|
||||
for(String metric : metrics){
|
||||
@@ -340,15 +389,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
Object value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE);
|
||||
if(value == null){return;}
|
||||
|
||||
MetricPointVO metricPoint = new MetricPointVO();
|
||||
metricPoint.setAggType(aggType);
|
||||
metricPoint.setTimeStamp(timestamp);
|
||||
metricPoint.setValue(value.toString());
|
||||
metricPoint.setName(metric);
|
||||
|
||||
metricPoints.add(metricPoint);
|
||||
}else {
|
||||
LOGGER.info("");
|
||||
metricPoints.add(new MetricPointVO(metric, timestamp, value.toString(), aggType));
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleListESQueryResponse||metric={}||errMsg=exception!", metric, e);
|
||||
@@ -361,7 +402,7 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
private Map<String, List<String>> handleTopTopicESQueryResponse(ESQueryResponse response, List<String> metrics, int topN){
|
||||
private Map<String, List<String>> handleTopTopicESQueryResponse(ESQueryResponse response, List<String> metricNameList, int topN){
|
||||
Map<String, List<String>> ret = new HashMap<>();
|
||||
|
||||
Map<String, ESAggr> esAggrMap = checkBucketsAndHitsOfResponseAggs(response);
|
||||
@@ -370,57 +411,37 @@ public class TopicMetricESDAO extends BaseMetricESDAO {
|
||||
Map<String, List<Tuple<String, Double>>> metricsTopicValueMap = new HashMap<>();
|
||||
|
||||
//1. First collect, for each metric, all topics and the corresponding metric values
|
||||
for(String metric : metrics) {
|
||||
for(String metricName: metricNameList) {
|
||||
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
String topic = esBucket.getUnusedMap().get(KEY).toString();
|
||||
Double value = Double.valueOf(esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap()
|
||||
.get(metric).getUnusedMap().get(VALUE).toString());
|
||||
Double value = Double.valueOf(
|
||||
esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap().get(metricName).getUnusedMap().get(VALUE).toString()
|
||||
);
|
||||
|
||||
List<Tuple<String, Double>> brokerValue = (null == metricsTopicValueMap.get(metric)) ?
|
||||
new ArrayList<>() : metricsTopicValueMap.get(metric);
|
||||
|
||||
brokerValue.add(new Tuple<>(topic, value));
|
||||
metricsTopicValueMap.put(metric, brokerValue);
|
||||
metricsTopicValueMap.putIfAbsent(metricName, new ArrayList<>());
|
||||
metricsTopicValueMap.get(metricName).add(new Tuple<>(topic, value));
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleTopBrokerESQueryResponse||metric={}||errMsg=exception!", metric, e);
|
||||
LOGGER.error("method=handleTopTopicESQueryResponse||metricName={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
|
||||
}
|
||||
|
||||
//2. For each metric, sort the topics by metric value and keep the first topN topics
|
||||
for(String metric : metricsTopicValueMap.keySet()){
|
||||
List<Tuple<String, Double>> brokerValue = metricsTopicValueMap.get(metric);
|
||||
for(Map.Entry<String, List<Tuple<String, Double>>> entry: metricsTopicValueMap.entrySet()){
|
||||
entry.getValue().sort((o1, o2) -> {
|
||||
if(null == o1 || null == o2) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
brokerValue.sort((o1, o2) -> {
|
||||
if(null == o1 || null == o2){return 0;}
|
||||
return o2.getV2().compareTo(o1.getV2());
|
||||
} );
|
||||
|
||||
List<Tuple<String, Double>> temp = (brokerValue.size() > topN) ? brokerValue.subList(0, topN) : brokerValue;
|
||||
List<String> topics = temp.stream().map(t -> t.getV1()).collect(Collectors.toList());
|
||||
List<String> topicNameList = entry.getValue().subList(0, Math.min(entry.getValue().size(), topN)).stream().map(t -> t.getV1()).collect(Collectors.toList());
|
||||
|
||||
ret.put(metric, topics);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
private Map<String/*metric*/, Map<String/*topic*/, List<MetricPointVO>>> topicMetricMap2MetricTopicMap(
|
||||
Map<String/*topic*/, Map<String/*metric*/, List<MetricPointVO>>> topicMetricMap){
|
||||
Map<String/*metric*/, Map<String/*topic*/, List<MetricPointVO>>> ret = new HashMap<>();
|
||||
|
||||
for(String topic : topicMetricMap.keySet()){
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = topicMetricMap.get(topic);
|
||||
|
||||
for(String metric : metricMap.keySet()){
|
||||
Map<String/*topic*/, List<MetricPointVO>> brokerMap = (null == ret.get(metric)) ? new HashMap<>() : ret.get(metric);
|
||||
|
||||
brokerMap.put(topic, metricMap.get(metric));
|
||||
ret.put(metric, brokerMap);
|
||||
}
|
||||
ret.put(entry.getKey(), topicNameList);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
@@ -5,7 +5,7 @@ import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.ESConstant;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslsConstant;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
@@ -15,18 +15,16 @@ import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.ZOOKEEPER_INDEX;
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.ZOOKEEPER_TEMPLATE;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.ZOOKEEPER_INDEX;
|
||||
|
||||
@Component
|
||||
public class ZookeeperMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = ZOOKEEPER_INDEX;
|
||||
super.indexTemplate = ZOOKEEPER_TEMPLATE;
|
||||
super.indexName = ZOOKEEPER_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
BaseMetricESDAO.register(indexName, this);
|
||||
register(this);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -49,7 +47,7 @@ public class ZookeeperMetricESDAO extends BaseMetricESDAO {
|
||||
//4. Build the DSL query condition and run the query
|
||||
try {
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslsConstant.GET_ZOOKEEPER_AGG_LIST_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
|
||||
DslConstant.GET_ZOOKEEPER_AGG_LIST_METRICS, clusterPhyId, startTime, endTime, interval, aggDsl);
|
||||
|
||||
return esOpClient.performRequestWithRouting(
|
||||
String.valueOf(clusterPhyId),
|
||||
@@ -59,7 +57,7 @@ public class ZookeeperMetricESDAO extends BaseMetricESDAO {
|
||||
ESConstant.DEFAULT_RETRY_TIME
|
||||
);
|
||||
} catch (Exception e){
|
||||
LOGGER.error("class=ZookeeperMetricESDAO||method=listMetricsByClusterPhyId||clusterPhyId={}||errMsg=exception!",
|
||||
LOGGER.error("method=listMetricsByClusterPhyId||clusterPhyId={}||errMsg=exception!",
|
||||
clusterPhyId, e
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,316 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect;
|
||||
|
||||
import com.alibaba.druid.util.StringUtils;
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
|
||||
import com.google.common.collect.HashBasedTable;
|
||||
import com.google.common.collect.Table;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.search.SearchTerm;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.connect.ConnectorMetricPO;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Triple;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dao.BaseMetricESDAO;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
|
||||
|
||||
public class BaseConnectorMetricESDAO extends BaseMetricESDAO {
|
||||
|
||||
/**
|
||||
* Get the metrics of the topN connectors for each metric; if the topN connectors cannot be obtained, the metrics of defaultConnectorList are returned instead.
|
||||
*/
|
||||
public Table<String/*metric*/, Tuple<Long, String>, List<MetricPointVO>> listMetricsByTopN(Long clusterPhyId,
|
||||
List<Tuple<Long, String>> defaultConnectorList,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
int topN,
|
||||
Long startTime,
|
||||
Long endTime) {
|
||||
//1. Get the topN connectors to query; the topN connectors may differ for each metric
|
||||
Map<String, List<Tuple<Long, String>>> metricsMap = this.getTopNConnectors(clusterPhyId, metricNameList, aggType, topN, startTime, endTime);
|
||||
|
||||
//2. Build the full connector list
|
||||
Set<Tuple<Long, String>> connectorSet = new HashSet<>(defaultConnectorList);
|
||||
metricsMap.values().forEach(elem -> connectorSet.addAll(elem));
|
||||
|
||||
//3. Fetch the metrics in one batch
|
||||
Table<String, Tuple<Long, String>, List<MetricPointVO>> allMetricsTable = this.listMetricsByConnectors(
|
||||
clusterPhyId,
|
||||
metricNameList,
|
||||
aggType,
|
||||
new ArrayList<>(connectorSet),
|
||||
startTime,
|
||||
endTime
|
||||
);
|
||||
|
||||
//4. Keep only the top-N entries for each metric
|
||||
Table<String, Tuple<Long, String>, List<MetricPointVO>> metricTable = HashBasedTable.create();
|
||||
for (String metricName: metricNameList) {
|
||||
for (Tuple<Long, String> connector: metricsMap.getOrDefault(metricName, defaultConnectorList)) {
|
||||
List<MetricPointVO> voList = allMetricsTable.get(metricName, connector);
|
||||
if (voList == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
metricTable.put(metricName, connector, voList);
|
||||
}
|
||||
}
|
||||
|
||||
// Return the result
|
||||
return metricTable;
|
||||
}
|
||||
|
||||
public List<ConnectorMetricPO> getConnectorLatestMetric(Long clusterPhyId, List<Tuple<Long, String>> connectClusterIdAndConnectorNameList, List<String> metricsNames){
|
||||
List<ConnectorMetricPO> connectorMetricPOS = new CopyOnWriteArrayList<>();
|
||||
|
||||
for(Tuple<Long, String> connectClusterIdAndConnectorName : connectClusterIdAndConnectorNameList){
|
||||
esTPService.submitSearchTask(
|
||||
"getConnectorLatestMetric",
|
||||
30000,
|
||||
() -> {
|
||||
ConnectorMetricPO connectorMetricPO = this.getConnectorLatestMetric(clusterPhyId, connectClusterIdAndConnectorName.getV1(), connectClusterIdAndConnectorName.getV2(), metricsNames);
|
||||
connectorMetricPOS.add(connectorMetricPO);
|
||||
});
|
||||
}
|
||||
|
||||
esTPService.waitExecute();
|
||||
return connectorMetricPOS;
|
||||
}
|
||||
|
||||
public ConnectorMetricPO getConnectorLatestMetric(Long clusterPhyId, Long connectClusterId, String connectorName, List<String> metricsNames){
|
||||
Long endTime = getLatestMetricTime();
|
||||
Long startTime = endTime - FIVE_MIN;
|
||||
|
||||
SearchTerm searchClusterIdTerm = new SearchTerm("connectClusterId", connectClusterId.toString());
|
||||
searchClusterIdTerm.setField(true);
|
||||
|
||||
SearchTerm searchClusterNameTerm = new SearchTerm("connectorName", connectorName);
|
||||
searchClusterNameTerm.setField(true);
|
||||
|
||||
String termDsl = buildTermsDsl(Arrays.asList(searchClusterIdTerm, searchClusterNameTerm));
|
||||
StringBuilder appendQueryDsl = new StringBuilder();
|
||||
if(!StringUtils.isEmpty(termDsl)){
|
||||
appendQueryDsl.append(",").append(termDsl);
|
||||
}
|
||||
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslConstant.GET_CONNECTOR_LATEST_METRICS, connectClusterId, connectorName, startTime, endTime, appendQueryDsl.toString());
|
||||
|
||||
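// Route the search by connectClusterId, query the indices covering [startTime, endTime] and take the first hit as the latest metric document.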
ConnectorMetricPO connectorMetricPO = esOpClient.performRequestAndTakeFirst(
|
||||
connectClusterId.toString(), realIndex(startTime, endTime), dsl, ConnectorMetricPO.class);
|
||||
|
||||
return (null == connectorMetricPO) ? new ConnectorMetricPO(clusterPhyId, connectClusterId, connectorName)
|
||||
: filterMetrics(connectorMetricPO, metricsNames);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the metrics of the specified connectors for each metric
|
||||
*/
|
||||
public Table<String/*metric*/, Tuple<Long, String>, List<MetricPointVO>> listMetricsByConnectors(Long clusterPhyId,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
List<Tuple<Long, String>> connectorList,
|
||||
Long startTime,
|
||||
Long endTime) {
|
||||
//1. Get the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
//2. Determine the aggregation interval of the metric points from the size of the query time range
|
||||
String interval = MetricsUtils.getInterval(endTime - startTime);
|
||||
|
||||
//3. Build the aggregation query conditions
|
||||
String aggDsl = buildAggsDSL(metricNameList, aggType);
|
||||
|
||||
final Table<String, Tuple<Long, String>, List<MetricPointVO>> table = HashBasedTable.create();
|
||||
|
||||
//4. Build the DSL query conditions
|
||||
for(Tuple<Long, String> connector : connectorList) {
|
||||
try {
|
||||
esTPService.submitSearchTask(
|
||||
String.format("class=BaseConnectorMetricESDAO||method=listMetricsByConnectors||ClusterPhyId=%d||connectorName=%s", clusterPhyId, connector.getV2()),
|
||||
3000,
|
||||
() -> {
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslConstant.GET_CONNECTOR_AGG_LIST_METRICS,
|
||||
clusterPhyId,
|
||||
connector.getV1(),
|
||||
connector.getV2(),
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
Map<String/*metric*/, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(
|
||||
connector.getV1().toString(),
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleListESQueryResponse(s, metricNameList, aggType),
|
||||
3
|
||||
);
|
||||
|
||||
synchronized (table) {
|
||||
for(Map.Entry<String/*metricName*/, List<MetricPointVO>> entry: metricMap.entrySet()){
|
||||
table.put(entry.getKey(), connector, entry.getValue());
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(
|
||||
"method=listMetricsByConnectors||clusterPhyId={}||connectClusterId={}||connectorName{}||errMsg=exception!",
|
||||
clusterPhyId, connector.getV1(), connector.getV2(), e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
esTPService.waitExecute();
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
//public for test
|
||||
public Map<String, List<Tuple<Long, String>>> getTopNConnectors(Long clusterPhyId,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
int topN,
|
||||
Long startTime,
|
||||
Long endTime) {
|
||||
//1. Get the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
//2. Determine the aggregation interval of the metric points from the size of the query time range
|
||||
String interval = MetricsUtils.getInterval(endTime - startTime);
|
||||
|
||||
//3. Build the aggregation query conditions
|
||||
String aggDsl = buildAggsDSL(metricNameList, aggType);
|
||||
|
||||
//4. Query ES
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslConstant.GET_CONNECTOR_AGG_TOP_METRICS,
|
||||
clusterPhyId,
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
return esOpClient.performRequest(
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleTopConnectorESQueryResponse(s, metricNameList, topN),
|
||||
3
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
/**************************************************** private method ****************************************************/
|
||||
|
||||
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metrics, String aggType){
|
||||
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
|
||||
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap) {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
for(String metric : metrics) {
|
||||
List<MetricPointVO> metricPoints = new ArrayList<>();
|
||||
|
||||
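// Each date-histogram bucket carries the timestamp under KEY and one sub-aggregation per metric whose VALUE is the aggregated point.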
esAggrMap.get(HIST).getBucketList().forEach( esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metric).getUnusedMap().get(VALUE);
|
||||
if(value == null){
|
||||
return;
|
||||
}
|
||||
|
||||
metricPoints.add(new MetricPointVO(metric, timestamp, value.toString(), aggType));
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleListESQueryResponse||metric={}||errMsg=exception!", metric, e);
|
||||
}
|
||||
} );
|
||||
|
||||
metricMap.put(metric, optimizeMetricPoints(metricPoints));
|
||||
}
|
||||
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
private Map<String, List<Tuple<Long, String>>> handleTopConnectorESQueryResponse(ESQueryResponse response,
|
||||
List<String> metricNameList,
|
||||
int topN) {
|
||||
Map<String, List<Tuple<Long, String>>> ret = new HashMap<>();
|
||||
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
Map<String, List<Triple<Long, String, Double>>> metricValueMap = new HashMap<>();
|
||||
|
||||
// 1. First collect, for each metric, all connectors and their metric values
|
||||
for(String metricName : metricNameList) {
|
||||
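// Outer terms bucket: one per connectorName#connectClusterId key; the value of its first date-histogram sub-bucket is used for ranking.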
esAggrMap.get(HIST).getBucketList().forEach(esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
String connectorNameAndClusterId = esBucket.getUnusedMap().get(KEY).toString();
|
||||
Object value = esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap().get(metricName).getUnusedMap().get(VALUE);
|
||||
if (value == null) {
|
||||
return;
|
||||
}
|
||||
Double metricValue = Double.valueOf(value.toString());
|
||||
|
||||
Tuple<String, Long> tuple = splitConnectorNameAndClusterId(connectorNameAndClusterId);
|
||||
if (null == tuple) {
|
||||
return;
|
||||
}
|
||||
|
||||
metricValueMap.putIfAbsent(metricName, new ArrayList<>());
|
||||
metricValueMap.get(metricName).add(new Triple<>(tuple.getV2(), tuple.getV1(), metricValue));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("method=handleTopConnectorESQueryResponse||metricName={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
|
||||
}
|
||||
|
||||
//2. Sort each metric's connectors by metric value and keep the first topN connectors
|
||||
for(Map.Entry<String, List<Triple<Long, String, Double>>> entry : metricValueMap.entrySet()){
|
||||
entry.getValue().sort((o1, o2) -> {
|
||||
if(null == o1 || null == o2) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return o2.v3().compareTo(o1.v3());
|
||||
} );
|
||||
|
||||
List<Triple<Long, String, Double>> temp = (entry.getValue().size() > topN) ? entry.getValue().subList(0, topN) : entry.getValue();
|
||||
|
||||
List<Tuple<Long, String>> connectorList = new ArrayList<>();
|
||||
for (Triple<Long, String, Double> triple: temp) {
|
||||
connectorList.add(new Tuple<>(triple.v1(), triple.v2()));
|
||||
}
|
||||
|
||||
ret.put(entry.getKey(), connectorList);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
private Tuple<String, Long> splitConnectorNameAndClusterId(String connectorNameAndClusterId){
|
||||
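// e.g. an (illustrative) key "ks-file-sink#2" splits into Tuple("ks-file-sink", 2L); malformed keys yield null and are skipped by the caller.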
String[] ss = connectorNameAndClusterId.split("#");
|
||||
if(null == ss || ss.length != 2) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new Tuple<>(ss[0], Long.valueOf(ss[1]));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,253 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect;
|
||||
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
|
||||
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
|
||||
import com.google.common.collect.HashBasedTable;
|
||||
import com.google.common.collect.Table;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.MetricsUtils;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dao.BaseMetricESDAO;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dsls.DslConstant;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.CONNECT_CLUSTER_INDEX;
|
||||
|
||||
@Component
|
||||
public class ConnectClusterMetricESDAO extends BaseMetricESDAO {
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = CONNECT_CLUSTER_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
register( this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get, for each metric, all metric points of the top-N connect clusters of cluster clusterPhyId within the time range [startTime, endTime]
|
||||
* The top-N ranking uses the last value within [startTime, endTime]
|
||||
*/
|
||||
public Table<String/*metric*/, Long/*connectClusterId*/, List<MetricPointVO>> listMetricsByTop(Long clusterPhyId,
|
||||
List<Long> connectClusterIdList,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
int topN,
|
||||
Long startTime,
|
||||
Long endTime){
|
||||
// 1. Get the top-N connect clusters
|
||||
Map<String, List<Long>> topNConnectClusterIds = getTopNConnectClusterIds(clusterPhyId, metricNameList, aggType, topN, startTime, endTime);
|
||||
|
||||
Table<String, Long, List<MetricPointVO>> table = HashBasedTable.create();
|
||||
|
||||
// 2. Query the metrics
|
||||
for(String metric : metricNameList) {
|
||||
table.putAll(
|
||||
this.listMetricsByConnectClusterIdList(
|
||||
clusterPhyId,
|
||||
Arrays.asList(metric),
|
||||
aggType,
|
||||
topNConnectClusterIds.getOrDefault(metric, connectClusterIdList),
|
||||
startTime,
|
||||
endTime
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get, for each metric, all metric points of the specified connect clusters of cluster clusterPhyId within the time range [startTime, endTime]
|
||||
*/
|
||||
public Table<String/*metric*/, Long/*connectClusterId*/, List<MetricPointVO>> listMetricsByConnectClusterIdList(Long clusterPhyId,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
List<Long> connectClusterIdList,
|
||||
Long startTime,
|
||||
Long endTime){
|
||||
//1. Get the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
//2. Determine the aggregation interval of the metric points from the size of the query time range
|
||||
String interval = MetricsUtils.getInterval(endTime - startTime);
|
||||
|
||||
//3. Build the aggregation query conditions
|
||||
String aggDsl = buildAggsDSL(metricNameList, aggType);
|
||||
|
||||
final Table<String, Long, List<MetricPointVO>> table = HashBasedTable.create();
|
||||
|
||||
//4. Build the DSL query conditions
|
||||
for(Long connectClusterId : connectClusterIdList){
|
||||
try {
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslConstant.GET_CONNECT_CLUSTER_AGG_LIST_METRICS,
|
||||
clusterPhyId,
|
||||
connectClusterId,
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
esTPService.submitSearchTask(
|
||||
String.format("class=ConnectClusterMetricESDAO||method=listMetricsByConnectClusterIdList||ClusterPhyId=%d", clusterPhyId),
|
||||
5000,
|
||||
() -> {
|
||||
Map<String, List<MetricPointVO>> metricMap = esOpClient.performRequestWithRouting(
|
||||
String.valueOf(connectClusterId),
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleListESQueryResponse(s, metricNameList, aggType),
|
||||
DEFAULT_RETRY_TIME
|
||||
);
|
||||
|
||||
synchronized (table) {
|
||||
for(Map.Entry<String, List<MetricPointVO>> entry : metricMap.entrySet()){
|
||||
table.put(entry.getKey(), connectClusterId, entry.getValue());
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(
|
||||
"method=listMetricsByConnectClusterIdList||clusterPhyId={}||connectClusterId={}||errMsg=exception!",
|
||||
clusterPhyId, connectClusterId, e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
esTPService.waitExecute();
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get, for each metric, the top-N connect clusters of cluster clusterPhyId
|
||||
*/
|
||||
//public for test
|
||||
public Map<String, List<Long>> getTopNConnectClusterIds(Long clusterPhyId,
|
||||
List<String> metricNameList,
|
||||
String aggType,
|
||||
int topN,
|
||||
Long startTime,
|
||||
Long endTime){
|
||||
//1. Get the indices that need to be queried
|
||||
String realIndex = realIndex(startTime, endTime);
|
||||
|
||||
//2. Determine the aggregation interval of the metric points from the size of the query time range
|
||||
String interval = MetricsUtils.getInterval(endTime - startTime);
|
||||
|
||||
//3. Build the aggregation query conditions
|
||||
String aggDsl = buildAggsDSL(metricNameList, aggType);
|
||||
|
||||
//4. Query ES
|
||||
String dsl = dslLoaderUtil.getFormatDslByFileName(
|
||||
DslConstant.GET_CONNECT_CLUSTER_AGG_TOP_METRICS,
|
||||
clusterPhyId,
|
||||
startTime,
|
||||
endTime,
|
||||
interval,
|
||||
aggDsl
|
||||
);
|
||||
|
||||
return esOpClient.performRequest(
|
||||
realIndex,
|
||||
dsl,
|
||||
s -> handleTopConnectClusterESQueryResponse(s, metricNameList, topN),
|
||||
3
|
||||
);
|
||||
}
|
||||
|
||||
/**************************************************** private method ****************************************************/
|
||||
|
||||
private Map<String, List<MetricPointVO>> handleListESQueryResponse(ESQueryResponse response, List<String> metricNameList, String aggType){
|
||||
Map<String, List<MetricPointVO>> metricMap = new HashMap<>();
|
||||
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap) {
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
for(String metricName : metricNameList){
|
||||
List<MetricPointVO> metricPoints = new ArrayList<>();
|
||||
|
||||
esAggrMap.get(HIST).getBucketList().forEach(esBucket -> {
|
||||
try {
|
||||
if (null != esBucket.getUnusedMap().get(KEY)) {
|
||||
Long timestamp = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(metricName).getUnusedMap().get(VALUE);
|
||||
if(null == value) {
|
||||
return;
|
||||
}
|
||||
|
||||
metricPoints.add(new MetricPointVO(metricName, timestamp, value.toString(), aggType));
|
||||
}
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleListESQueryResponse||metricName={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
|
||||
|
||||
metricMap.put(metricName, optimizeMetricPoints(metricPoints));
|
||||
}
|
||||
|
||||
return metricMap;
|
||||
}
|
||||
|
||||
private Map<String, List<Long>> handleTopConnectClusterESQueryResponse(ESQueryResponse response, List<String> metricNameList, int topN){
|
||||
Map<String, List<Long>> ret = new HashMap<>();
|
||||
|
||||
Map<String, ESAggr> esAggrMap = this.checkBucketsAndHitsOfResponseAggs(response);
|
||||
if(null == esAggrMap) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
Map<String, List<Tuple<Long, Double>>> metricConnectClusterValueMap = new HashMap<>();
|
||||
|
||||
//1. First collect, for each metric, all connect cluster ids and their metric values
|
||||
for(String metricName : metricNameList) {
|
||||
esAggrMap.get(HIST).getBucketList().forEach(esBucket -> {
|
||||
try {
|
||||
if (null == esBucket.getUnusedMap().get(KEY)) {
|
||||
return;
|
||||
}
|
||||
|
||||
Long connectorClusterId = Long.valueOf(esBucket.getUnusedMap().get(KEY).toString());
|
||||
Object value = esBucket.getAggrMap().get(HIST).getBucketList().get(0).getAggrMap().get(metricName).getUnusedMap().get(VALUE);
|
||||
if(null == value) {
|
||||
return;
|
||||
}
|
||||
|
||||
metricConnectClusterValueMap.putIfAbsent(metricName, new ArrayList<>());
|
||||
metricConnectClusterValueMap.get(metricName).add(new Tuple<>(connectorClusterId, Double.valueOf(value.toString())));
|
||||
}catch (Exception e){
|
||||
LOGGER.error("method=handleTopConnectClusterESQueryResponse||metricName={}||errMsg=exception!", metricName, e);
|
||||
}
|
||||
} );
|
||||
}
|
||||
|
||||
//2. Sort each metric's connect clusters by metric value and keep the first topN connect cluster ids
|
||||
for(Map.Entry<String, List<Tuple<Long, Double>>> entry : metricConnectClusterValueMap.entrySet()){
|
||||
|
||||
entry.getValue().sort((o1, o2) -> {
|
||||
if(null == o1 || null == o2) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return o2.getV2().compareTo(o1.getV2());
|
||||
});
|
||||
|
||||
List<Long> connectorClusterIdList = entry.getValue()
|
||||
.subList(0, Math.min(entry.getValue().size(), topN))
|
||||
.stream()
|
||||
.map(t -> t.getV1())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
ret.put(entry.getKey(), connectorClusterIdList);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.connector;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.BaseConnectorMetricESDAO;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.CONNECT_CONNECTOR_INDEX;
|
||||
|
||||
@Component
|
||||
public class ConnectorMetricESDAO extends BaseConnectorMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = CONNECT_CONNECTOR_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
register( this);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.mm2;
|
||||
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.BaseConnectorMetricESDAO;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import static com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant.CONNECT_MM2_INDEX;
|
||||
|
||||
@Component
|
||||
public class MirrorMakerMetricESDAO extends BaseConnectorMetricESDAO {
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
super.indexName = CONNECT_MM2_INDEX;
|
||||
checkCurrentDayIndexExist();
|
||||
register( this);
|
||||
}
|
||||
}
|
||||
@@ -13,9 +13,9 @@ package com.xiaojukeji.know.streaming.km.persistence.es.dsls;
|
||||
* Under the dslFiles directory, create a folder named after the class and files named after the methods
|
||||
*
|
||||
*/
|
||||
public class DslsConstant {
|
||||
public class DslConstant {
|
||||
|
||||
private DslsConstant() {}
|
||||
private DslConstant() {}
|
||||
|
||||
/**************************************************** Base ****************************************************/
|
||||
public static final String GET_LATEST_METRIC_TIME = "BaseMetricESDAO/getLatestMetricTime";
|
||||
@@ -62,11 +62,6 @@ public class DslsConstant {
|
||||
|
||||
public static final String LIST_PARTITION_LATEST_METRICS_BY_TOPIC = "PartitionMetricESDAO/listPartitionLatestMetricsByTopic";
|
||||
|
||||
/**************************************************** REPLICATION ****************************************************/
|
||||
public static final String GET_REPLICATION_AGG_SINGLE_METRICS = "ReplicationMetricESDAO/getAggSingleReplicationMetrics";
|
||||
|
||||
public static final String GET_REPLICATION_LATEST_METRICS = "ReplicationMetricESDAO/getReplicationLatestMetrics";
|
||||
|
||||
/**************************************************** Group ****************************************************/
|
||||
public static final String GET_GROUP_TOPIC_PARTITION = "GroupMetricESDAO/getTopicPartitionOfGroup";
|
||||
|
||||
@@ -82,4 +77,16 @@ public class DslsConstant {
|
||||
|
||||
/**************************************************** Zookeeper ****************************************************/
|
||||
public static final String GET_ZOOKEEPER_AGG_LIST_METRICS = "ZookeeperMetricESDAO/getAggListZookeeperMetrics";
|
||||
|
||||
/**************************************************** Connect-Cluster ****************************************************/
|
||||
public static final String GET_CONNECT_CLUSTER_AGG_LIST_METRICS = "ConnectClusterMetricESDAO/getAggListConnectClusterMetrics";
|
||||
|
||||
public static final String GET_CONNECT_CLUSTER_AGG_TOP_METRICS = "ConnectClusterMetricESDAO/getAggTopMetricsConnectClusters";
|
||||
|
||||
/**************************************************** Connect-Connector ****************************************************/
|
||||
public static final String GET_CONNECTOR_LATEST_METRICS = "ConnectorMetricESDAO/getConnectorLatestMetric";
|
||||
|
||||
public static final String GET_CONNECTOR_AGG_LIST_METRICS = "ConnectorMetricESDAO/getConnectorAggListMetric";
|
||||
|
||||
public static final String GET_CONNECTOR_AGG_TOP_METRICS = "ConnectorMetricESDAO/getConnectorAggTopMetric";
|
||||
}
|
||||
@@ -1,26 +1,14 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.dsls;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.alibaba.fastjson.JSONObject;
|
||||
import com.alibaba.fastjson.parser.DefaultJSONParser;
|
||||
import com.alibaba.fastjson.parser.Feature;
|
||||
import com.alibaba.fastjson.parser.ParserConfig;
|
||||
import com.alibaba.fastjson.serializer.SerializerFeature;
|
||||
import com.didiglobal.logi.log.ILog;
|
||||
import com.didiglobal.logi.log.LogFactory;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.EnvUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.LoggerUtil;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.ESFileLoader;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
@@ -33,50 +21,25 @@ import java.util.Map;
|
||||
*
|
||||
*/
|
||||
@Component
|
||||
public class DslLoaderUtil {
|
||||
private static final ILog LOGGER = LogFactory.getLog("ES_LOGGER");
|
||||
public class DslLoaderUtil extends ESFileLoader {
|
||||
private static final ILog LOGGER = LoggerUtil.getESLogger();
|
||||
|
||||
private static final String FILE_PATH = "es/dsl/";
|
||||
|
||||
/**
|
||||
* Container of query statements
|
||||
* key : fileRelativePath
|
||||
* value : dslContent
|
||||
*/
|
||||
private Map<String/*fileRelativePath*/, String/*dslContent*/> dslsMap = Maps.newHashMap();
|
||||
private Map<String, String> dslsMap = Maps.newHashMap();
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
LOGGER.info("class=DslLoaderUtil||method=init||DslLoaderUtil init start.");
|
||||
List<String> dslFileNames = Lists.newLinkedList();
|
||||
|
||||
// Use reflection to read the values of the constants defined in the class
|
||||
Field[] fields = DslsConstant.class.getDeclaredFields();
|
||||
for (int i = 0; i < fields.length; ++i) {
|
||||
fields[i].setAccessible(true);
|
||||
try {
|
||||
dslFileNames.add(fields[i].get(null).toString());
|
||||
} catch (IllegalAccessException e) {
|
||||
LOGGER.error("class=DslLoaderUtil||method=init||errMsg=fail to read {} error. ", fields[i].getName(),
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
// Load the DSL files and their contents
|
||||
for (String fileName : dslFileNames) {
|
||||
dslsMap.put(fileName, readDslFileInJarFile(fileName));
|
||||
}
|
||||
|
||||
// Log the loaded query statements
|
||||
LOGGER.info("class=DslLoaderUtil||method=init||msg=dsl files count {}", dslsMap.size());
|
||||
for (Map.Entry<String/*fileRelativePath*/, String/*dslContent*/> entry : dslsMap.entrySet()) {
|
||||
LOGGER.info("class=DslLoaderUtil||method=init||msg=file name {}, dsl content {}", entry.getKey(),
|
||||
entry.getValue());
|
||||
}
|
||||
|
||||
LOGGER.info("class=DslLoaderUtil||method=init||DslLoaderUtil init finished.");
|
||||
dslsMap.putAll(loaderFileContext(FILE_PATH, DslConstant.class.getDeclaredFields()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a query statement
|
||||
*
|
||||
* @param fileName
|
||||
* @return
|
||||
*/
|
||||
public String getDslByFileName(String fileName) {
|
||||
return dslsMap.get(fileName);
|
||||
@@ -84,16 +47,12 @@ public class DslLoaderUtil {
|
||||
|
||||
/**
|
||||
* Get a formatted query statement
|
||||
*
|
||||
* @param fileName
|
||||
* @param args
|
||||
* @return
|
||||
*/
|
||||
public String getFormatDslByFileName(String fileName, Object... args) {
|
||||
String loadDslContent = getDslByFileName(fileName);
|
||||
|
||||
if (StringUtils.isBlank(loadDslContent)) {
|
||||
LOGGER.error("class=DslLoaderUtil||method=getFormatDslByFileName||errMsg=dsl file {} content is empty",
|
||||
LOGGER.error("method=getFormatDslByFileName||errMsg=dsl file {} content is empty",
|
||||
fileName);
|
||||
return "";
|
||||
}
|
||||
@@ -102,133 +61,9 @@ public class DslLoaderUtil {
|
||||
String dsl = trimJsonBank( String.format(loadDslContent, args));
|
||||
// If this is not the production environment, log the DSL statement
|
||||
if (!EnvUtil.isOnline()) {
|
||||
LOGGER.info("class=DslLoaderUtil||method=getFormatDslByFileName||dsl={}", dsl);
|
||||
LOGGER.info("method=getFormatDslByFileName||dsl={}", dsl);
|
||||
}
|
||||
|
||||
return dsl;
|
||||
}
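A minimal sketch of how this is called; the argument order mirrors the call sites in BaseConnectorMetricESDAO, while the concrete values below are made up:

    // Illustrative sketch only — the ids and connector name are example values.
    long endTime = System.currentTimeMillis();
    long startTime = endTime - 5 * 60 * 1000L;
    String dsl = dslLoaderUtil.getFormatDslByFileName(
            DslConstant.GET_CONNECTOR_LATEST_METRICS,
            1L,                 // connectClusterId
            "demo-connector",   // connectorName
            startTime,
            endTime,
            "");                // optional extra term DSL, prefixed with "," when non-empty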
|
||||
|
||||
public String getFormatDslForCatIndexByCondition(String fileName, String boolMustDsl, Object... args) {
|
||||
String formatDslByFileName = getFormatDslByFileName(fileName, args);
|
||||
|
||||
return formatDslByFileName.replace("\"boolMustDsl\"", boolMustDsl);
|
||||
}
|
||||
|
||||
public String getFormatDslByFileNameByAggParam(String fileName, String clusterPhyMetrics, String interval,
|
||||
String aggType, Object... args) {
|
||||
String formatDslByFileName = getFormatDslByFileName(fileName, args);
|
||||
|
||||
return formatDslByFileName
|
||||
.replace("{interval}", interval)
|
||||
.replace("{clusterPhyMetrics}", clusterPhyMetrics)
|
||||
.replace("{aggType}", aggType);
|
||||
}
|
||||
|
||||
public String getFormatDslByFileNameAndOtherParam(String fileName, String interval, String aggsDsl,
|
||||
Object... args) {
|
||||
String formatDslByFileName = getFormatDslByFileName(fileName, args);
|
||||
return formatDslByFileName
|
||||
.replace("{interval}", interval)
|
||||
.replace("\"aggsDsl\":1", aggsDsl);
|
||||
}
|
||||
|
||||
|
||||
public String getDslByTopNNameInfo(String fileName, String interval, String topNameStr, String aggsDsl,
|
||||
Object... args) {
|
||||
String formatDslByFileName = getFormatDslByFileName(fileName, args);
|
||||
return formatDslByFileName
|
||||
.replace("{interval}", interval)
|
||||
.replace("\"aggsDsl\":1", aggsDsl)
|
||||
.replace("\"topNameListStr\"", topNameStr);
|
||||
}
|
||||
|
||||
/**************************************************** private method ****************************************************/
|
||||
/**
|
||||
* Strip whitespace from the JSON
|
||||
*
|
||||
* @param sourceDsl
|
||||
* @return
|
||||
*/
|
||||
private String trimJsonBank(String sourceDsl) {
|
||||
List<String> dslList = Lists.newArrayList();
|
||||
|
||||
DefaultJSONParser parser = null;
|
||||
Object obj = null;
|
||||
String dsl = sourceDsl;
|
||||
|
||||
// Parse JSON objects one after another until pos reaches 0
|
||||
for (;;) {
|
||||
try {
|
||||
// Feature.OrderedField.getMask() is needed here to keep the fields ordered
|
||||
parser = new DefaultJSONParser(dsl, ParserConfig.getGlobalInstance(),
|
||||
JSON.DEFAULT_PARSER_FEATURE | Feature.OrderedField.getMask());
|
||||
obj = parser.parse();
|
||||
} catch (Exception t) {
|
||||
LOGGER.error("class=DslLoaderUtil||method=trimJsonBank||errMsg=parse json {} error. ", dsl, t);
|
||||
}
|
||||
if (obj == null) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (obj instanceof JSONObject) {
|
||||
dslList.add( JSON.toJSONString(obj, SerializerFeature.WriteMapNullValue));
|
||||
int pos = parser.getLexer().pos();
|
||||
if (pos <= 0) {
|
||||
break;
|
||||
}
|
||||
dsl = dsl.substring(pos);
|
||||
parser.getLexer().close();
|
||||
} else {
|
||||
parser.getLexer().close();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// If formatting failed or there is more than one query statement, return the original statement
|
||||
if (dslList.isEmpty() || dslList.size() > 1) {
|
||||
return sourceDsl;
|
||||
}
|
||||
|
||||
return dslList.get(0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a DSL statement file from inside the jar
|
||||
*
|
||||
* @param fileName
|
||||
* @return
|
||||
*/
|
||||
private String readDslFileInJarFile(String fileName) {
|
||||
InputStream inputStream = this.getClass().getClassLoader()
|
||||
.getResourceAsStream( String.format("dsl/%s", fileName));
|
||||
if (inputStream != null) {
|
||||
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
|
||||
String line = null;
|
||||
List<String> lines = Lists.newLinkedList();
|
||||
try {
|
||||
while ((line = bufferedReader.readLine()) != null) {
|
||||
lines.add(line);
|
||||
}
|
||||
return StringUtils.join(lines, "");
|
||||
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("class=DslLoaderUtil||method=readDslFileInJarFile||errMsg=read file {} error. ", fileName,
|
||||
e);
|
||||
|
||||
return "";
|
||||
} finally {
|
||||
try {
|
||||
inputStream.close();
|
||||
} catch (IOException e) {
|
||||
LOGGER.error(
|
||||
"class=DslLoaderUtil||method=readDslFileInJarFile||errMsg=fail to close file {} error. ",
|
||||
fileName, e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
LOGGER.error("class=DslLoaderUtil||method=readDslFileInJarFile||errMsg=fail to read file {} content",
|
||||
fileName);
|
||||
return "";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.template;
|
||||
|
||||
/**
|
||||
* @author didi
|
||||
*/
|
||||
public class TemplateConstant {
|
||||
public static final String TOPIC_INDEX = "ks_kafka_topic_metric";
|
||||
public static final String CLUSTER_INDEX = "ks_kafka_cluster_metric";
|
||||
public static final String BROKER_INDEX = "ks_kafka_broker_metric";
|
||||
public static final String PARTITION_INDEX = "ks_kafka_partition_metric";
|
||||
public static final String GROUP_INDEX = "ks_kafka_group_metric";
|
||||
public static final String ZOOKEEPER_INDEX = "ks_kafka_zookeeper_metric";
|
||||
public static final String CONNECT_CLUSTER_INDEX = "ks_kafka_connect_cluster_metric";
|
||||
public static final String CONNECT_CONNECTOR_INDEX = "ks_kafka_connect_connector_metric";
|
||||
public static final String CONNECT_MM2_INDEX = "ks_kafka_connect_mirror_maker_metric";
|
||||
|
||||
private TemplateConstant() {
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.es.template;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.es.ESFileLoader;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.util.Map;
|
||||
|
||||
@Component
|
||||
public class TemplateLoaderUtil extends ESFileLoader {
|
||||
|
||||
private static final String FILE_PATH = "es/template/";
|
||||
|
||||
/**
|
||||
* Container of the loaded template contents
|
||||
*/
|
||||
private Map<String, String> templateMap = Maps.newHashMap();
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
templateMap.putAll(loaderFileContext(FILE_PATH, TemplateConstant.class.getDeclaredFields()));
|
||||
}
|
||||
|
||||
public String getContextByFileName(String fileName) {
|
||||
return templateMap.get(fileName);
|
||||
}
|
||||
}
|
||||
@@ -26,7 +26,7 @@ public class JmxDAOImpl implements JmxDAO {
|
||||
public Object getJmxValue(Long clusterPhyId, String jmxHost, Integer jmxPort, JmxConfig jmxConfig, ObjectName objectName, String attribute) {
|
||||
JmxConnectorWrap jmxConnectorWrap = null;
|
||||
try {
|
||||
jmxConnectorWrap = new JmxConnectorWrap(clusterPhyId, null, null, jmxHost, jmxPort, jmxConfig);
|
||||
jmxConnectorWrap = new JmxConnectorWrap("clusterPhyId: " + clusterPhyId, null, jmxHost, jmxPort, jmxConfig);
|
||||
if (!jmxConnectorWrap.checkJmxConnectionAndInitIfNeed()) {
|
||||
log.error(
|
||||
"method=getJmxValue||clusterPhyId={}||jmxHost={}||jmxPort={}||jmxConfig={}||errMgs=create jmx client failed",
|
||||
@@ -65,7 +65,7 @@ public class JmxDAOImpl implements JmxDAO {
|
||||
return object == null? null: (Long) object;
|
||||
} catch (Exception e) {
|
||||
log.error(
|
||||
"class=JmxDAOImpl||method=getServerStartTime||clusterPhyId={}||jmxHost={}||jmxPort={}||jmxConfig={}||errMsg=exception!",
|
||||
"method=getServerStartTime||clusterPhyId={}||jmxHost={}||jmxPort={}||jmxConfig={}||errMsg=exception!",
|
||||
clusterPhyId, jmxHost, jmxPort, jmxConfig, e
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,31 +1,39 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.kafka;
|
||||
|
||||
import com.didiglobal.logi.log.ILog;
|
||||
import com.didiglobal.logi.log.LogFactory;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.AbstractClusterLoadedChangedHandler;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.AdminClientConfig;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Slf4j
|
||||
@Component
|
||||
public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
|
||||
private static final Map<Long, AdminClient> KAFKA_ADMIN_CLIENT_MAP = new ConcurrentHashMap<>();
|
||||
private static final ILog LOGGER = LogFactory.getLog(KafkaAdminClient.class);
|
||||
|
||||
@Value("${client-pool.kafka-admin.client-cnt:1}")
|
||||
private Integer clientCnt;
|
||||
|
||||
private static final Map<Long, List<AdminClient>> KAFKA_ADMIN_CLIENT_MAP = new ConcurrentHashMap<>();
|
||||
|
||||
public AdminClient getClient(Long clusterPhyId) throws NotExistException {
|
||||
AdminClient adminClient = KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
|
||||
if (adminClient != null) {
|
||||
return adminClient;
|
||||
List<AdminClient> adminClientList = KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
|
||||
if (adminClientList != null) {
|
||||
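// Pick one client from the pool, spreading load by current time modulo the pool size.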
return adminClientList.get((int)(System.currentTimeMillis() % clientCnt));
|
||||
}
|
||||
|
||||
adminClient = this.createKafkaAdminClient(clusterPhyId);
|
||||
AdminClient adminClient = this.createKafkaAdminClient(clusterPhyId);
|
||||
if (adminClient == null) {
|
||||
throw new NotExistException("kafka admin-client not exist due to create failed");
|
||||
}
|
||||
@@ -61,18 +69,20 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
|
||||
try {
|
||||
modifyClientMapLock.lock();
|
||||
|
||||
AdminClient adminClient = KAFKA_ADMIN_CLIENT_MAP.remove(clusterPhyId);
|
||||
if (adminClient == null) {
|
||||
List<AdminClient> adminClientList = KAFKA_ADMIN_CLIENT_MAP.remove(clusterPhyId);
|
||||
if (adminClientList == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
log.info("close kafka AdminClient starting, clusterPhyId:{}", clusterPhyId);
|
||||
LOGGER.info("close kafka AdminClient starting, clusterPhyId:{}", clusterPhyId);
|
||||
|
||||
adminClient.close();
|
||||
boolean allSuccess = this.closeAdminClientList(adminClientList);
|
||||
|
||||
log.info("close kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
|
||||
if (allSuccess) {
|
||||
LOGGER.info("close kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("close kafka AdminClient failed, clusterPhyId:{}", clusterPhyId, e);
|
||||
LOGGER.error("close kafka AdminClient failed, clusterPhyId:{}", clusterPhyId, e);
|
||||
} finally {
|
||||
modifyClientMapLock.unlock();
|
||||
}
|
||||
@@ -88,29 +98,56 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
|
||||
}
|
||||
|
||||
private AdminClient createKafkaAdminClient(Long clusterPhyId, String bootstrapServers, Properties props) {
|
||||
List<AdminClient> adminClientList = null;
|
||||
try {
|
||||
modifyClientMapLock.lock();
|
||||
|
||||
AdminClient adminClient = KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
|
||||
if (adminClient != null) {
|
||||
return adminClient;
|
||||
adminClientList = KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
|
||||
if (adminClientList != null) {
|
||||
return adminClientList.get((int)(System.currentTimeMillis() % clientCnt));
|
||||
}
|
||||
|
||||
log.debug("create kafka AdminClient starting, clusterPhyId:{} props:{}", clusterPhyId, props);
|
||||
LOGGER.debug("create kafka AdminClient starting, clusterPhyId:{} props:{}", clusterPhyId, props);
|
||||
|
||||
if (props == null) {
|
||||
props = new Properties();
|
||||
}
|
||||
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
|
||||
KAFKA_ADMIN_CLIENT_MAP.put(clusterPhyId, AdminClient.create(props));
|
||||
|
||||
log.info("create kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
|
||||
adminClientList = new ArrayList<>();
|
||||
for (int i = 0; i < clientCnt; ++i) {
|
||||
adminClientList.add(AdminClient.create(props));
|
||||
}
|
||||
|
||||
KAFKA_ADMIN_CLIENT_MAP.put(clusterPhyId, adminClientList);
|
||||
|
||||
LOGGER.info("create kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
|
||||
} catch (Exception e) {
|
||||
log.error("create kafka AdminClient failed, clusterPhyId:{} props:{}", clusterPhyId, props, e);
|
||||
LOGGER.error("create kafka AdminClient failed, clusterPhyId:{} props:{}", clusterPhyId, props, e);
|
||||
|
||||
this.closeAdminClientList(adminClientList);
|
||||
} finally {
|
||||
modifyClientMapLock.unlock();
|
||||
}
|
||||
|
||||
return KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId);
|
||||
return KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId).get((int)(System.currentTimeMillis() % clientCnt));
|
||||
}
|
||||
|
||||
private boolean closeAdminClientList(List<AdminClient> adminClientList) {
|
||||
if (adminClientList == null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
boolean allSuccess = true;
|
||||
for (AdminClient adminClient: adminClientList) {
|
||||
try {
|
||||
adminClient.close();
|
||||
} catch (Exception e) {
|
||||
// ignore
|
||||
allSuccess = false;
|
||||
}
|
||||
}
|
||||
|
||||
return allSuccess;
|
||||
}
|
||||
}
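A hedged usage sketch of the pooled client; the injected field and the cluster id are assumptions, while describeCluster is standard AdminClient API:

    // Illustrative sketch only (checked exceptions omitted).
    AdminClient adminClient = kafkaAdminClient.getClient(1L);   // may throw NotExistException
    DescribeClusterResult result = adminClient.describeCluster();
    Collection<Node> brokers = result.nodes().get();            // blocks on the KafkaFuture
    // Do not close the client here: the pool owns it and closes it when the cluster is removed.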
|
||||
|
||||
@@ -3,9 +3,10 @@ package com.xiaojukeji.know.streaming.km.persistence.kafka;
|
||||
import com.didiglobal.logi.log.ILog;
|
||||
import com.didiglobal.logi.log.LogFactory;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
|
||||
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
|
||||
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
|
||||
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.AbstractClusterLoadedChangedHandler;
|
||||
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
|
||||
@@ -17,11 +18,12 @@ import org.springframework.stereotype.Component;
|
||||
import scala.Option;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Component
|
||||
public class KafkaAdminZKClient extends AbstractClusterLoadedChangedHandler implements KafkaClient<KafkaZkClient> {
|
||||
private static final ILog log = LogFactory.getLog(KafkaAdminZKClient.class);
|
||||
private static final ILog LOGGER = LogFactory.getLog(KafkaAdminZKClient.class);
|
||||
|
||||
/**
|
||||
* The KafkaZkClient provided by Kafka
|
||||
@@ -92,13 +94,13 @@ public class KafkaAdminZKClient extends AbstractClusterLoadedChangedHandler impl
|
||||
return;
|
||||
}
|
||||
|
||||
log.info("close ZK Client starting, clusterPhyId:{}", clusterPhyId);
|
||||
LOGGER.info("method=closeZKClient||clusterPhyId={}||msg=close ZK Client starting", clusterPhyId);
|
||||
|
||||
kafkaZkClient.close();
|
||||
|
||||
log.info("close ZK Client success, clusterPhyId:{}", clusterPhyId);
|
||||
LOGGER.info("method=closeZKClient||clusterPhyId={}||msg=close ZK Client success", clusterPhyId);
|
||||
} catch (Exception e) {
|
||||
log.error("close ZK Client failed, clusterPhyId:{}", clusterPhyId, e);
|
||||
LOGGER.error("method=closeZKClient||clusterPhyId={}||msg=close ZK Client failed||errMsg=exception!", clusterPhyId, e);
|
||||
} finally {
|
||||
modifyClientMapLock.unlock();
|
||||
}
|
||||
@@ -107,19 +109,19 @@ public class KafkaAdminZKClient extends AbstractClusterLoadedChangedHandler impl
|
||||
private KafkaZkClient createZKClient(Long clusterPhyId) throws NotExistException {
|
||||
ClusterPhy clusterPhy = LoadedClusterPhyCache.getByPhyId(clusterPhyId);
|
||||
if (clusterPhy == null) {
|
||||
log.warn("create ZK Client failed, cluster not exist, clusterPhyId:{}", clusterPhyId);
|
||||
LOGGER.warn("method=createZKClient||clusterPhyId={}||msg=create ZK Client failed, cluster not exist", clusterPhyId);
|
||||
throw new NotExistException(MsgConstant.getClusterPhyNotExist(clusterPhyId));
|
||||
}
|
||||
|
||||
if (ValidateUtils.isBlank(clusterPhy.getZookeeper())) {
|
||||
log.warn("create ZK Client failed, zookeeper not exist, clusterPhyId:{}", clusterPhyId);
|
||||
LOGGER.warn("method=createZKClient||clusterPhyId={}||msg=create ZK Client failed, zookeeper not exist", clusterPhyId);
|
||||
return null;
|
||||
}
|
||||
|
||||
return this.createZKClient(clusterPhyId, clusterPhy.getZookeeper());
|
||||
return this.createZKClient(clusterPhyId, clusterPhy);
|
||||
}
|
||||
|
||||
private KafkaZkClient createZKClient(Long clusterPhyId, String zookeeperAddress) {
|
||||
private KafkaZkClient createZKClient(Long clusterPhyId, ClusterPhy clusterPhy) {
|
||||
try {
|
||||
modifyClientMapLock.lock();
|
||||
|
||||
@@ -128,33 +130,54 @@ public class KafkaAdminZKClient extends AbstractClusterLoadedChangedHandler impl
|
||||
return kafkaZkClient;
|
||||
}
|
||||
|
||||
log.debug("create ZK Client starting, clusterPhyId:{} zookeeperAddress:{}", clusterPhyId, zookeeperAddress);
|
||||
ZKConfig zkConfig = this.getZKConfig(clusterPhy);
|
||||
|
||||
LOGGER.debug("method=createZKClient||clusterPhyId={}||clusterPhy={}||msg=create ZK Client starting", clusterPhyId, clusterPhy);
|
||||
|
||||
kafkaZkClient = KafkaZkClient.apply(
|
||||
zookeeperAddress,
|
||||
false,
|
||||
// 添加支持zk的Kerberos认证
|
||||
// true,
|
||||
Constant.DEFAULT_SESSION_TIMEOUT_UNIT_MS,
|
||||
Constant.DEFAULT_SESSION_TIMEOUT_UNIT_MS,
|
||||
clusterPhy.getZookeeper(),
|
||||
zkConfig.getOpenSecure(),
|
||||
zkConfig.getSessionTimeoutUnitMs(),
|
||||
zkConfig.getRequestTimeoutUnitMs(),
|
||||
5,
|
||||
Time.SYSTEM,
|
||||
"KnowStreaming-clusterPhyId-" + clusterPhyId,
|
||||
"SessionExpireListener",
|
||||
Option.apply("KnowStreaming-clusterPhyId-" + clusterPhyId),
|
||||
Option.apply(new ZKClientConfig())
|
||||
"KS-ZK-ClusterPhyId-" + clusterPhyId,
|
||||
"KS-ZK-SessionExpireListener-clusterPhyId-" + clusterPhyId,
|
||||
Option.apply("KS-ZK-ClusterPhyId-" + clusterPhyId),
|
||||
Option.apply(this.getZKConfig(clusterPhyId, zkConfig.getOtherProps()))
|
||||
);
|
||||
|
||||
KAFKA_ZK_CLIENT_MAP.put(clusterPhyId, kafkaZkClient);
|
||||
KAFKA_ZK_CLIENT_CREATE_TIME.put(clusterPhyId, System.currentTimeMillis());
|
||||
|
||||
log.info("create ZK Client success, clusterPhyId:{}", clusterPhyId);
|
||||
LOGGER.info("method=createZKClient||clusterPhyId={}||msg=create ZK Client success", clusterPhyId);
|
||||
} catch (Exception e) {
|
||||
log.error("create ZK Client failed, clusterPhyId:{} zookeeperAddress:{}", clusterPhyId, zookeeperAddress, e);
|
||||
LOGGER.error("method=createZKClient||clusterPhyId={}||clusterPhy={}||msg=create ZK Client failed||errMsg=exception", clusterPhyId, clusterPhy, e);
|
||||
} finally {
|
||||
modifyClientMapLock.unlock();
|
||||
}
|
||||
|
||||
return KAFKA_ZK_CLIENT_MAP.get(clusterPhyId);
|
||||
}
|
||||
|
||||
private ZKConfig getZKConfig(ClusterPhy clusterPhy) {
|
||||
ZKConfig zkConfig = ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class);
|
||||
if (zkConfig == null) {
|
||||
return new ZKConfig();
|
||||
}
|
||||
|
||||
return zkConfig;
|
||||
}
|
||||
|
||||
private ZKClientConfig getZKConfig(Long clusterPhyId, Properties props) {
|
||||
ZKClientConfig zkClientConfig = new ZKClientConfig();
|
||||
|
||||
try {
|
||||
props.entrySet().forEach(elem -> zkClientConfig.setProperty((String) elem.getKey(), (String) elem.getValue()));
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("method=getZKConfig||clusterPhyId={}||props={}||errMsg=exception", clusterPhyId, props);
|
||||
}
|
||||
|
||||
return zkClientConfig;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ import java.util.stream.Collectors;
|
||||
|
||||
@Component
|
||||
public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
|
||||
private static final ILog log = LogFactory.getLog(KafkaAdminZKClient.class);
|
||||
private static final ILog log = LogFactory.getLog(KafkaJMXClient.class);
|
||||
|
||||
@Autowired
|
||||
private BrokerDAO brokerDAO;
|
||||
@@ -45,10 +45,6 @@ public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
|
||||
|
||||
public JmxConnectorWrap getClient(Long clusterPhyId, Integer brokerId) {
|
||||
Map<Integer, JmxConnectorWrap> jmxMap = JMX_MAP.getOrDefault(clusterPhyId, new ConcurrentHashMap<>());
|
||||
if (jmxMap == null) {
|
||||
// The cluster does not exist, return null directly
|
||||
return null;
|
||||
}
|
||||
|
||||
JmxConnectorWrap jmxConnectorWrap = jmxMap.get(brokerId);
|
||||
if (jmxConnectorWrap != null) {
|
||||
@@ -107,7 +103,8 @@ public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
|
||||
protected void modify(ClusterPhy newClusterPhy, ClusterPhy oldClusterPhy) {
|
||||
if (newClusterPhy.getClientProperties().equals(oldClusterPhy.getClientProperties())
|
||||
&& newClusterPhy.getZookeeper().equals(oldClusterPhy.getZookeeper())
|
||||
&& newClusterPhy.getBootstrapServers().equals(oldClusterPhy.getBootstrapServers())) {
|
||||
&& newClusterPhy.getBootstrapServers().equals(oldClusterPhy.getBootstrapServers())
|
||||
&& newClusterPhy.getJmxProperties().equals(oldClusterPhy.getJmxProperties())) {
|
||||
// The cluster info changed, but none of the relevant fields did, so return directly
|
||||
return;
|
||||
}
|
||||
@@ -162,8 +159,7 @@ public class KafkaJMXClient extends AbstractClusterLoadedChangedHandler {
|
||||
}
|
||||
|
||||
JmxConnectorWrap jmxConnectorWrap = new JmxConnectorWrap(
|
||||
clusterPhy.getId(),
|
||||
brokerId,
|
||||
"clusterPhyId: " + clusterPhy.getId() + " brokerId: " + brokerId,
|
||||
broker.getStartTimestamp(),
|
||||
jmxConfig != null ? broker.getJmxHost(jmxConfig.getUseWhichEndpoint()) : broker.getHost(),
|
||||
broker.getJmxPort() != null ? broker.getJmxPort() : jmxConfig.getJmxPort(),
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.mysql.connect;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectClusterPO;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
@Repository
|
||||
public interface ConnectClusterDAO extends BaseMapper<ConnectClusterPO> {
|
||||
}
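These connect mappers only inherit the MyBatis-Plus BaseMapper CRUD contract; a minimal sketch of typical use (the enclosing service is an assumption):

    // Illustrative sketch only.
    @Autowired
    private ConnectClusterDAO connectClusterDAO;

    public List<ConnectClusterPO> listAllConnectClusters() {
        // BaseMapper.selectList(null) returns every row of the mapped table
        return connectClusterDAO.selectList(null);
    }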
|
||||
@@ -0,0 +1,9 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.mysql.connect;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectWorkerPO;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
@Repository
|
||||
public interface ConnectWorkerDAO extends BaseMapper<ConnectWorkerPO> {
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.mysql.connect;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
@Repository
|
||||
public interface ConnectorDAO extends BaseMapper<ConnectorPO> {
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.mysql.connect;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.WorkerConnectorPO;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
@Repository
|
||||
public interface WorkerConnectorDAO extends BaseMapper<WorkerConnectorPO> {
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.mysql.ha;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.xiaojukeji.know.streaming.km.common.bean.po.ha.HaActiveStandbyRelationPO;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
@Repository
|
||||
public interface HaActiveStandbyRelationDAO extends BaseMapper<HaActiveStandbyRelationPO> {
|
||||
}
|
||||
@@ -8,5 +8,5 @@ import org.springframework.stereotype.Repository;
|
||||
public interface TopicDAO extends BaseMapper<TopicPO> {
|
||||
int replaceAll(TopicPO topicPO);
|
||||
|
||||
int updateConfig(TopicPO topicPO);
|
||||
int updateConfigById(TopicPO topicPO);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
package com.xiaojukeji.know.streaming.km.persistence.utils;
|
||||
|
||||
import com.didiglobal.logi.log.ILog;
|
||||
import com.didiglobal.logi.log.LogFactory;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
|
||||
|
||||
public class LoadSQLUtil {
|
||||
private static final ILog LOGGER = LogFactory.getLog(LoadSQLUtil.class);
|
||||
|
||||
public static final String SQL_DDL_KS_KM = "sql/ddl-ks-km.sql";
|
||||
public static final String SQL_DDL_LOGI_JOB = "sql/ddl-logi-job.sql";
|
||||
public static final String SQL_DDL_LOGI_SECURITY = "sql/ddl-logi-security.sql";
|
||||
public static final String SQL_DML_KS_KM = "sql/dml-ks-km.sql";
|
||||
public static final String SQL_DML_LOGI = "sql/dml-logi.sql";
|
||||
|
||||
public static String loadSQL(String sqlFileName) {
|
||||
InputStream inputStream = LoadSQLUtil.class.getClassLoader().getResourceAsStream(sqlFileName);
|
||||
if (inputStream == null) {
|
||||
LOGGER.error("method=loadSQL||fileName={}||msg=read script failed", sqlFileName);
|
||||
return "";
|
||||
}
|
||||
|
||||
try {
|
||||
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
|
||||
String line = null;
|
||||
|
||||
StringBuilder sb = new StringBuilder();
|
||||
while ((line = bufferedReader.readLine()) != null) {
|
||||
sb.append(line).append("\n");
|
||||
}
|
||||
|
||||
return sb.toString();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("method=loadSQL||fileName={}||errMsg={}||msg=read script failed", sqlFileName, e.getMessage());
|
||||
} finally {
|
||||
try {
|
||||
inputStream.close();
|
||||
|
||||
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("method=loadSQL||fileName={}||errMsg={}||msg=close reading script failed", sqlFileName, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
private LoadSQLUtil() {
|
||||
}
|
||||
}
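A minimal sketch of how the loader is typically used; what consumes the returned script is outside this class and assumed here:

    // Illustrative sketch only.
    String ddl = LoadSQLUtil.loadSQL(LoadSQLUtil.SQL_DDL_KS_KM);
    if (!ddl.isEmpty()) {
        // e.g. hand the script to whatever SQL runner initializes the database
        System.out.println("loaded ks-km DDL, " + ddl.length() + " characters");
    }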
|
||||
@@ -12,7 +12,7 @@
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"brokerId": {
|
||||
"connectClusterId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
@@ -29,6 +29,16 @@
|
||||
}
|
||||
},
|
||||
"aggs": {
|
||||
%s
|
||||
"hist": {
|
||||
"date_histogram": {
|
||||
"field": "timestamp",
|
||||
"fixed_interval": "%s",
|
||||
"time_zone": "Asia/Shanghai",
|
||||
"min_doc_count": 0
|
||||
},
|
||||
"aggs": {
|
||||
%s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"size": 0,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"clusterPhyId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": %d,
|
||||
"lte": %d
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"aggs": {
|
||||
"hist": {
|
||||
"terms": {
|
||||
"field": "connectClusterId",
|
||||
"collect_mode": "breadth_first"
|
||||
},
|
||||
"aggs": {
|
||||
"hist": {
|
||||
"date_histogram": {
|
||||
"field": "timestamp",
|
||||
"fixed_interval": "%s",
|
||||
"time_zone": "Asia/Shanghai",
|
||||
"min_doc_count": 0
|
||||
},
|
||||
"aggs": {
|
||||
%s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"size": 0,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"clusterPhyId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"connectClusterId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"connectorName": {
|
||||
"value": "%s"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": %d,
|
||||
"lte": %d
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"aggs": {
|
||||
"hist": {
|
||||
"date_histogram": {
|
||||
"field": "timestamp",
|
||||
"fixed_interval": "%s",
|
||||
"time_zone": "Asia/Shanghai",
|
||||
"min_doc_count": 0
|
||||
},
|
||||
"aggs": {
|
||||
%s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"size": 0,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"clusterPhyId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": %d,
|
||||
"lte": %d
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"aggs": {
|
||||
"hist": {
|
||||
"terms": {
|
||||
"field": "connectorNameAndClusterId",
|
||||
"collect_mode": "breadth_first"
|
||||
},
|
||||
"aggs": {
|
||||
"hist": {
|
||||
"date_histogram": {
|
||||
"field": "timestamp",
|
||||
"fixed_interval": "%s",
|
||||
"time_zone": "Asia/Shanghai",
|
||||
"min_doc_count": 0
|
||||
},
|
||||
"aggs": {
|
||||
%s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,44 +1,31 @@
|
||||
{
|
||||
"size": 1,
|
||||
"size":1000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"clusterPhyId": {
|
||||
"connectClusterId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"brokerId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"topic": {
|
||||
"connectorName": {
|
||||
"value": "%s"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"partitionId": {
|
||||
"value": %d
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": %d,
|
||||
"lte": %d
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
%s
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -0,0 +1,101 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_broker_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "2"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"brokerId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"NetworkProcessorAvgIdle" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"UnderReplicatedPartitions" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesIn_min_15" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RequestHandlerAvgIdle" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"connectionsCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesIn_min_5" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthScore" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesOut" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesOut_min_15" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesIn" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesOut_min_5" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalRequestQueueSize" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"MessagesIn" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalProduceRequests" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckPassed" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalResponseQueueSize" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
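The index templates in this commit (this one and the ones that follow) use the legacy template format ("order" plus "index_patterns"), so they would typically be installed under /_template/&lt;name&gt; before the first metric documents are written. A minimal sketch with the Elasticsearch low-level REST client; the host and template name are assumptions for illustration:

```java
// Minimal sketch of installing a legacy index template like the one above,
// using the Elasticsearch low-level REST client. Host, template name and the
// origin of templateJson are assumptions for illustration.
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class TemplateInstaller {
    public static void install(String templateJson) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_template/ks_kafka_broker_metric");
            request.setJsonEntity(templateJson);
            client.performRequest(request);
        }
    }
}
```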
@@ -0,0 +1,186 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_cluster_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "2"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"Connections" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"BytesIn_min_15" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PartitionURP" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthScore_Topics" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"EventQueueSize" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"ActiveControllerCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"GroupDeads" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"BytesIn_min_5" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckTotal_Topics" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Partitions" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"BytesOut" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Groups" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"BytesOut_min_15" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"TotalRequestQueueSize" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckPassed_Groups" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"TotalProduceRequests" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckPassed" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"TotalLogSize" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"GroupEmptys" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PartitionNoLeader" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthScore_Brokers" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Messages" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Topics" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PartitionMinISR_E" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckTotal" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Brokers" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Replicas" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckTotal_Groups" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"GroupRebalances" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MessageIn" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthScore" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckPassed_Topics" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckTotal_Brokers" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PartitionMinISR_S" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"BytesIn" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"BytesOut_min_5" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"GroupActives" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MessagesIn" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"GroupReBalances" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckPassed_Brokers" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthScore_Groups" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"TotalResponseQueueSize" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"Zookeepers" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"LeaderMessages" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthScore_Cluster" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckPassed_Cluster" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"HealthCheckTotal_Cluster" : {
|
||||
"type" : "double"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"type" : "date"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_connect_cluster_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "2"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"connectClusterId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"ConnectorCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TaskCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorStartupAttemptsTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorStartupFailurePercentage" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorStartupFailureTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorStartupSuccessPercentage" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorStartupSuccessTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TaskStartupAttemptsTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TaskStartupFailurePercentage" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TaskStartupFailureTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TaskStartupSuccessPercentage" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TaskStartupSuccessTotal" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,194 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_connect_connector_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "2"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"connectClusterId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectorName" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"connectorNameAndClusterId" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"HealthState" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorTotalTaskCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckPassed" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorRunningTaskCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorPausedTaskCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorFailedTaskCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ConnectorUnassignedTaskCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BatchSizeAvg" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BatchSizeMax" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitAvgTimeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitMaxTimeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitFailurePercentage" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitSuccessPercentage" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"PollBatchAvgTimeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"PollBatchMaxTimeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordActiveCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordActiveCountAvg" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordActiveCountMax" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordPollRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordPollTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordWriteRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SourceRecordWriteTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitCompletionRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitCompletionTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitSkipRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetCommitSkipTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"PartitionCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"PutBatchAvgTimeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"PutBatchMaxTimeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordActiveCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordActiveCountAvg" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordActiveCountMax" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordLagMax" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordReadRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordReadTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordSendRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"SinkRecordSendTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"DeadletterqueueProduceFailures" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"DeadletterqueueProduceRequests" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"LastErrorTimestamp" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalErrorsLogged" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalRecordErrors" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalRecordFailures" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalRecordsSkipped" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalRetries" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,98 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_connect_mirror_maker_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "2"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"connectClusterId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectorName" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"connectorNameAndClusterId" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"HealthState" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ByteCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ByteRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RecordAgeMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RecordAgeMsAvg" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RecordAgeMsMax" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RecordAgeMsMin" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RecordCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"RecordRate" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationLatencyMs" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationLatencyMsAvg" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationLatencyMsMax" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationLatencyMsMin" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,74 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_group_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "6"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"group" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"partitionId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"topic" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"HealthScore" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"Lag" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"OffsetConsumed" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckPassed" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"groupMetric" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_partition_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "6"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"brokerId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"partitionId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"topic" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"LogStartOffset" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"Messages" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"LogEndOffset" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,116 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_topic_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "6"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"brokerId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"topic" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"BytesIn_min_15" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"Messages" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesRejected" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"PartitionURP" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckTotal" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationCount" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationBytesOut" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"ReplicationBytesIn" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"FailedFetchRequests" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesIn_min_5" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthScore" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"LogSize" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesOut" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesOut_min_15" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"FailedProduceRequests" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesIn" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"BytesOut_min_5" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"MessagesIn" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"TotalProduceRequests" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"HealthCheckPassed" : {
|
||||
"type" : "float"
|
||||
}
|
||||
}
|
||||
},
|
||||
"brokerAgg" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"index" : true,
|
||||
"type" : "date",
|
||||
"doc_values" : true
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
{
|
||||
"order" : 10,
|
||||
"index_patterns" : [
|
||||
"ks_kafka_zookeeper_metric*"
|
||||
],
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : "2"
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"properties" : {
|
||||
"routingValue" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterPhyId" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"metrics" : {
|
||||
"properties" : {
|
||||
"AvgRequestLatency" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MinRequestLatency" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MaxRequestLatency" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"OutstandingRequests" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"NodeCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"WatchCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"NumAliveConnections" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PacketsReceived" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"PacketsSent" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"EphemeralsCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"ApproximateDataSize" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"OpenFileDescriptorCount" : {
|
||||
"type" : "double"
|
||||
},
|
||||
"MaxFileDescriptorCount" : {
|
||||
"type" : "double"
|
||||
}
|
||||
}
|
||||
},
|
||||
"key" : {
|
||||
"type" : "text",
|
||||
"fields" : {
|
||||
"keyword" : {
|
||||
"ignore_above" : 256,
|
||||
"type" : "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp" : {
|
||||
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
|
||||
"type" : "date"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aliases" : { }
|
||||
}
|
||||
@@ -25,8 +25,8 @@
        (#{clusterPhyId}, #{topicName}, #{replicaNum}, #{partitionNum}, #{brokerIds}, #{partitionMap}, #{retentionMs}, #{type}, #{description})
    </insert>

    <update id="updateConfig" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO">
        UPDATE ks_km_topic SET retention_ms = #{retentionMs} WHERE cluster_phy_id = #{clusterPhyId} AND topic_name = #{topicName}
    <update id="updateConfigById" parameterType="com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO">
        UPDATE ks_km_topic SET retention_ms = #{retentionMs} WHERE id=#{id}
    </update>

</mapper>
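The two &lt;update&gt; statements above would normally be backed by methods on the corresponding MyBatis mapper interface; a hypothetical counterpart is sketched below (interface name and package are assumptions, only the statement ids and the parameter type come from the XML):

```java
// Hypothetical MyBatis mapper counterpart for the <update> statements above.
// Interface name and package are assumptions; the statement ids and the
// TopicPO parameter type come from the XML fragment.
package com.xiaojukeji.know.streaming.km.persistence.mysql;

import com.xiaojukeji.know.streaming.km.common.bean.po.topic.TopicPO;

public interface TopicDAO {
    int updateConfig(TopicPO topicPO);

    int updateConfigById(TopicPO topicPO);
}
```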
km-persistence/src/main/resources/sql/ddl-ks-km.sql (new file, 483 lines)
@@ -0,0 +1,483 @@
|
||||
-- KS-KM自身的SQL,KS-KM依赖 Logi-Job 和 Logi-Security,因此另外两个ddl sql文件也需要执行
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_broker`;
|
||||
CREATE TABLE `ks_km_broker` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '物理集群ID',
|
||||
`broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerId',
|
||||
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'broker主机名',
|
||||
`port` int(16) NOT NULL DEFAULT '-1' COMMENT 'broker端口',
|
||||
`jmx_port` int(16) NOT NULL DEFAULT '-1' COMMENT 'Jmx端口',
|
||||
`start_timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT '启动时间',
|
||||
`rack` varchar(128) NOT NULL DEFAULT '' COMMENT 'Rack信息',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1存活,0未存活',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`endpoint_map` varchar(1024) NOT NULL DEFAULT '' COMMENT '监听信息',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_broker_id` (`cluster_phy_id`,`broker_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Broker信息表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_broker_config`;
|
||||
CREATE TABLE `ks_km_broker_config` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerId',
|
||||
`config_name` varchar(192) NOT NULL DEFAULT '' COMMENT '配置名称',
|
||||
`config_value` text COMMENT '配置值',
|
||||
`diff_type` int(16) NOT NULL DEFAULT '-1' COMMENT '差异类型',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_broker_name` (`cluster_phy_id`,`broker_id`,`config_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Broker配置信息表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_cluster_balance_job`;
|
||||
CREATE TABLE `ks_km_cluster_balance_job` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`brokers` varchar(1024) NOT NULL DEFAULT '' COMMENT '均衡节点',
|
||||
`topic_black_list` varchar(4096) NOT NULL DEFAULT '' COMMENT 'topic黑名单',
|
||||
`type` int(16) NOT NULL DEFAULT '0' COMMENT '1:周期均衡,2:立即均衡',
|
||||
`balance_interval_json` text COMMENT '均衡区间详情',
|
||||
`metric_calculation_period` int(16) NOT NULL DEFAULT '0' COMMENT '指标计算周期,单位分钟',
|
||||
`reassignment_json` text COMMENT '迁移脚本',
|
||||
`parallel_num` int(16) NOT NULL DEFAULT '0' COMMENT '任务并行数',
|
||||
`execution_strategy` int(16) NOT NULL DEFAULT '0' COMMENT '执行策略, 1:优先最大副本,2:优先最小副本',
|
||||
`throttle_unit_b` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值',
|
||||
`total_reassign_size` double NOT NULL DEFAULT '0' COMMENT '总迁移大小',
|
||||
`total_reassign_replica_num` int(16) NOT NULL DEFAULT '0' COMMENT '总迁移副本数',
|
||||
`move_in_topic_list` varchar(4096) NOT NULL DEFAULT '' COMMENT '移入topic',
|
||||
`broker_balance_detail` text COMMENT '节点均衡详情',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态 1:进行中,2:准备,3,成功,4:失败,5:取消',
|
||||
`creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务开始时间',
|
||||
`finished_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务完成时间',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间',
|
||||
`description` text COMMENT '备注',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群均衡任务';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_cluster_balance_job_config`;
|
||||
CREATE TABLE `ks_km_cluster_balance_job_config` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`brokers` varchar(256) NOT NULL DEFAULT '' COMMENT '均衡节点',
|
||||
`topic_black_list` varchar(4096) NOT NULL DEFAULT '' COMMENT 'topic黑名单',
|
||||
`task_cron` varchar(64) NOT NULL DEFAULT '' COMMENT '任务周期',
|
||||
`balance_interval_json` text COMMENT '均衡区间详情',
|
||||
`metric_calculation_period` int(16) NOT NULL DEFAULT '0' COMMENT '指标计算周期,单位分钟',
|
||||
`reassignment_json` text COMMENT '迁移脚本',
|
||||
`parallel_num` int(16) NOT NULL DEFAULT '0' COMMENT '任务并行数',
|
||||
`execution_strategy` int(16) NOT NULL DEFAULT '0' COMMENT '执行策略, 1:优先最大副本,2:优先最小副本',
|
||||
`throttle_unit_b` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值',
|
||||
`creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态 0:未开启,1:开启',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群均衡任务';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_cluster_balance_reassign`;
|
||||
CREATE TABLE `ks_km_cluster_balance_reassign` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`job_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '父任务ID',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称',
|
||||
`partition_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区ID',
|
||||
`original_broker_ids` text COMMENT '源BrokerId列表',
|
||||
`reassign_broker_ids` text COMMENT '目标BrokerId列表',
|
||||
`start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务开始时间',
|
||||
`finished_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务完成时间',
|
||||
`extend_data` text COMMENT '扩展数据',
|
||||
`status` int(16) NOT NULL DEFAULT '2' COMMENT '任务状态',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群平衡迁移详情';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_group_member`;
|
||||
CREATE TABLE `ks_km_group_member` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'Topic名称',
|
||||
`group_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'Group名称',
|
||||
`kafka_user` varchar(192) NOT NULL DEFAULT '' COMMENT 'Kafka用户',
|
||||
`state` varchar(64) NOT NULL DEFAULT '' COMMENT '状态',
|
||||
`member_count` int(11) NOT NULL DEFAULT '0' COMMENT '成员数',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_topic_group` (`cluster_phy_id`,`topic_name`,`group_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='GroupMember信息表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_health_check_result`;
|
||||
CREATE TABLE `ks_km_health_check_result` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`dimension` int(11) NOT NULL DEFAULT '0' COMMENT '检查维度(0:未知,1:Cluster,2:Broker,3:Topic,4:Group)',
|
||||
`config_name` varchar(192) NOT NULL DEFAULT '' COMMENT '配置名',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '物理集群ID',
|
||||
`res_name` varchar(192) NOT NULL DEFAULT '' COMMENT '资源名称',
|
||||
`passed` int(11) NOT NULL DEFAULT '0' COMMENT '检查通过(0:未通过,1:通过)',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_dimension_config_cluster_res` (`dimension`,`config_name`,`cluster_phy_id`,`res_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='健康检查结果';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_job`;
|
||||
CREATE TABLE `ks_km_job` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键自增',
|
||||
`job_name` varchar(1024) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '任务名称',
|
||||
`job_type` tinyint(10) NOT NULL COMMENT '任务类型',
|
||||
`job_status` tinyint(10) NOT NULL COMMENT '任务状态',
|
||||
`job_data` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '任务的详细信息',
|
||||
`job_desc` varchar(1024) NOT NULL DEFAULT '' COMMENT '任务描述',
|
||||
`cluster_id` int(11) NOT NULL COMMENT 'kafka集群id',
|
||||
`target` varchar(8192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '任务执行对象',
|
||||
`running_status` varchar(256) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '任务运行详细状态(json), Success:7 Fail:1 Doing:2',
|
||||
`creator` varchar(45) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT '创建者',
|
||||
`plan_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '计划执行时间',
|
||||
`start_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '实际执行时间',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `index_cluster_id` (`cluster_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Job信息';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_kafka_acl`;
|
||||
CREATE TABLE `ks_km_kafka_acl` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`principal` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Kafka用户',
|
||||
`operation` int(11) NOT NULL DEFAULT '0' COMMENT '操作',
|
||||
`permission_type` int(11) NOT NULL DEFAULT '0' COMMENT '权限类型(0:未知,1:任意,2:拒绝,3:允许)',
|
||||
`host` varchar(192) NOT NULL DEFAULT '127.0.0.1' COMMENT '机器',
|
||||
`resource_type` int(11) NOT NULL DEFAULT '0' COMMENT '资源类型(0:未知,1:任意,2:Topic,3:Group,4:Cluster,5:事务,6:Token)',
|
||||
`resource_name` varchar(192) NOT NULL DEFAULT '' COMMENT '资源名称',
|
||||
`pattern_type` int(11) NOT NULL COMMENT '匹配类型(0:未知,1:任意,2:Match,3:Literal,4:prefixed)',
|
||||
`unique_field` varchar(1024) NOT NULL DEFAULT '' COMMENT '唯一字段,由cluster_phy_id等字段组成',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_uniq_field` (`unique_field`),
|
||||
KEY `idx_cluster_phy_id_principal_res_name` (`cluster_phy_id`,`principal`,`resource_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='ACL信息';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_kafka_change_record`;
|
||||
CREATE TABLE `ks_km_kafka_change_record` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`res_type` int(11) NOT NULL DEFAULT '-1' COMMENT '资源类型',
|
||||
`res_name` varchar(192) NOT NULL DEFAULT '' COMMENT '资源名称',
|
||||
`operate_type` int(11) NOT NULL DEFAULT '-1' COMMENT '操作类型',
|
||||
`operate_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '操作时间',
|
||||
`unique_field` varchar(1024) NOT NULL DEFAULT '' COMMENT '唯一键字段',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `unique_field` (`unique_field`),
|
||||
KEY `idx_cluster_update_time` (`cluster_phy_id` ASC, `update_time` ASC)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Kafka变更记录表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_kafka_controller`;
|
||||
CREATE TABLE `ks_km_kafka_controller` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerId',
|
||||
`broker_host` varchar(256) NOT NULL DEFAULT '' COMMENT '主机名',
|
||||
`broker_rack` varchar(256) NOT NULL DEFAULT '' COMMENT 'BrokerRack信息',
|
||||
`timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'controller变更时间,-1表示未存活',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_broker_timestamp` (`cluster_phy_id`,`broker_id`,`timestamp`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='controller记录表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_kafka_user`;
|
||||
CREATE TABLE `ks_km_kafka_user` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '名称',
|
||||
`token` varchar(8192) NOT NULL DEFAULT '' COMMENT '密钥',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_name` (`cluster_phy_id`,`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Kafka-User信息表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_partition`;
|
||||
CREATE TABLE `ks_km_partition` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称',
|
||||
`partition_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区ID',
|
||||
`leader_broker_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区的LeaderBroker,-1表示无Leader',
|
||||
`in_sync_replicas` varchar(512) NOT NULL DEFAULT '-1' COMMENT 'ISR',
|
||||
`assign_replicas` varchar(512) NOT NULL DEFAULT '-1' COMMENT 'AR',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_topic_partition` (`cluster_phy_id`,`topic_name`,`partition_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Partition信息表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_physical_cluster`;
|
||||
CREATE TABLE `ks_km_physical_cluster` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '集群id',
|
||||
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群名称',
|
||||
`zookeeper` varchar(2048) NOT NULL DEFAULT '' COMMENT 'zk地址',
|
||||
`bootstrap_servers` varchar(2048) NOT NULL DEFAULT '' COMMENT 'server地址',
|
||||
`kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本',
|
||||
`client_properties` text COMMENT 'Kafka客户端配置',
|
||||
`jmx_properties` text COMMENT 'JMX配置',
|
||||
`zk_properties` text COMMENT 'ZK配置',
|
||||
`description` text COMMENT '备注',
|
||||
`auth_type` int(11) NOT NULL DEFAULT '0' COMMENT '认证类型,-1未知,0:无认证,',
|
||||
`run_state` tinyint(4) NOT NULL DEFAULT '1' COMMENT '运行状态, 0表示未监控, 1监控中,有ZK,2:监控中,无ZK',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '接入时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='物理集群信息表';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_platform_cluster_config`;
|
||||
CREATE TABLE `ks_km_platform_cluster_config` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键自增',
|
||||
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`value_group` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项组',
|
||||
`value_name` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项名字',
|
||||
`value` text COMMENT '配置项的值',
|
||||
`description` text COMMENT '备注',
|
||||
`operator` varchar(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '操作者',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_group_name` (`cluster_id`,`value_group`,`value_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='KS平台集群配置';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_reassign_job`;
|
||||
CREATE TABLE `ks_km_reassign_job` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`reassignment_json` text COMMENT '迁移计划',
|
||||
`throttle_unit_byte` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值',
|
||||
`start_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务开始时间',
|
||||
`finished_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务完成时间',
|
||||
`creator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人',
|
||||
`description` text COMMENT '备注',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='迁移Job信息';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_reassign_sub_job`;
|
||||
CREATE TABLE `ks_km_reassign_sub_job` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
|
||||
`job_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '父任务ID',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称',
|
||||
`partition_id` int(11) NOT NULL DEFAULT '-1' COMMENT '分区ID',
|
||||
`original_broker_ids` text COMMENT '源BrokerId列表',
|
||||
`reassign_broker_ids` text COMMENT '目标BrokerId列表',
|
||||
`start_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务开始时间',
|
||||
`finished_time` timestamp NOT NULL DEFAULT '1971-1-1 00:00:00' COMMENT '任务完成时间',
|
||||
`extend_data` text COMMENT '扩展数据',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='迁移SubJob信息';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_topic`;
|
||||
CREATE TABLE `ks_km_topic` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群ID',
|
||||
`topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Topic名称',
|
||||
`replica_num` int(11) NOT NULL DEFAULT '-1' COMMENT '副本数',
|
||||
`partition_num` int(11) NOT NULL DEFAULT '-1' COMMENT '分区数',
|
||||
`broker_ids` varchar(2048) NOT NULL DEFAULT '' COMMENT 'BrokerId列表',
|
||||
`partition_map` text COMMENT '分区分布信息',
|
||||
`retention_ms` bigint(20) NOT NULL DEFAULT '-2' COMMENT '保存时间,-2:未知,-1:无限制,>=0对应时间,单位ms',
|
||||
`type` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'Topic类型,默认0,0:普通,1:Kafka内部',
|
||||
`description` text COMMENT '备注信息',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间(尽量与Topic实际创建时间一致)',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间(尽量与Topic实际创建时间一致)',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_topic_name` (`cluster_phy_id`,`topic_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Topic信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_app_node`;
|
||||
CREATE TABLE `ks_km_app_node` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`host_name` varchar(256) NOT NULL DEFAULT '' COMMENT 'host',
|
||||
`ip` varchar(256) NOT NULL DEFAULT '' COMMENT 'ip',
|
||||
`beat_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'node 的心跳时间',
|
||||
`app_name` varchar(128) NOT NULL DEFAULT '' COMMENT 'km 集群的应用名',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_app_host` (`app_name`,`host_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='km集群部署的node信息';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_zookeeper`;
|
||||
CREATE TABLE `ks_km_zookeeper` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '物理集群ID',
|
||||
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'zookeeper主机名',
|
||||
`port` int(16) NOT NULL DEFAULT '-1' COMMENT 'zookeeper端口',
|
||||
`role` varchar(16) NOT NULL DEFAULT '-1' COMMENT '角色, leader follower observer',
|
||||
`version` varchar(128) NOT NULL DEFAULT '' COMMENT 'zookeeper版本',
|
||||
`status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1存活,0未存活,11存活但是4字命令使用不了',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_host_port` (`cluster_phy_id`,`host`, `port`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Zookeeper信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_km_group`;
|
||||
CREATE TABLE `ks_km_group` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
|
||||
`name` varchar(192) COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Group名称',
|
||||
`member_count` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '成员数',
|
||||
`topic_members` text CHARACTER SET utf8 COMMENT 'group消费的topic列表',
|
||||
`partition_assignor` varchar(255) CHARACTER SET utf8 NOT NULL COMMENT '分配策略',
|
||||
`coordinator_id` int(11) NOT NULL COMMENT 'group协调器brokerId',
|
||||
`type` int(11) NOT NULL COMMENT 'group类型 0:consumer 1:connector',
|
||||
`state` varchar(64) CHARACTER SET utf8 NOT NULL DEFAULT '' COMMENT '状态',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_phy_id_name` (`cluster_phy_id`,`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Group信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_connect_cluster`;
|
||||
CREATE TABLE `ks_kc_connect_cluster` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'Connect集群ID',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群名称',
|
||||
`group_name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群Group名称',
|
||||
`cluster_url` varchar(1024) NOT NULL DEFAULT '' COMMENT '集群地址',
|
||||
`member_leader_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'URL地址',
|
||||
`version` varchar(64) NOT NULL DEFAULT '' COMMENT 'connect版本',
|
||||
`jmx_properties` text COMMENT 'JMX配置',
|
||||
`state` tinyint(4) NOT NULL DEFAULT '1' COMMENT '集群使用的消费组状态,也表示集群状态:-1 Unknown,0 ReBalance,1 Active,2 Dead,3 Empty',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '接入时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_id_group_name` (`id`,`group_name`),
|
||||
UNIQUE KEY `uniq_name_kafka_cluster` (`name`,`kafka_cluster_phy_id`),
|
||||
KEY `idx_kafka_cluster_phy_id` (`kafka_cluster_phy_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Connect集群信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_connector`;
|
||||
CREATE TABLE `ks_kc_connector` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect集群ID',
|
||||
`connector_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector名称',
|
||||
`connector_class_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector类',
|
||||
`connector_type` varchar(32) NOT NULL DEFAULT '' COMMENT 'Connector类型',
|
||||
`state` varchar(45) NOT NULL DEFAULT '' COMMENT '状态',
|
||||
`topics` text COMMENT '访问过的Topics',
|
||||
`task_count` int(11) NOT NULL DEFAULT '0' COMMENT '任务数',
|
||||
`heartbeat_connector_name` varchar(512) DEFAULT '' COMMENT '心跳检测connector名称',
|
||||
`checkpoint_connector_name` varchar(512) DEFAULT '' COMMENT '进度确认connector名称',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_connect_cluster_id_connector_name` (`connect_cluster_id`,`connector_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Connector信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_worker`;
|
||||
CREATE TABLE `ks_kc_worker` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect集群ID',
|
||||
`member_id` varchar(512) NOT NULL DEFAULT '' COMMENT '成员ID',
|
||||
`host` varchar(128) NOT NULL DEFAULT '' COMMENT '主机名',
|
||||
`jmx_port` int(16) NOT NULL DEFAULT '-1' COMMENT 'Jmx端口',
|
||||
`url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'URL信息',
|
||||
`leader_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'leaderURL信息',
|
||||
`leader` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 1是leader,0不是leader',
|
||||
`worker_id` varchar(128) NOT NULL COMMENT 'worker地址',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_id_member_id` (`connect_cluster_id`,`member_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='worker信息表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_kc_worker_connector`;
|
||||
CREATE TABLE `ks_kc_worker_connector` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka集群ID',
|
||||
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect集群ID',
|
||||
`connector_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector名称',
|
||||
`worker_member_id` varchar(256) NOT NULL DEFAULT '',
|
||||
`task_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'Task的ID',
|
||||
`state` varchar(128) DEFAULT NULL COMMENT '任务状态',
|
||||
`worker_id` varchar(128) DEFAULT NULL COMMENT 'worker信息',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_relation` (`connect_cluster_id`,`connector_name`,`task_id`,`worker_member_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Worker和Connector关系表';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `ks_ha_active_standby_relation`;
|
||||
CREATE TABLE `ks_ha_active_standby_relation` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`active_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '主集群ID',
|
||||
`standby_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '备集群ID',
|
||||
`res_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '资源名称',
|
||||
`res_type` int(11) NOT NULL DEFAULT '-1' COMMENT '资源类型,0:集群,1:镜像Topic,2:主备Topic',
|
||||
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uniq_cluster_res` (`res_type`,`active_cluster_phy_id`,`standby_cluster_phy_id`,`res_name`),
|
||||
UNIQUE KEY `uniq_res_type_standby_cluster_res_name` (`res_type`,`standby_cluster_phy_id`,`res_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='HA主备关系表';
|
||||
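Because ks_km_topic carries the uniq_cluster_phy_id_topic_name unique key, metadata-sync code can upsert topic rows idempotently instead of checking for existence first. A minimal JDBC sketch, assuming MySQL's ON DUPLICATE KEY UPDATE and an illustrative column subset:

```java
// Minimal JDBC sketch of an idempotent upsert against ks_km_topic, relying on
// the uniq_cluster_phy_id_topic_name unique key defined above. Connection
// handling and the column subset are illustrative assumptions.
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class TopicUpsert {
    private static final String SQL =
            "INSERT INTO ks_km_topic (cluster_phy_id, topic_name, replica_num, partition_num) "
          + "VALUES (?, ?, ?, ?) "
          + "ON DUPLICATE KEY UPDATE replica_num = VALUES(replica_num), partition_num = VALUES(partition_num)";

    public static void upsert(Connection conn, long clusterPhyId, String topicName,
                              int replicaNum, int partitionNum) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(SQL)) {
            ps.setLong(1, clusterPhyId);
            ps.setString(2, topicName);
            ps.setInt(3, replicaNum);
            ps.setInt(4, partitionNum);
            ps.executeUpdate();
        }
    }
}
```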
km-persistence/src/main/resources/sql/ddl-logi-job.sql (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
-- Logi-Job模块的sql,安装KS-KM需要执行该sql
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `logi_job`;
|
||||
CREATE TABLE `logi_job` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT,
|
||||
`job_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'task taskCode',
|
||||
`task_code` varchar(255) NOT NULL DEFAULT '' COMMENT '任务code',
|
||||
`class_name` varchar(255) NOT NULL DEFAULT '' COMMENT '类的全限定名',
|
||||
`try_times` int(10) NOT NULL DEFAULT '0' COMMENT '第几次重试',
|
||||
`worker_code` varchar(200) NOT NULL DEFAULT '' COMMENT '执行机器',
|
||||
`app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称',
|
||||
`start_time` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '开始时间',
|
||||
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `job_code` (`job_code`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='正在执行的job信息';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `logi_job_log`;
|
||||
CREATE TABLE `logi_job_log` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT,
|
||||
`job_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'job taskCode',
|
||||
`task_code` varchar(255) NOT NULL DEFAULT '' COMMENT '任务code',
|
||||
`task_name` varchar(255) NOT NULL DEFAULT '' COMMENT '任务名称',
|
||||
`task_desc` varchar(255) NOT NULL DEFAULT '' COMMENT '任务描述',
|
||||
`task_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '任务id',
|
||||
`class_name` varchar(255) NOT NULL DEFAULT '' COMMENT '类的全限定名',
|
||||
`try_times` int(10) NOT NULL DEFAULT '0' COMMENT '第几次重试',
|
||||
`worker_code` varchar(200) NOT NULL DEFAULT '' COMMENT '执行机器',
|
||||
`worker_ip` varchar(200) NOT NULL DEFAULT '' COMMENT '执行机器ip',
|
||||
`start_time` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '开始时间',
|
||||
`end_time` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '结束时间',
|
||||
`status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '执行结果 1成功 2失败 3取消',
|
||||
`error` text NOT NULL COMMENT '错误信息',
|
||||
`result` text NOT NULL COMMENT '执行结果',
|
||||
`app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称',
|
||||
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `index_job_code` (`job_code`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='job执行历史日志';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `logi_task`;
|
||||
CREATE TABLE `logi_task` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT,
|
||||
`task_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'task taskCode',
|
||||
`task_name` varchar(255) NOT NULL DEFAULT '' COMMENT '名称',
|
||||
`task_desc` varchar(1000) NOT NULL DEFAULT '' COMMENT '任务描述',
|
||||
`cron` varchar(100) NOT NULL DEFAULT '' COMMENT 'cron 表达式',
|
||||
`class_name` varchar(255) NOT NULL DEFAULT '' COMMENT '类的全限定名',
|
||||
`params` varchar(1000) NOT NULL DEFAULT '' COMMENT '执行参数 map 形式{key1:value1,key2:value2}',
|
||||
`retry_times` int(10) NOT NULL DEFAULT '0' COMMENT '允许重试次数',
|
||||
`last_fire_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '上次执行时间',
|
||||
`timeout` bigint(20) NOT NULL DEFAULT '0' COMMENT '超时 毫秒',
|
||||
`status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '1等待 2运行中 3暂停',
|
||||
`sub_task_codes` varchar(1000) NOT NULL DEFAULT '' COMMENT '子任务code列表,逗号分隔',
|
||||
`consensual` varchar(200) NOT NULL DEFAULT '' COMMENT '执行策略',
|
||||
`owner` varchar(200) NOT NULL DEFAULT '' COMMENT '责任人',
|
||||
`task_worker_str` varchar(3000) NOT NULL DEFAULT '' COMMENT '机器执行信息',
|
||||
`app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称',
|
||||
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `task_code` (`task_code`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='任务信息';
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `logi_task_lock`;
|
||||
CREATE TABLE `logi_task_lock` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT,
|
||||
`task_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'task taskCode',
|
||||
`worker_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker taskCode',
|
||||
`app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称',
|
||||
`expire_time` bigint(20) NOT NULL DEFAULT '0' COMMENT '过期时间',
|
||||
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='任务锁';

DROP TABLE IF EXISTS `logi_worker`;
CREATE TABLE `logi_worker` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`worker_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker taskCode',
`worker_name` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker名',
`ip` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker的ip',
`cpu` int(11) NOT NULL DEFAULT '0' COMMENT 'cpu数量',
`cpu_used` double NOT NULL DEFAULT '0' COMMENT 'cpu使用率',
`memory` double NOT NULL DEFAULT '0' COMMENT '内存,以M为单位',
`memory_used` double NOT NULL DEFAULT '0' COMMENT '内存使用率',
`jvm_memory` double NOT NULL DEFAULT '0' COMMENT 'jvm堆大小,以M为单位',
`jvm_memory_used` double NOT NULL DEFAULT '0' COMMENT 'jvm堆使用率',
`job_num` int(10) NOT NULL DEFAULT '0' COMMENT '正在执行job数',
`heartbeat` datetime DEFAULT '1971-01-01 00:00:00' COMMENT '心跳时间',
`app_name` varchar(100) NOT NULL DEFAULT '' COMMENT '被调度的应用名称',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`id`),
UNIQUE KEY `worker_code` (`worker_code`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='worker信息';

DROP TABLE IF EXISTS `logi_worker_blacklist`;
CREATE TABLE `logi_worker_blacklist` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`worker_code` varchar(100) NOT NULL DEFAULT '' COMMENT 'worker taskCode',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`id`),
UNIQUE KEY `worker_code` (`worker_code`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='worker黑名单列表';
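
Reading `logi_worker` together with `logi_worker_blacklist`, a scheduler could pick workers that have reported a recent heartbeat and are not blacklisted. The query below is only an illustrative sketch; the five-minute freshness window is an assumption, not a value taken from the project.

-- Hypothetical: list schedulable workers (fresh heartbeat, not blacklisted), least loaded first.
SELECT w.`worker_code`, w.`ip`, w.`job_num`
FROM `logi_worker` w
LEFT JOIN `logi_worker_blacklist` b ON b.`worker_code` = w.`worker_code`
WHERE b.`id` IS NULL
  AND w.`heartbeat` > NOW() - INTERVAL 5 MINUTE
ORDER BY w.`job_num` ASC;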

206 km-persistence/src/main/resources/sql/ddl-logi-security.sql Normal file
@@ -0,0 +1,206 @@
#----------------------- Create tables -----------------------
DROP TABLE IF EXISTS `logi_security_dept`;
CREATE TABLE `logi_security_dept`
(
id int auto_increment primary key,
dept_name varchar(10) not null comment '部门名',
parent_id int not null comment '父部门id',
leaf tinyint(1) not null comment '是否叶子部门',
level tinyint not null comment 'parentId为0的层级为1',
description varchar(20) null comment '描述',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '部门信息表';

DROP TABLE IF EXISTS `logi_security_message`;
CREATE TABLE `logi_security_message`
(
id int auto_increment primary key,
title varchar(60) not null comment '标题',
content varchar(256) null comment '内容',
read_tag tinyint(1) default 0 null comment '是否已读',
oplog_id int null comment '操作日志id',
user_id int null comment '这条消息属于哪个用户的,用户id',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '消息中心';

DROP TABLE IF EXISTS `logi_security_oplog`;
CREATE TABLE `logi_security_oplog`
(
id int auto_increment primary key,
operator_ip varchar(64) not null comment '操作者ip',
operator varchar(64) null comment '操作者账号',
operate_page varchar(64) not null default '' comment '操作页面',
operate_type varchar(64) not null comment '操作类型',
target_type varchar(64) not null comment '对象分类',
target varchar(1024) not null comment '操作对象',
operation_methods varchar(64) not null default '' comment '操作方式',
detail text null comment '日志详情',
create_time timestamp default CURRENT_TIMESTAMP null,
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '操作日志';

DROP TABLE IF EXISTS `logi_security_oplog_extra`;
CREATE TABLE `logi_security_oplog_extra`
(
id int auto_increment primary key,
info varchar(16) null comment '信息',
type tinyint not null comment '哪种信息:1:操作页面;2:操作类型;3:对象分类',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '操作日志信息(操作页面、操作类型、对象分类)';

DROP TABLE IF EXISTS `logi_security_permission`;
CREATE TABLE `logi_security_permission`
(
id int auto_increment primary key,
permission_name varchar(40) not null comment '权限名字',
parent_id int not null comment '父权限id',
leaf tinyint(1) not null comment '是否叶子权限点(具体的操作)',
level tinyint not null comment '权限点的层级(parentId为0的层级为1)',
description varchar(64) null comment '权限点描述',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '权限表';
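
The permission table forms a small tree: rows with parent_id 0 are level-1 menus, and leaf rows are the concrete actions grouped under them. Listing the actions per menu therefore reduces to a self-join; the query below is an illustrative sketch, not SQL shipped with the project.

-- Hypothetical: list all leaf permissions under their level-1 parent menu.
SELECT p_parent.permission_name AS menu, p_leaf.permission_name AS action
FROM logi_security_permission p_leaf
JOIN logi_security_permission p_parent ON p_leaf.parent_id = p_parent.id
WHERE p_leaf.leaf = 1
  AND p_leaf.is_delete = 0
ORDER BY p_parent.id, p_leaf.id;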

DROP TABLE IF EXISTS `logi_security_project`;
CREATE TABLE `logi_security_project`
(
id int auto_increment comment '项目id' primary key,
project_code varchar(128) not null comment '项目编号',
project_name varchar(128) not null comment '项目名',
description varchar(512) default '' not null comment '项目描述',
dept_id int not null comment '部门id',
running tinyint(1) default 1 not null comment '启用 or 停用',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '项目表';

DROP TABLE IF EXISTS `logi_security_resource_type`;
CREATE TABLE `logi_security_resource_type`
(
id int auto_increment primary key,
type_name varchar(16) null comment '资源类型名',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '资源类型表';

DROP TABLE IF EXISTS `logi_security_role`;
CREATE TABLE `logi_security_role`
(
id int auto_increment primary key,
role_code varchar(128) not null comment '角色编号',
role_name varchar(128) not null comment '名称',
description varchar(128) null comment '角色描述',
last_reviser varchar(30) null comment '最后修改人',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '角色信息';

DROP TABLE IF EXISTS `logi_security_role_permission`;
CREATE TABLE `logi_security_role_permission`
(
id int auto_increment primary key,
role_id int not null comment '角色id',
permission_id int not null comment '权限id',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '角色权限表(只保留叶子权限与角色关系)';

DROP TABLE IF EXISTS `logi_security_user`;
CREATE TABLE `logi_security_user`
(
id int auto_increment primary key,
user_name varchar(64) not null comment '用户账号',
pw varchar(2048) not null comment '用户密码',
salt char(5) default '' not null comment '密码盐',
real_name varchar(128) default '' not null comment '真实姓名',
phone char(20) default '' not null comment 'mobile',
email varchar(30) default '' not null comment 'email',
dept_id int null comment '所属部门id',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
create_time timestamp default CURRENT_TIMESTAMP null comment '注册时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户信息';

DROP TABLE IF EXISTS `logi_security_user_project`;
CREATE TABLE `logi_security_user_project`
(
id int auto_increment primary key,
user_id int not null comment '用户id',
project_id int not null comment '项目id',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户项目关系表(项目负责人)';

DROP TABLE IF EXISTS `logi_security_user_resource`;
CREATE TABLE `logi_security_user_resource`
(
id int auto_increment primary key,
user_id int not null comment '用户id',
project_id int not null comment '资源所属项目id',
resource_type_id int not null comment '资源类别id',
resource_id int not null comment '资源id',
control_level tinyint not null comment '管理级别:1(查看权限)2(管理权限)',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户和资源关系表';
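
Because control_level encodes view (1) vs. manage (2) access, an authorization check against this table reduces to an existence query. The statement below is only a sketch with placeholder ids; it is not part of the shipped SQL.

-- Hypothetical: does user 42 hold manage-level access to resource 7 of type 3 in project 5?
SELECT EXISTS (
  SELECT 1
  FROM logi_security_user_resource
  WHERE user_id = 42
    AND project_id = 5
    AND resource_type_id = 3
    AND resource_id = 7
    AND control_level = 2
    AND is_delete = 0
) AS has_manage_access;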

DROP TABLE IF EXISTS `logi_security_user_role`;
CREATE TABLE `logi_security_user_role`
(
id int auto_increment primary key,
user_id int not null comment '用户id',
role_id int not null comment '角色id',
create_time timestamp default CURRENT_TIMESTAMP null comment '创建时间',
update_time timestamp default CURRENT_TIMESTAMP null on update CURRENT_TIMESTAMP comment '更新时间',
is_delete tinyint(1) default 0 not null comment '逻辑删除',
app_name varchar(16) null comment '应用名称'
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 comment '用户角色表';
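
With user, role, role-permission and user-role tables all defined, resolving which concrete actions an account may perform is a chain of joins. The query below sketches that resolution path; it is illustrative only and not code shipped with the project.

-- Hypothetical: resolve all leaf permissions granted to a given user account.
SELECT DISTINCT p.permission_name
FROM logi_security_user u
JOIN logi_security_user_role ur ON ur.user_id = u.id AND ur.is_delete = 0
JOIN logi_security_role_permission rp ON rp.role_id = ur.role_id AND rp.is_delete = 0
JOIN logi_security_permission p ON p.id = rp.permission_id AND p.leaf = 1 AND p.is_delete = 0
WHERE u.user_name = 'admin'
  AND u.is_delete = 0;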

-- ----------------------------
-- Table structure for logi_security_config
-- ----------------------------
DROP TABLE IF EXISTS `logi_security_config`;
CREATE TABLE `logi_security_config`
(
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键自增',
`value_group` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项组',
`value_name` varchar(100) NOT NULL DEFAULT '' COMMENT '配置项名字',
`value` text COMMENT '配置项的值',
`edit` int(4) NOT NULL DEFAULT '1' COMMENT '是否可以编辑 1 不可编辑(程序获取) 2 可编辑',
`status` int(4) NOT NULL DEFAULT '1' COMMENT '1 正常 2 禁用',
`memo` varchar(1000) NOT NULL DEFAULT '' COMMENT '备注',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
`is_delete` tinyint(1) NOT NULL DEFAULT '0' COMMENT '逻辑删除',
`app_name` varchar(16) COLLATE utf8_bin DEFAULT NULL COMMENT '应用名称',
`operator` varchar(16) COLLATE utf8_bin DEFAULT NULL COMMENT '操作者',
PRIMARY KEY (`id`),
KEY `idx_group_name` (`value_group`,`value_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 COMMENT='logi配置项';

23 km-persistence/src/main/resources/sql/dml-ks-km.sql Normal file
@@ -0,0 +1,23 @@
-- Health check configurations
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_CLUSTER_NO_CONTROLLER','{ \"value\": 1, \"weight\": 30 } ','集群Controller数正常','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_REQUEST_QUEUE_FULL','{ \"value\": 10, \"weight\": 20 } ','Broker-RequestQueueSize指标','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW','{ \"value\": 0.8, \"weight\": 20 } ','Broker-NetworkProcessorAvgIdlePercent指标','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_GROUP_RE_BALANCE_TOO_FREQUENTLY','{\n \"latestMinutes\": 10,\n \"detectedTimes\": 8,\n \"weight\": 10\n}\n','Group的re-balance频率','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_NO_LEADER','{ \"value\": 1, \"weight\": 10 } ','Topic 无Leader数','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_UNDER_REPLICA_TOO_LONG','{ \"latestMinutes\": 10, \"detectedTimes\": 8, \"weight\": 10 } ','Topic 未同步持续时间','know-streaming');

INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_BRAIN_SPLIT', '{ \"value\": 1} ', 'ZK 脑裂', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_OUTSTANDING_REQUESTS', '{ \"amount\": 100, \"ratio\":0.8} ', 'ZK Outstanding 请求堆积数', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_WATCH_COUNT', '{ \"amount\": 100000, \"ratio\": 0.8 } ', 'ZK WatchCount 数', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_ALIVE_CONNECTIONS', '{ \"amount\": 10000, \"ratio\": 0.8 } ', 'ZK 连接数', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_APPROXIMATE_DATA_SIZE', '{ \"amount\": 524288000, \"ratio\": 0.8 } ', 'ZK 数据大小(Byte)', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_SENT_RATE', '{ \"amount\": 500000, \"ratio\": 0.8 } ', 'ZK 发包数', 'admin');

INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECTOR_FAILED_TASK_COUNT', '{\"value\" : 1}', 'Connector失败状态的任务数量', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECTOR_UNASSIGNED_TASK_COUNT', '{\"value\" : 1}', 'Connector未被分配的任务数量', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE', '{\"value\" : 0.05}', 'Connect集群任务启动失败概率', 'admin');

INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_TOTAL_RECORD_ERRORS', '{\"value\" : 1}', 'MirrorMaker消息处理错误的次数', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_REPLICATION_LATENCY_MS_MAX', '{\"value\" : 6000}', 'MirrorMaker消息复制最大延迟时间', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_UNASSIGNED_TASK_COUNT', '{\"value\" : 20}', 'MirrorMaker未被分配的任务数量', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_MIRROR_MAKER_FAILED_TASK_COUNT', '{\"value\" : 10}', 'MirrorMaker失败状态的任务数量', 'admin');
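
These rows all use cluster_id -1 as the platform-wide default. A plausible way to tighten a single check for one physical cluster is to add a row keyed to that cluster's id, as sketched below with placeholder values; whether per-cluster rows override the -1 default is an assumption about how the application resolves this config, not something stated in this file.

-- Hypothetical: a stricter Topic-no-leader threshold applied only to physical cluster 3.
INSERT INTO `ks_km_platform_cluster_config`
(`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`)
VALUES
(3, 'HEALTH', 'HC_TOPIC_NO_LEADER', '{ \"value\": 0, \"weight\": 10 }', 'Topic 无Leader数(集群3专用)', 'admin');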

121 km-persistence/src/main/resources/sql/dml-logi.sql Normal file
@@ -0,0 +1,121 @@
-- Initialize permissions
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1593', '多集群管理', '0', '0', '1', '多集群管理', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1595', '系统管理', '0', '0', '1', '系统管理', '0', 'know-streaming');

-- Multi-cluster management permissions
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1597', '接入集群', '1593', '1', '2', '接入集群', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1599', '删除集群', '1593', '1', '2', '删除集群', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1601', 'Cluster-修改集群信息', '1593', '1', '2', 'Cluster-修改集群信息', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1603', 'Cluster-修改健康规则', '1593', '1', '2', 'Cluster-修改健康规则', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1605', 'Broker-修改Broker配置', '1593', '1', '2', 'Broker-修改Broker配置', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1607', 'Topic-新增Topic', '1593', '1', '2', 'Topic-新增Topic', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1609', 'Topic-扩分区', '1593', '1', '2', 'Topic-扩分区', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1611', 'Topic-删除Topic', '1593', '1', '2', 'Topic-删除Topic', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1613', 'Topic-重置Offset', '1593', '1', '2', 'Topic-重置Offset', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1615', 'Topic-修改Topic配置', '1593', '1', '2', 'Topic-修改Topic配置', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1617', 'Consumers-重置Offset', '1593', '1', '2', 'Consumers-重置Offset', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1619', 'Test-Producer', '1593', '1', '2', 'Test-Producer', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1621', 'Test-Consumer', '1593', '1', '2', 'Test-Consumer', '0', 'know-streaming');

-- System management permissions
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1623', '配置管理-新增配置', '1595', '1', '2', '配置管理-新增配置', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1625', '配置管理-编辑配置', '1595', '1', '2', '配置管理-编辑配置', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1627', '配置管理-删除配置', '1595', '1', '2', '配置管理-删除配置', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1629', '用户管理-新增人员', '1595', '1', '2', '用户管理-新增人员', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1631', '用户管理-编辑人员', '1595', '1', '2', '用户管理-编辑人员', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1633', '用户管理-修改人员密码', '1595', '1', '2', '用户管理-修改人员密码', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1635', '用户管理-删除人员', '1595', '1', '2', '用户管理-删除人员', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1637', '用户管理-新增角色', '1595', '1', '2', '用户管理-新增角色', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1639', '用户管理-编辑角色', '1595', '1', '2', '用户管理-编辑角色', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1641', '用户管理-分配用户角色', '1595', '1', '2', '用户管理-分配用户角色', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1643', '用户管理-删除角色', '1595', '1', '2', '用户管理-删除角色', '0', 'know-streaming');

-- Multi-cluster management permissions added on 2022-09-06
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2000', '多集群管理查看', '1593', '1', '2', '多集群管理查看', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2002', 'Topic-迁移副本', '1593', '1', '2', 'Topic-迁移副本', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2004', 'Topic-扩缩副本', '1593', '1', '2', 'Topic-扩缩副本', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2006', 'Cluster-LoadReBalance-周期均衡', '1593', '1', '2', 'Cluster-LoadReBalance-周期均衡', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2008', 'Cluster-LoadReBalance-立即均衡', '1593', '1', '2', 'Cluster-LoadReBalance-立即均衡', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2010', 'Cluster-LoadReBalance-设置集群规格', '1593', '1', '2', 'Cluster-LoadReBalance-设置集群规格', '0', 'know-streaming');

-- System management permissions added on 2022-09-06
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('3000', '系统管理查看', '1595', '1', '2', '系统管理查看', '0', 'know-streaming');

-- Initialize users
-- INSERT INTO `logi_security_user` (`id`, `user_name`, `pw`, `real_name`, `is_delete`, `app_name`) VALUES ('1', 'admin', 'V1ZkU2RHRlhOSGxOUkVsNVdETjBRVlp0Y0V0T1IwWnlaVEZ6YWxGRVJrRkpNVEU1VTJwYVUySkhlRzlSU0RBOWUwQldha28wWVd0N1d5TkFNa0FqWFgxS05sSnNiR2hBZlE9PXtAVmpKNGFre1sjQDNAI119SjZSbGxoQH0=Mv{#cdRgJ45Lqx}3IubEW87!==', '系统管理员', '0', 'know-streaming');
INSERT INTO `logi_security_user` (`id`, `user_name`, `pw`, `real_name`, `is_delete`, `app_name`) VALUES ('1', 'admin', 'V1ZkU2RHRlhOVGRSUmxweFUycFNhR0V6ZEdKSk1FRjRVVU5PWkdaVmJ6SlZiWGh6WVVWQ09YdEFWbXBLTkdGcmUxc2pRREpBSTExOVNqWlNiR3hvUUgwPXtAVmpKNGFre1sjQDNAI119SjZSbGxoQH0=Mv{#cdRgJ45Lqx}3IubEW87!==', '系统管理员', '0', 'know-streaming');

-- Initialize roles
INSERT INTO `logi_security_role` (`id`, `role_code`, `role_name`, `description`, `last_reviser`, `is_delete`, `app_name`) VALUES ('1677', 'r15477137', '管理员角色', '包含系统所有权限', 'admin', '0', 'know-streaming');

-- Initialize role-permission relations
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1597', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1599', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1601', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1603', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1605', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1607', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1609', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1611', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1613', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1615', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1617', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1619', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1621', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1593', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1623', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1625', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1627', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1629', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1631', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1633', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1635', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1637', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1639', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1641', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1643', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '1595', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2000', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2002', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2004', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2006', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2008', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2010', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '3000', '0', 'know-streaming');

-- Initialize user-role relations
INSERT INTO `logi_security_user_role` (`id`, `user_id`, `role_id`, `is_delete`, `app_name`) VALUES ('1', '1', '1677', '0', 'know-streaming');

INSERT INTO `logi_security_config`
(`value_group`,`value_name`,`value`,`edit`,`status`,`memo`,`is_delete`,`app_name`,`operator`)
VALUES
('SECURITY.LOGIN','SECURITY.TRICK_USERS','[\n \"admin\"\n]',1,1,'允许跳过登录的用户',0,'know-streaming','admin');
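
The TRICK_USERS value is a JSON array of account names allowed to skip login. One plausible way to extend it is to rewrite the JSON in place, as sketched below; the extra account name is a placeholder, and how the application re-reads this row is an assumption, not something stated in this file.

-- Hypothetical: allow one more (placeholder) account to skip login.
UPDATE `logi_security_config`
SET `value` = '[\n \"admin\",\n \"ops_readonly\"\n]'
WHERE `value_group` = 'SECURITY.LOGIN' AND `value_name` = 'SECURITY.TRICK_USERS';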

-- Multi-cluster management permissions added on 2023-01-05
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2012', 'Topic-新增Topic复制', '1593', '1', '2', 'Topic-新增Topic复制', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2014', 'Topic-详情-取消Topic复制', '1593', '1', '2', 'Topic-详情-取消Topic复制', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2012', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2014', '0', 'know-streaming');

-- Multi-cluster management permissions added on 2023-01-18
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2016', 'MM2-新增', '1593', '1', '2', 'MM2-新增', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2018', 'MM2-编辑', '1593', '1', '2', 'MM2-编辑', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2020', 'MM2-删除', '1593', '1', '2', 'MM2-删除', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2022', 'MM2-重启', '1593', '1', '2', 'MM2-重启', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2024', 'MM2-暂停&恢复', '1593', '1', '2', 'MM2-暂停&恢复', '0', 'know-streaming');

INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2016', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2018', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2020', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2022', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2024', '0', 'know-streaming');