[Optimize]统一DB元信息更新格式-Part1 (#1125)

1、引入KafkaMetaService;
2、将Connector的更新按照KafkaMetaService进行更新;
3、简化Connect-MirrorMaker的关联逻辑;
4、GroupService创建的AdminClient中的ClientID增加时间戳,减少Mbean冲突;
This commit is contained in:
EricZeng
2023-08-15 14:24:23 +08:00
committed by GitHub
parent a6abfb3ea8
commit 6e56688a31
12 changed files with 658 additions and 544 deletions

View File

@@ -0,0 +1,44 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.meta;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Kafka metadata service interface.
 *
 * <p>Uniform contract for synchronizing resource metadata between Kafka /
 * Kafka-Connect clusters and the local DB. Implementations override only the
 * methods relevant to their resource type; the defaults are harmless no-ops
 * (empty results / zero deletions).
 *
 * @param <T> the metadata entity type handled by the implementation
 */
public interface KafkaMetaService<T> {
    /**
     * Fetch data from a Kafka-Connect cluster.
     * @param connectCluster the Connect cluster to read from
     * @return tuple of (full resource-name set, successfully fetched resource list)
     */
    default Result<Tuple<Set<String>, List<T>>> getDataFromKafka(ConnectCluster connectCluster) { return Result.buildSuc(new Tuple<>(new HashSet<>(), new ArrayList<>())); }
    /**
     * Fetch data from a Kafka cluster.
     * @param clusterPhy the physical Kafka cluster to read from
     * @return tuple of (full resource-name set, successfully fetched resource list)
     */
    default Result<Tuple<Set<String>, List<T>>> getDataFromKafka(ClusterPhy clusterPhy) { return Result.buildSuc(new Tuple<>(new HashSet<>(), new ArrayList<>())); }
    /**
     * Synchronize the fetched metadata into the DB.
     * @param clusterId cluster ID the data belongs to
     * @param fullNameSet full set of resource names seen in Kafka (used to detect stale DB rows)
     * @param dataList successfully fetched resources to upsert
     */
    default void writeToDB(Long clusterId, Set<String> fullNameSet, List<T> dataList) {}
    /**
     * Delete all rows belonging to the given Kafka cluster.
     * @param clusterPhyId physical Kafka cluster ID
     * @return number of rows deleted (default implementation deletes nothing)
     */
    default int deleteInDBByKafkaClusterId(Long clusterPhyId) { return 0; }
}

View File

@@ -16,6 +16,8 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiL
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Triple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import java.util.ArrayList;
import java.util.HashMap;
@@ -24,6 +26,9 @@ import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
public class ConnectConverter {
public static ConnectorBasicCombineExistVO convert2BasicVO(ConnectCluster connectCluster, ConnectorPO connectorPO) {
ConnectorBasicCombineExistVO vo = new ConnectorBasicCombineExistVO();
@@ -153,6 +158,66 @@ public class ConnectConverter {
return ksConnector;
}
public static List<KSConnector> convertAndSupplyMirrorMakerInfo(ConnectCluster connectCluster, List<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> connectorFullInfoList) {
    // MirrorSourceConnector entries: <connectorName, targetBootstrapServers + "@" + sourceBootstrapServers>
    Map<String, String> sourceMap = new HashMap<>();
    // Heartbeat / Checkpoint entries, keyed the other way round:
    // <targetBootstrapServers + "@" + sourceBootstrapServers, connectorName>
    Map<String, String> heartbeatMap = new HashMap<>();
    Map<String, String> checkpointMap = new HashMap<>();

    // Pass 1: index each MM2 connector by its (target@source) cluster pair so that a
    // source connector can later be linked to its heartbeat/checkpoint counterparts.
    connectorFullInfoList.forEach(connector -> {
        // Look the class name up once instead of repeating the config lookup per branch.
        String connectorClass = connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME);

        Map<String, String> mm2Map = null;
        if (KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(connectorClass)) {
            mm2Map = sourceMap;
        } else if (KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE.equals(connectorClass)) {
            mm2Map = heartbeatMap;
        } else if (KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE.equals(connectorClass)) {
            mm2Map = checkpointMap;
        }

        String targetBootstrapServers = connector.v1().getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
        String sourceBootstrapServers = connector.v1().getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
        if (ValidateUtils.anyBlank(targetBootstrapServers, sourceBootstrapServers) || mm2Map == null) {
            // Not an MM2 connector, or its cluster config is incomplete — nothing to index.
            return;
        }

        if (mm2Map == sourceMap) {
            // Source connectors use the opposite key direction from heartbeat & checkpoint.
            mm2Map.put(connector.v1().getName(), targetBootstrapServers + "@" + sourceBootstrapServers);
        } else {
            mm2Map.put(targetBootstrapServers + "@" + sourceBootstrapServers, connector.v1().getName());
        }
    });

    // Pass 2: convert every connector and, for source connectors, attach the names of
    // the heartbeat/checkpoint connectors that share the same cluster pair.
    List<KSConnector> connectorList = new ArrayList<>();
    connectorFullInfoList.forEach(connector -> {
        KSConnector ksConnector = ConnectConverter.convert2KSConnector(
                connectCluster.getKafkaClusterPhyId(),
                connectCluster.getId(),
                connector.v1(),
                connector.v3(),
                connector.v2()
        );
        connectorList.add(ksConnector);

        // Only source connectors were indexed in sourceMap; everything else is done here.
        String targetAndSource = sourceMap.get(ksConnector.getConnectorName());
        if (ValidateUtils.isBlank(targetAndSource)) {
            return;
        }
        ksConnector.setHeartbeatConnectorName(heartbeatMap.getOrDefault(targetAndSource, ""));
        ksConnector.setCheckpointConnectorName(checkpointMap.getOrDefault(targetAndSource, ""));
    });
    return connectorList;
}
// Builds the connector lookup key in the "&lt;id&gt;#&lt;name&gt;" format
// (a null id renders as the literal "null", matching String concatenation).
private static String genConnectorKey(Long connectorId, String connectorName){
    StringBuilder key = new StringBuilder();
    key.append(connectorId).append('#').append(connectorName);
    return key.toString();
}