[Optimize]统一DB元信息更新格式-Part1 (#1125)

1、引入KafkaMetaService;
2、将Connector的更新按照KafkaMetaService进行更新;
3、简化Connect-MirrorMaker的关联逻辑;
4、GroupService创建的AdminClient中的ClientID增加时间戳,减少Mbean冲突;
This commit is contained in:
EricZeng
2023-08-15 14:24:23 +08:00
committed by GitHub
parent a6abfb3ea8
commit 6e56688a31
12 changed files with 658 additions and 544 deletions

View File

@@ -4,49 +4,30 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluste
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.meta.KafkaMetaService;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import java.util.List;
import java.util.Properties;
import java.util.Set;
/**
* 查看Connector
*/
public interface ConnectorService {
Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator);
public interface ConnectorService extends KafkaMetaService<KSConnector> {
/**
* 获取所有的连接器名称列表
*/
Result<List<String>> listConnectorsFromCluster(Long connectClusterId);
Result<List<String>> listConnectorsFromCluster(ConnectCluster connectCluster);
/**
* 获取单个连接器信息
*/
Result<KSConnectorInfo> getConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName);
Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName);
Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);
void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet);
void addNewToDB(KSConnector connector);
Result<KSConnector> getConnectorFromKafka(Long connectClusterId, String connectorName);
List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
@@ -57,6 +38,4 @@ public interface ConnectorService {
ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName);
ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName);
void completeMirrorMakerInfo(ConnectCluster connectCluster, List<KSConnector> connectorList);
}

View File

@@ -0,0 +1,26 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.Properties;
/**
 * Write-side operations on Kafka Connect connectors
 * (create / resume / restart / pause / delete / update-config, plus DB bookkeeping).
 *
 * NOTE(review): the original header comment said "查看Connector" ("view connector"),
 * which looks copy-pasted from the read-side ConnectorService — this interface only
 * holds mutating operations.
 */
public interface OpConnectorService {
/**
 * Creates a connector on the given connect cluster with the supplied configuration.
 *
 * @param connectClusterId id of the connect cluster (DB primary key)
 * @param connectorName    name of the connector to create
 * @param configs          connector configuration properties
 * @param operator         user performing the action (recorded in the op-log)
 * @return the created connector's info on success
 */
Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator);
/** Resumes a paused connector. */
Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator);
/** Restarts a connector. */
Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator);
/** Stops (pauses) a connector. */
Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator);
/** Deletes a connector from the connect cluster. */
Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator);
/** Replaces a connector's configuration. */
Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);
/** Inserts a newly discovered connector row into the DB. */
void addNewToDB(KSConnector connector);
}

View File

@@ -3,7 +3,6 @@ package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
@@ -13,19 +12,14 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.converter.ConnectConverter;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Triple;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
@@ -34,14 +28,9 @@ import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR;
@Service
public class ConnectorServiceImpl extends BaseVersionControlService implements ConnectorService {
public class ConnectorServiceImpl implements ConnectorService {
private static final ILog LOGGER = LogFactory.getLog(ConnectorServiceImpl.class);
@Autowired
@@ -53,79 +42,14 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private OpLogWrapService opLogWrapService;
private static final String LIST_CONNECTORS_URI = "/connectors";
private static final String GET_CONNECTOR_INFO_PREFIX_URI = "/connectors";
private static final String GET_CONNECTOR_TOPICS_URI = "/connectors/%s/topics";
private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status";
private static final String CREATE_CONNECTOR_URI = "/connectors";
private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume";
private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart";
private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause";
private static final String DELETE_CONNECTOR_URI = "/connectors/%s";
private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config";
/**
 * Declares which versioned service item this implementation belongs to;
 * used by BaseVersionControlService to route version-gated behavior.
 */
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_CONNECTOR;
}
@Override
public Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) {
public Result<List<String>> listConnectorsFromCluster(ConnectCluster connectCluster) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// 构造参数
Properties props = new Properties();
props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName);
props.put("config", configs);
ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI,
props,
ConnectorInfo.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ADD.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectClusterId);
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception",
connectClusterId, connectorName, configs, operator, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<List<String>> listConnectorsFromCluster(Long connectClusterId) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
List<String> nameList = restTool.getArrayObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + LIST_CONNECTORS_URI,
new HashMap<>(),
@@ -135,8 +59,8 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
return Result.buildSuc(nameList);
} catch (Exception e) {
LOGGER.error(
"method=listConnectorsFromCluster||connectClusterId={}||errMsg=exception",
connectClusterId, e
"method=listConnectorsFromCluster||connectClusterId={}||connectClusterSuitableUrl={}||errMsg=exception",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
@@ -153,16 +77,6 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
return this.getConnectorInfoFromCluster(connectCluster, connectorName);
}
/**
 * Lists the topics a connector is actively using, resolving the connect
 * cluster by id first.
 *
 * @param connectClusterId id of the connect cluster (DB primary key)
 * @param connectorName    name of the connector
 * @return topic names on success; NOT_EXIST when the connect cluster is unknown
 */
@Override
public Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName) {
// Resolve cluster metadata (holds the REST endpoint URL); fail fast if absent.
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// Delegate to the overload that talks to the cluster's REST API.
return this.getConnectorTopicsFromCluster(connectCluster, connectorName);
}
@Override
public Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
@@ -174,270 +88,26 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
}
@Override
public Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName) {
public Result<KSConnector> getConnectorFromKafka(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
Result<KSConnectorInfo> connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName);
if (connectorResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
return Result.buildFromIgnoreData(connectorResult);
}
Result<List<String>> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName);
if (topicNameListResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
}
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
Result<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> fullInfoResult = this.getConnectorFullInfoFromKafka(connectCluster, connectorName);
if (fullInfoResult.failed()) {
return Result.buildFromIgnoreData(fullInfoResult);
}
return Result.buildSuc(ConnectConverter.convert2KSConnector(
connectCluster.getKafkaClusterPhyId(),
connectCluster.getId(),
connectorResult.getData(),
stateInfoResult.getData(),
topicNameListResult.getData()
fullInfoResult.getData().v1(),
fullInfoResult.getData().v3(),
fullInfoResult.getData().v2()
));
}
/**
 * Resumes a paused connector via the Connect REST API ("/connectors/{name}/resume"),
 * refreshes the connector state stored in the DB, and records an op-log entry.
 *
 * @param connectClusterId id of the connect cluster
 * @param connectorName    name of the connector to resume
 * @param operator         user performing the action (recorded in the op-log)
 * @return success, NOT_EXIST for an unknown cluster, or KAFKA_CONNECTOR_READ_FAILED on error
 */
@Override
public Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// PUT with an empty body; the REST endpoint needs no payload.
restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
// Re-read the connector state from the cluster and persist it.
this.updateStatus(connectCluster, connectClusterId, connectorName);
// Best-effort audit record; failures here are swallowed by the wrapper.
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ENABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=ConnectorServiceImpl||method=resumeConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
/**
 * Restarts a connector via the Connect REST API ("/connectors/{name}/restart"),
 * refreshes the DB state, and records an op-log entry.
 *
 * @param connectClusterId id of the connect cluster
 * @param connectorName    name of the connector to restart
 * @param operator         user performing the action (recorded in the op-log)
 * @return success, NOT_EXIST for an unknown cluster, or KAFKA_CONNECTOR_READ_FAILED on error
 */
@Override
public Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// POST with an empty body; the restart endpoint needs no payload.
restTool.postObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
// Re-read the connector state from the cluster and persist it.
this.updateStatus(connectCluster, connectClusterId, connectorName);
// Best-effort audit record.
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.RESTART.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=restartConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
/**
 * Stops (pauses) a connector via the Connect REST API ("/connectors/{name}/pause"),
 * refreshes the DB state, and records an op-log entry.
 *
 * @param connectClusterId id of the connect cluster
 * @param connectorName    name of the connector to pause
 * @param operator         user performing the action (recorded in the op-log)
 * @return success, NOT_EXIST for an unknown cluster, or KAFKA_CONNECTOR_READ_FAILED on error
 */
@Override
public Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// PUT with an empty body; "stop" maps onto the REST pause endpoint.
restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
// Re-read the connector state from the cluster and persist it.
this.updateStatus(connectCluster, connectClusterId, connectorName);
// Best-effort audit record.
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DISABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=stopConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
/**
 * Deletes a connector via the Connect REST API ("/connectors/{name}"),
 * records an op-log entry, and removes the connector's row from the DB.
 *
 * @param connectClusterId id of the connect cluster
 * @param connectorName    name of the connector to delete
 * @param operator         user performing the action (recorded in the op-log)
 * @return success, NOT_EXIST for an unknown cluster, or KAFKA_CONNECTOR_READ_FAILED on error
 */
@Override
public Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// DELETE with no params and no extra headers.
restTool.deleteWithParamsAndHeader(
connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
new HashMap<>(),
new HashMap<>(),
String.class
);
// Best-effort audit record.
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
// Keep the DB consistent with the cluster after a successful remote delete.
this.deleteConnectorInDB(connectClusterId, connectorName);
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=deleteConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
/**
 * Replaces a connector's configuration via the Connect REST API
 * ("/connectors/{name}/config"), refreshes the DB state, and records an op-log entry.
 *
 * @param connectClusterId id of the connect cluster
 * @param connectorName    name of the connector to reconfigure
 * @param configs          new connector configuration (full replacement)
 * @param operator         user performing the action (recorded in the op-log)
 * @return success, NOT_EXIST for an unknown cluster, or KAFKA_CONNECTOR_READ_FAILED on error
 */
@Override
public Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// The REST endpoint echoes back the resulting ConnectorInfo (unused here).
ConnectorInfo connectorInfo = restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
configs,
org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo.class
);
// Re-read the connector state from the cluster and persist it.
this.updateStatus(connectCluster, connectClusterId, connectorName);
// Best-effort audit record, with the new config as the detail payload.
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=updateConnectorConfig||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
/**
 * Reconciles the DB rows for one connect cluster against a freshly fetched
 * connector list: inserts new connectors, updates existing ones (preserving the
 * DB primary key), and deletes rows whose connector no longer exists on the cluster.
 *
 * @param kafkaClusterPhyId   physical Kafka cluster id (unused in the visible body)
 * @param connectClusterId    connect cluster whose rows are reconciled
 * @param connectorList       connectors just fetched from the cluster
 * @param allConnectorNameSet names of all connectors currently on the cluster;
 *                            rows absent from this set are deleted
 */
@Override
public void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet) {
List<ConnectorPO> poList = this.listByConnectClusterIdFromDB(connectClusterId);
// Index existing rows by connector name; entries are removed as they are matched,
// so what remains afterwards is the set of candidates for deletion.
Map<String, ConnectorPO> oldPOMap = new HashMap<>();
poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem));
for (KSConnector connector: connectorList) {
try {
ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName());
if (oldPO == null) {
// No existing row: insert a new one.
oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
connectorDAO.insert(oldPO);
} else {
// Existing row: update in place, keeping the original primary key.
ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
newPO.setId(oldPO.getId());
connectorDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore — a concurrent writer inserted the same row first
}
}
try {
oldPOMap.values().forEach(elem -> {
if (allConnectorNameSet.contains(elem.getConnectorName())) {
// Connector still exists on the cluster; keep its row.
return;
}
// Connector no longer exists on the cluster; delete its row.
connectorDAO.deleteById(elem.getId());
});
} catch (Exception e) {
// ignore — deletion is best-effort; the next reconcile pass will retry
}
}
/**
 * Inserts a newly discovered connector into the DB.
 * Duplicate-key errors (row already inserted, e.g. by a concurrent sync) are ignored.
 *
 * @param connector connector to persist
 */
@Override
public void addNewToDB(KSConnector connector) {
try {
connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class));
} catch (DuplicateKeyException dke) {
// ignore — the row already exists
}
}
@Override
public List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
@@ -482,53 +152,98 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
}
@Override
public void completeMirrorMakerInfo(ConnectCluster connectCluster, List<KSConnector> connectorList) {
List<KSConnector> sourceConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
if (sourceConnectorList.isEmpty()) {
return;
public Result<Tuple<Set<String>, List<KSConnector>>> getDataFromKafka(ConnectCluster connectCluster) {
Result<List<String>> nameListResult = this.listConnectorsFromCluster(connectCluster);
if (nameListResult.failed()) {
return Result.buildFromIgnoreData(nameListResult);
}
List<KSConnector> heartBeatConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE)).collect(Collectors.toList());
List<KSConnector> checkpointConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE)).collect(Collectors.toList());
Map<String, String> heartbeatMap = this.buildMirrorMakerMap(connectCluster, heartBeatConnectorList);
Map<String, String> checkpointMap = this.buildMirrorMakerMap(connectCluster, checkpointConnectorList);
for (KSConnector sourceConnector : sourceConnectorList) {
Result<KSConnectorInfo> ret = this.getConnectorInfoFromCluster(connectCluster, sourceConnector.getConnectorName());
if (!ret.hasData()) {
LOGGER.error(
"method=completeMirrorMakerInfo||connectClusterId={}||connectorName={}||get connectorInfo fail!",
connectCluster.getId(), sourceConnector.getConnectorName()
);
continue;
}
KSConnectorInfo ksConnectorInfo = ret.getData();
String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetServers, sourceServers)) {
// 逐个获取
List<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> connectorFullInfoList = new ArrayList<>();
for (String connectorName: nameListResult.getData()) {
Result<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> ksConnectorResult = this.getConnectorFullInfoFromKafka(connectCluster, connectorName);
if (ksConnectorResult.failed()) {
continue;
}
String[] targetBrokerList = getBrokerList(targetServers);
String[] sourceBrokerList = getBrokerList(sourceServers);
sourceConnector.setHeartbeatConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, heartbeatMap));
sourceConnector.setCheckpointConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, checkpointMap));
connectorFullInfoList.add(ksConnectorResult.getData());
}
// 返回结果
return Result.buildSuc(new Tuple<>(
new HashSet<>(nameListResult.getData()),
ConnectConverter.convertAndSupplyMirrorMakerInfo(connectCluster, connectorFullInfoList)) // 转换并补充mm2相关信息
);
}
/**************************************************** private method ****************************************************/
private int deleteConnectorInDB(Long connectClusterId, String connectorName) {
/**
 * Reconciles DB rows for one connect cluster against the latest fetched data:
 * inserts new connectors, updates existing ones (keeping the DB primary key and,
 * where the fresh data lacks them, the previously stored MirrorMaker bindings),
 * and deletes rows whose connector is no longer present on the cluster.
 *
 * @param connectClusterId connect cluster whose rows are reconciled
 * @param fullNameSet      names of all connectors currently on the cluster
 * @param dataList         connectors just fetched from the cluster
 */
@Override
public void writeToDB(Long connectClusterId, Set<String> fullNameSet, List<KSConnector> dataList) {
List<ConnectorPO> poList = this.listByConnectClusterIdFromDB(connectClusterId);
// Index existing rows by connector name; matched entries are removed, so the
// leftover values are the deletion candidates.
Map<String, ConnectorPO> oldPOMap = new HashMap<>();
poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem));
for (KSConnector connector: dataList) {
try {
ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName());
if (oldPO == null) {
// No existing row: insert and move on.
oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
connectorDAO.insert(oldPO);
continue;
}
ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
newPO.setId(oldPO.getId());
if (!ValidateUtils.isBlank(oldPO.getCheckpointConnectorName())
&& ValidateUtils.isBlank(newPO.getCheckpointConnectorName())
&& fullNameSet.contains(oldPO.getCheckpointConnectorName())) {
// The fresh data lacks checkpoint info, but the DB row has it and that
// connector still exists on the cluster — keep the stored checkpoint name.
newPO.setCheckpointConnectorName(oldPO.getCheckpointConnectorName());
}
if (!ValidateUtils.isBlank(oldPO.getHeartbeatConnectorName())
&& ValidateUtils.isBlank(newPO.getHeartbeatConnectorName())
&& fullNameSet.contains(oldPO.getHeartbeatConnectorName())) {
// Same as above for the heartbeat connector. (The original comment said
// "checkpoint" here — a copy-paste slip; this branch handles heartbeat.)
newPO.setHeartbeatConnectorName(oldPO.getHeartbeatConnectorName());
}
connectorDAO.updateById(newPO);
} catch (DuplicateKeyException dke) {
// ignore — a concurrent writer inserted the same row first
} catch (Exception e) {
LOGGER.error(
"method=writeToDB||connectClusterId={}||connectorName={}||errMsg=exception",
connector.getConnectClusterId(), connector.getConnectorName(), e
);
}
}
try {
oldPOMap.values().forEach(elem -> {
if (fullNameSet.contains(elem.getConnectorName())) {
// Connector still exists on the cluster; keep its row.
return;
}
// Connector no longer exists on the cluster; delete its row.
connectorDAO.deleteById(elem.getId());
});
} catch (Exception e) {
// ignore — deletion is best-effort; the next reconcile pass will retry
}
}
@Override
public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
lambdaQueryWrapper.eq(ConnectorPO::getKafkaClusterPhyId, clusterPhyId);
return connectorDAO.delete(lambdaQueryWrapper);
}
/**************************************************** private method ****************************************************/
private Result<KSConnectorInfo> getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
ConnectorInfo connectorInfo = restTool.getForObject(
@@ -594,90 +309,37 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
}
}
private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) {
try {
// Back off 2 seconds before re-reading state (original comment said 3s, but the code waits 2000 ms)
BackoffUtils.backoff(2000);
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
return;
}
ConnectorPO po = new ConnectorPO();
po.setConnectClusterId(connectClusterId);
po.setConnectorName(connectorName);
po.setState(stateInfoResult.getData().getConnector().getState());
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
connectorDAO.update(po, lambdaQueryWrapper);
} catch (Exception e) {
private Result<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> getConnectorFullInfoFromKafka(ConnectCluster connectCluster, String connectorName) {
Result<KSConnectorInfo> connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName);
if (connectorResult.failed()) {
LOGGER.error(
"method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception",
connectClusterId, connectorName, e
"method=getConnectorAllInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors info from cluster failed",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), connectorResult
);
return Result.buildFromIgnoreData(connectorResult);
}
Result<List<String>> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName);
if (topicNameListResult.failed()) {
LOGGER.error(
"method=getConnectorAllInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors topics from cluster failed",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), topicNameListResult
);
}
}
private Map<String, String> buildMirrorMakerMap(ConnectCluster connectCluster, List<KSConnector> ksConnectorList) {
Map<String, String> bindMap = new HashMap<>();
for (KSConnector ksConnector : ksConnectorList) {
Result<KSConnectorInfo> ret = this.getConnectorInfoFromCluster(connectCluster, ksConnector.getConnectorName());
if (!ret.hasData()) {
LOGGER.error(
"method=buildMirrorMakerMap||connectClusterId={}||connectorName={}||get connectorInfo fail!",
connectCluster.getId(), ksConnector.getConnectorName()
);
continue;
}
KSConnectorInfo ksConnectorInfo = ret.getData();
String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetServers, sourceServers)) {
continue;
}
String[] targetBrokerList = getBrokerList(targetServers);
String[] sourceBrokerList = getBrokerList(sourceServers);
for (String targetBroker : targetBrokerList) {
for (String sourceBroker : sourceBrokerList) {
bindMap.put(targetBroker + "@" + sourceBroker, ksConnector.getConnectorName());
}
}
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
LOGGER.error(
"method=getConnectorAllInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors state from cluster failed",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), stateInfoResult
);
}
return bindMap;
}
/**
 * Finds the connector bound to any (targetBroker, sourceBroker) pair.
 * Keys in connectorBindMap are of the form "target@source"; returns the first
 * matching connector name, or "" when no pair is mapped.
 *
 * @param targetBrokerList  broker addresses of the target cluster
 * @param sourceBrokerList  broker addresses of the source cluster
 * @param connectorBindMap  "target@source" -> connector name bindings
 * @return the bound connector name, or "" if none
 */
private String findBindConnector(String[] targetBrokerList, String[] sourceBrokerList, Map<String, String> connectorBindMap) {
for (String targetBroker : targetBrokerList) {
for (String sourceBroker : sourceBrokerList) {
String connectorName = connectorBindMap.get(targetBroker + "@" + sourceBroker);
if (connectorName != null) {
return connectorName;
}
}
}
return "";
}
private String[] getBrokerList(String str) {
if (ValidateUtils.isBlank(str)) {
return new String[0];
}
if (str.contains(";")) {
return str.split(";");
}
if (str.contains(",")) {
return str.split(",");
}
return new String[]{str};
return Result.buildSuc(new Triple<>(
connectorResult.getData(),
topicNameListResult.getData(),
stateInfoResult.getData()
));
}
}

View File

@@ -0,0 +1,352 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR;
@Service
public class OpConnectorServiceImpl extends BaseVersionControlService implements OpConnectorService {
    private static final ILog LOGGER = LogFactory.getLog(OpConnectorServiceImpl.class);

    @Autowired
    private RestTool restTool;

    @Autowired
    private ConnectorDAO connectorDAO;

    @Autowired
    private ConnectClusterService connectClusterService;

    @Autowired
    private OpLogWrapService opLogWrapService;

    // Kafka-Connect REST endpoints; "%s" is replaced by the connector name.
    private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status";
    private static final String CREATE_CONNECTOR_URI = "/connectors";
    private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume";
    private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart";
    private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause";
    private static final String DELETE_CONNECTOR_URI = "/connectors/%s";
    private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config";

    @Override
    protected VersionItemTypeEnum getVersionItemType() {
        return SERVICE_OP_CONNECT_CONNECTOR;
    }

    /**
     * Create a connector on the target connect cluster via its REST API and
     * record an operation log entry.
     *
     * @param connectClusterId id of the connect cluster to create the connector on
     * @param connectorName    name of the new connector
     * @param configs          connector configuration, sent as the "config" field
     * @param operator         user performing the operation (for the op-log)
     * @return the created connector's info, or a failed Result on any error
     */
    @Override
    public Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) {
        try {
            ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
            if (ValidateUtils.isNull(connectCluster)) {
                return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
            }

            // Build the create-request body: { "name": <connectorName>, "config": {...} }.
            // NOTE(review): MIRROR_MAKER_NAME_FIELD_NAME is presumably the generic "name"
            // field of the connect REST API — confirm the constant's value.
            Properties props = new Properties();
            props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName);
            props.put("config", configs);

            ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
                    connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI,
                    props,
                    ConnectorInfo.class
            );

            opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
                    operator,
                    OperationEnum.ADD.getDesc(),
                    ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
                    MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
                    ConvertUtil.obj2Json(configs)
            ));

            KSConnectorInfo connector = new KSConnectorInfo();
            connector.setConnectClusterId(connectClusterId);
            connector.setConfig(connectorInfo.config());
            connector.setName(connectorInfo.name());
            connector.setTasks(connectorInfo.tasks());
            connector.setType(connectorInfo.type());

            return Result.buildSuc(connector);
        } catch (Exception e) {
            LOGGER.error(
                    "method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception",
                    connectClusterId, connectorName, configs, operator, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Resume a paused connector, refresh its state in the local DB and write an op-log entry.
     */
    @Override
    public Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator) {
        try {
            ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
            if (ValidateUtils.isNull(connectCluster)) {
                return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
            }

            restTool.putJsonForObject(
                    connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
                    new HashMap<>(),
                    String.class
            );

            this.updateStatus(connectCluster, connectClusterId, connectorName);

            opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
                    operator,
                    OperationEnum.ENABLE.getDesc(),
                    ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
                    MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
                    ""
            ));

            return Result.buildSuc();
        } catch (Exception e) {
            // FIX: log line previously carried the wrong class tag ("ConnectorServiceImpl")
            // and omitted the connector name.
            LOGGER.error(
                    "method=resumeConnector||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectClusterId, connectorName, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Restart a connector, refresh its state in the local DB and write an op-log entry.
     */
    @Override
    public Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator) {
        try {
            ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
            if (ValidateUtils.isNull(connectCluster)) {
                return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
            }

            restTool.postObjectWithJsonContent(
                    connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
                    new HashMap<>(),
                    String.class
            );

            this.updateStatus(connectCluster, connectClusterId, connectorName);

            opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
                    operator,
                    OperationEnum.RESTART.getDesc(),
                    ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
                    MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
                    ""
            ));

            return Result.buildSuc();
        } catch (Exception e) {
            LOGGER.error(
                    "method=restartConnector||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectClusterId, connectorName, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Pause ("stop") a connector, refresh its state in the local DB and write an op-log entry.
     */
    @Override
    public Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator) {
        try {
            ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
            if (ValidateUtils.isNull(connectCluster)) {
                return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
            }

            restTool.putJsonForObject(
                    connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
                    new HashMap<>(),
                    String.class
            );

            this.updateStatus(connectCluster, connectClusterId, connectorName);

            opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
                    operator,
                    OperationEnum.DISABLE.getDesc(),
                    ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
                    MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
                    ""
            ));

            return Result.buildSuc();
        } catch (Exception e) {
            LOGGER.error(
                    "method=stopConnector||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectClusterId, connectorName, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Delete a connector from the connect cluster, write an op-log entry and
     * remove its row from the local DB.
     */
    @Override
    public Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator) {
        try {
            ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
            if (ValidateUtils.isNull(connectCluster)) {
                return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
            }

            restTool.deleteWithParamsAndHeader(
                    connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
                    new HashMap<>(),
                    new HashMap<>(),
                    String.class
            );

            opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
                    operator,
                    OperationEnum.DELETE.getDesc(),
                    ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
                    MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
                    ""
            ));

            // keep the local DB in sync with the cluster
            this.deleteConnectorInDB(connectClusterId, connectorName);

            return Result.buildSuc();
        } catch (Exception e) {
            LOGGER.error(
                    "method=deleteConnector||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectClusterId, connectorName, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Replace a connector's configuration, refresh its state in the local DB
     * and write an op-log entry.
     */
    @Override
    public Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) {
        try {
            ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
            if (ValidateUtils.isNull(connectCluster)) {
                return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
            }

            ConnectorInfo connectorInfo = restTool.putJsonForObject(
                    connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
                    configs,
                    ConnectorInfo.class
            );

            this.updateStatus(connectCluster, connectClusterId, connectorName);

            opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
                    operator,
                    OperationEnum.EDIT.getDesc(),
                    ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
                    MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
                    ConvertUtil.obj2Json(configs)
            ));

            return Result.buildSuc();
        } catch (Exception e) {
            LOGGER.error(
                    "method=updateConnectorConfig||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectClusterId, connectorName, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Insert the connector into the local DB. A duplicate key means the
     * connector is already known, so the insert is deliberately idempotent.
     */
    @Override
    public void addNewToDB(KSConnector connector) {
        try {
            connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class));
        } catch (DuplicateKeyException dke) {
            // already present — ignore, insert is idempotent by design
        }
    }

    /**************************************************** private method ****************************************************/

    /** Delete the (connectClusterId, connectorName) row from the local DB; returns affected rows. */
    private int deleteConnectorInDB(Long connectClusterId, String connectorName) {
        LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
        lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
        lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);

        return connectorDAO.delete(lambdaQueryWrapper);
    }

    /** Fetch the connector's live status from the connect cluster REST API. */
    private Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
        try {
            KSConnectorStateInfo connectorStateInfo = restTool.getForObject(
                    connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName),
                    new HashMap<>(),
                    KSConnectorStateInfo.class
            );

            return Result.buildSuc(connectorStateInfo);
        } catch (Exception e) {
            LOGGER.error(
                    "method=getConnectorStateInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectCluster.getId(), connectorName, e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
        }
    }

    /**
     * Re-read the connector's state from the cluster and persist it to the local DB.
     * Failures are logged and swallowed — status refresh is best-effort.
     */
    private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) {
        try {
            // Wait 2 seconds so the connect cluster can apply the preceding
            // operation before we read the status back.
            // (FIX: the old comment said 3 seconds, contradicting backoff(2000).)
            BackoffUtils.backoff(2000);

            Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
            if (stateInfoResult.failed()) {
                return;
            }

            ConnectorPO po = new ConnectorPO();
            po.setConnectClusterId(connectClusterId);
            po.setConnectorName(connectorName);
            po.setState(stateInfoResult.getData().getConnector().getState());

            LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
            lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
            lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);

            connectorDAO.update(po, lambdaQueryWrapper);
        } catch (Exception e) {
            LOGGER.error(
                    "method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception",
                    connectClusterId, connectorName, e
            );
        }
    }
}

View File

@@ -78,7 +78,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d", clusterPhy.getId()));
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d||timestamp=%d", clusterPhy.getId(), System.currentTimeMillis()));
adminClient = KSPartialKafkaAdminClient.create(props);
KSListGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups(
@@ -179,7 +179,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d", clusterPhy.getId()));
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d||timestamp=%d", clusterPhy.getId(), System.currentTimeMillis()));
adminClient = KSPartialKafkaAdminClient.create(props);