合并master分支

This commit is contained in:
qiao.zeng
2023-11-12 15:30:08 +08:00
73 changed files with 1561 additions and 902 deletions

View File

@@ -3,17 +3,15 @@ package com.xiaojukeji.know.streaming.km.task.connect.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@Task(name = "SyncConnectorTask",
@@ -23,40 +21,21 @@ import java.util.List;
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncConnectorTask extends AbstractAsyncMetadataDispatchTask {
private static final ILog LOGGER = LogFactory.getLog(SyncConnectorTask.class);
@Autowired
private ConnectorService connectorService;
@Override
public TaskResult processClusterTask(ConnectCluster connectCluster, long triggerTimeUnitMs) {
Result<List<String>> nameListResult = connectorService.listConnectorsFromCluster(connectCluster.getId());
if (nameListResult.failed()) {
return TaskResult.FAIL;
// 获取信息
Result<Tuple<Set<String>, List<KSConnector>>> dataResult = connectorService.getDataFromKafka(connectCluster);
if (dataResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, dataResult.getMessage());
}
boolean allSuccess = true;
// 更新到DB
connectorService.writeToDB( connectCluster.getId(), dataResult.getData().v1(), dataResult.getData().v2());
List<KSConnector> connectorList = new ArrayList<>();
for (String connectorName: nameListResult.getData()) {
Result<KSConnector> ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(connectCluster.getId(), connectorName);
if (ksConnectorResult.failed()) {
LOGGER.error(
"method=processClusterTask||connectClusterId={}||connectorName={}||result={}",
connectCluster.getId(), connectorName, ksConnectorResult
);
allSuccess = false;
continue;
}
connectorList.add(ksConnectorResult.getData());
}
//mm2相关信息的添加
connectorService.completeMirrorMakerInfo(connectCluster, connectorList);
connectorService.batchReplace(connectCluster.getKafkaClusterPhyId(), connectCluster.getId(), connectorList, new HashSet<>(nameListResult.getData()));
return allSuccess? TaskResult.SUCCESS: TaskResult.FAIL;
// 返回结果
return dataResult.getData().v1().size() == dataResult.getData().v2().size()? TaskResult.SUCCESS: TaskResult.FAIL;
}
}

View File

@@ -3,19 +3,13 @@ package com.xiaojukeji.know.streaming.km.task.kafka.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import org.apache.kafka.common.acl.AclBinding;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
import java.util.stream.Collectors;
@Task(name = "SyncKafkaAclTask",
description = "KafkaAcl信息同步到DB",
@@ -24,32 +18,18 @@ import java.util.stream.Collectors;
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncKafkaAclTask extends AbstractAsyncMetadataDispatchTask {
private static final ILog log = LogFactory.getLog(SyncKafkaAclTask.class);
@Autowired
private KafkaAclService kafkaAclService;
@Autowired
private OpKafkaAclService opKafkaAclService;
@Override
public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getAclFromKafka(clusterPhy.getId());
Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getDataFromKafka(clusterPhy);
if (aclBindingListResult.failed()) {
return TaskResult.FAIL;
}
if (!aclBindingListResult.hasData()) {
return TaskResult.SUCCESS;
}
kafkaAclService.writeToDB(clusterPhy.getId(), aclBindingListResult.getData());
// 更新DB数据
List<KafkaAclPO> poList = aclBindingListResult.getData()
.stream()
.map(elem -> KafkaAclConverter.convert2KafkaAclPO(clusterPhy.getId(), elem, triggerTimeUnitMs))
.collect(Collectors.toList());
opKafkaAclService.batchUpdateAcls(clusterPhy.getId(), poList);
return TaskResult.SUCCESS;
}
}

View File

@@ -3,12 +3,8 @@ package com.xiaojukeji.know.streaming.km.task.kafka.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
import org.springframework.beans.factory.annotation.Autowired;
@@ -23,24 +19,17 @@ import java.util.List;
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncZookeeperTask extends AbstractAsyncMetadataDispatchTask {
private static final ILog log = LogFactory.getLog(SyncZookeeperTask.class);
@Autowired
private ZookeeperService zookeeperService;
@Override
public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
    // NOTE(review): this method was diff residue — the pre-change call
    // (listFromZookeeper + batchReplaceDataInDB) was interleaved with the
    // post-change one; only the post-change implementation is kept here.

    // Fetch the zookeeper node info for this physical cluster
    Result<List<ZookeeperInfo>> infoResult = zookeeperService.getDataFromKafka(clusterPhy);
    if (infoResult.failed()) {
        return new TaskResult(TaskResult.FAIL_CODE, infoResult.getMessage());
    }

    // Persist the fetched zookeeper info
    zookeeperService.writeToDB(clusterPhy.getId(), infoResult.getData());

    return TaskResult.SUCCESS;
}

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.know.streaming.km.task.service.listener;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Service;
@Service
public class TaskClusterDeletedListener implements ApplicationListener<ClusterPhyDeletedEvent> {

    private static final ILog LOGGER = LogFactory.getLog(TaskClusterDeletedListener.class);

    /**
     * Reacts to a physical-cluster deletion by asynchronously removing the
     * cluster's metadata from the DB through every registered MetaDataService bean.
     */
    @Override
    public void onApplicationEvent(ClusterPhyDeletedEvent event) {
        LOGGER.info("method=onApplicationEvent||clusterPhyId={}||msg=listened delete cluster", event.getClusterPhyId());

        // Hand the cleanup off to the KS-managed thread pool so the event thread returns quickly
        FutureUtil.quickStartupFutureUtil.submitTask(() -> {
            // Wait 60s so tasks still running do not re-insert data for the deleted cluster
            BackoffUtils.backoff(60000);
            deleteClusterDataInDB(event);
        });
    }

    /** Invokes every MetaDataService bean to purge the deleted cluster's DB records, one failure at a time. */
    private void deleteClusterDataInDB(ClusterPhyDeletedEvent event) {
        for (MetaDataService metaDataService : SpringTool.getBeansOfType(MetaDataService.class).values()) {
            String serviceName = metaDataService.getClass().getSimpleName();
            LOGGER.info(
                    "method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db starting",
                    event.getClusterPhyId(), serviceName
            );

            try {
                // Delete this service's data for the cluster
                metaDataService.deleteInDBByKafkaClusterId(event.getClusterPhyId());
                LOGGER.info(
                        "method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db finished",
                        event.getClusterPhyId(), serviceName
                );
            } catch (Exception e) {
                // Log and continue: one failing service must not block the others
                LOGGER.error(
                        "method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db failed||errMsg=exception",
                        event.getClusterPhyId(), serviceName, e
                );
            }
        }
    }
}