[Bugfix] Fix the NPE in Connect cluster tasks after a Kafka cluster is deleted (#1129)

Cause:

After a Kafka cluster is deleted, its Connect clusters are not removed from the DB. When Connect cluster metrics are later collected, the Kafka cluster they belong to no longer exists, which ultimately results in an NPE.

Fix:
Publish a Kafka-cluster-deleted event that triggers each MetaDataService subclass to delete its own data from the DB.
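
A minimal sketch of how the event could be consumed on the Connect side, assuming a Spring ApplicationListener and a getClusterPhyId() accessor on the event (the listener class and the accessor are illustrative, not part of this commit):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

@Component
public class ClusterPhyDeletedEventListener implements ApplicationListener<ClusterPhyDeletedEvent> {

    @Autowired
    private ConnectClusterService connectClusterService;

    @Override
    public void onApplicationEvent(ClusterPhyDeletedEvent event) {
        // Remove the Connect clusters that belonged to the deleted Kafka cluster,
        // so later metric collection no longer dereferences a missing cluster.
        // getClusterPhyId() is assumed; the event is constructed with (source, clusterPhyId).
        connectClusterService.deleteInDBByKafkaClusterId(event.getClusterPhyId());
    }
}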

Remaining work:

MetaDataService is currently implemented by only some of the metadata-sync classes, so the dirty data in the DB is not yet cleaned up completely; once every metadata-sync class implements MetaDataService, the data can be cleaned up thoroughly.

PS: this fix already guarantees that the NPE will no longer occur.
EricZeng
2023-08-16 10:54:58 +08:00
committed by GitHub
parent a7309612d5
commit d1417bef8c
5 changed files with 84 additions and 1 deletion


@@ -8,6 +8,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyAddedEvent;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
import com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
@@ -146,6 +147,9 @@ public class ClusterPhyServiceImpl implements ClusterPhyService {
String.format("删除集群:%s",clusterPhy.toString()));
opLogWrapService.saveOplogAndIgnoreException(oplogDTO);
// Publish the cluster-deleted event
SpringTool.publish(new ClusterPhyDeletedEvent(this, clusterPhyId));
return Result.buildSuc();
} catch (Exception e) {
log.error("method=removeClusterPhyById||clusterPhyId={}||operator={}||msg=remove cluster failed||errMsg=exception!",


@@ -4,14 +4,16 @@ package com.xiaojukeji.know.streaming.km.core.service.connect.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSGroupDescription;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import java.util.List;
/**
* Connect-Cluster
*/
public interface ConnectClusterService {
public interface ConnectClusterService extends MetaDataService<KSGroupDescription> {
Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata);
List<ConnectCluster> listByKafkaCluster(Long kafkaClusterPhyId);
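
The MetaDataService contract that ConnectClusterService now extends is not part of this diff; judging from the override in ConnectClusterServiceImpl below, it is assumed to look roughly like this (the real interface likely declares further sync methods):

public interface MetaDataService<T> {
    // Delete this service's rows for the given Kafka cluster; returns the number of rows removed.
    int deleteInDBByKafkaClusterId(Long clusterPhyId);
}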


@@ -38,6 +38,14 @@ public class ConnectClusterServiceImpl implements ConnectClusterService {
@Autowired
private OpLogWrapService opLogWrapService;
@Override
public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
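// Remove every Connect cluster bound to the given Kafka cluster from the DB;
// this is the cleanup hook triggered by the Kafka-cluster-deleted event.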
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
return connectClusterDAO.delete(lambdaQueryWrapper);
}
@Override
public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) {
ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());