Mirror of https://github.com/didi/KnowStreaming.git (synced 2025-12-24 11:52:08 +08:00)
Merge master branch
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-core</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>

     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
     </parent>

     <properties>
@@ -360,7 +360,7 @@ public class BrokerServiceImpl extends BaseKafkaVersionControlService implements

    private Broker getStartTimeAndBuildBroker(Long clusterPhyId, Node newNode, JmxConfig jmxConfig) {
        try {
-            Long startTime = jmxDAO.getServerStartTime(clusterPhyId, newNode.host(), jmxConfig.getJmxPort(), jmxConfig);
+            Long startTime = jmxDAO.getServerStartTime(clusterPhyId, newNode.host(), jmxConfig.getFinallyJmxPort(String.valueOf(newNode.id())), jmxConfig);

            return Broker.buildFrom(clusterPhyId, newNode, startTime);
        } catch (Exception e) {
@@ -11,10 +11,17 @@ import org.springframework.stereotype.Service;
 */
@Getter
@Service
-public class ConfigUtils {
-    private ConfigUtils() {
+public class KSConfigUtils {
+    private KSConfigUtils() {
    }

    @Value("${cluster-balance.ignored-topics.time-second:300}")
    private Integer clusterBalanceIgnoredTopicsTimeSecond;
+
+    @Value(value = "${request.api-call.timeout-unit-ms:8000}")
+    private Integer apiCallTimeoutUnitMs;
+
+    public Integer getApiCallLeftTimeUnitMs(Long costedUnitMs) {
+        return Math.max(1000, (int)(apiCallTimeoutUnitMs - costedUnitMs));
+    }
}
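Note: KSConfigUtils#getApiCallLeftTimeUnitMs hands callers the remaining per-request time budget, clamped so it never drops below one second. A minimal usage sketch (the two remote-call helpers are assumptions, only KSConfigUtils comes from this diff):

    // Spend part of the 8s (default) budget, pass the remainder downstream.
    long begin = System.currentTimeMillis();
    doFirstRemoteCall();                                    // assumed helper
    long costed = System.currentTimeMillis() - begin;

    Integer leftMs = ksConfigUtils.getApiCallLeftTimeUnitMs(costed); // always >= 1000
    doSecondRemoteCall(leftMs);                             // assumed helper taking a timeout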
@@ -40,12 +40,6 @@ public class ConnectClusterServiceImpl implements ConnectClusterService {

    @Override
    public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) {
-        // Strip the trailing slash from the URL
-        String clusterUrl = metadata.getMemberLeaderUrl();
-        if (clusterUrl.charAt(clusterUrl.length() - 1) == '/') {
-            clusterUrl = clusterUrl.substring(0, clusterUrl.length() - 1);
-        }
-
        ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());
        if (oldPO == null) {
            oldPO = new ConnectClusterPO();
@@ -54,7 +48,7 @@ public class ConnectClusterServiceImpl implements ConnectClusterService {
            oldPO.setName(metadata.getGroupName());
            oldPO.setState(metadata.getState().getCode());
            oldPO.setMemberLeaderUrl(metadata.getMemberLeaderUrl());
-            oldPO.setClusterUrl(clusterUrl);
+            oldPO.setClusterUrl("");
            oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION);
            connectClusterDAO.insert(oldPO);
@@ -69,11 +63,11 @@ public class ConnectClusterServiceImpl implements ConnectClusterService {
        if (ValidateUtils.isBlank(oldPO.getVersion())) {
            oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION);
        }
-        if (!ValidateUtils.isBlank(clusterUrl)) {
-            oldPO.setClusterUrl(clusterUrl);
+        if (ValidateUtils.isNull(oldPO.getClusterUrl())) {
+            oldPO.setClusterUrl("");
        }
-        connectClusterDAO.updateById(oldPO);
+
+        connectClusterDAO.updateById(oldPO);
        return oldPO.getId();
    }
@@ -87,7 +87,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        props.put("config", configs);

        ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
-                connectCluster.getClusterUrl() + CREATE_CONNECTOR_URI,
+                connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI,
                props,
                ConnectorInfo.class
        );
@@ -127,7 +127,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        }

        List<String> nameList = restTool.getArrayObjectWithJsonContent(
-                connectCluster.getClusterUrl() + LIST_CONNECTORS_URI,
+                connectCluster.getSuitableRequestUrl() + LIST_CONNECTORS_URI,
                new HashMap<>(),
                String.class
        );
@@ -224,7 +224,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        }

        restTool.putJsonForObject(
-                connectCluster.getClusterUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
+                connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
                new HashMap<>(),
                String.class
        );
@@ -259,7 +259,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        }

        restTool.postObjectWithJsonContent(
-                connectCluster.getClusterUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
+                connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
                new HashMap<>(),
                String.class
        );
@@ -294,7 +294,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        }

        restTool.putJsonForObject(
-                connectCluster.getClusterUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
+                connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
                new HashMap<>(),
                String.class
        );
@@ -329,7 +329,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        }

        restTool.deleteWithParamsAndHeader(
-                connectCluster.getClusterUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
+                connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
                new HashMap<>(),
                new HashMap<>(),
                String.class
@@ -365,7 +365,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
        }

        ConnectorInfo connectorInfo = restTool.putJsonForObject(
-                connectCluster.getClusterUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
+                connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
                configs,
                org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo.class
        );
@@ -532,7 +532,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
    private Result<KSConnectorInfo> getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
        try {
            ConnectorInfo connectorInfo = restTool.getForObject(
-                    connectCluster.getClusterUrl() + GET_CONNECTOR_INFO_PREFIX_URI + "/" + connectorName,
+                    connectCluster.getSuitableRequestUrl() + GET_CONNECTOR_INFO_PREFIX_URI + "/" + connectorName,
                    new HashMap<>(),
                    ConnectorInfo.class
            );
@@ -558,7 +558,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
    private Result<List<String>> getConnectorTopicsFromCluster(ConnectCluster connectCluster, String connectorName) {
        try {
            Properties properties = restTool.getForObject(
-                    connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_TOPICS_URI, connectorName),
+                    connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_TOPICS_URI, connectorName),
                    new HashMap<>(),
                    Properties.class
            );
@@ -578,7 +578,7 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
    private Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
        try {
            KSConnectorStateInfo connectorStateInfo = restTool.getForObject(
-                    connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName),
+                    connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName),
                    new HashMap<>(),
                    KSConnectorStateInfo.class
            );
@@ -66,7 +66,7 @@ public class PluginServiceImpl extends BaseVersionControlService implements Plug

        // Fetch the plugin config via the parameter-validation endpoint
        ConfigInfos configInfos = restTool.putJsonForObject(
-                connectCluster.getClusterUrl() + String.format(GET_PLUGIN_CONFIG_DESC_URI, props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME)),
+                connectCluster.getSuitableRequestUrl() + String.format(GET_PLUGIN_CONFIG_DESC_URI, props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME)),
                props,
                ConfigInfos.class
        );
@@ -94,7 +94,7 @@ public class PluginServiceImpl extends BaseVersionControlService implements Plug

        // Fetch the plugin config via the parameter-validation endpoint
        List<ConnectPluginBasic> pluginList = restTool.getArrayObjectWithJsonContent(
-                connectCluster.getClusterUrl() + GET_ALL_PLUGINS_URI,
+                connectCluster.getSuitableRequestUrl() + GET_ALL_PLUGINS_URI,
                new HashMap<>(),
                ConnectPluginBasic.class
        );
@@ -105,7 +105,7 @@ public class WorkerConnectorServiceImpl implements WorkerConnectorService {
            return Result.buildFailure(ResultStatus.NOT_EXIST);
        }

-        String url = String.format(RESTART_TASK_URI, connectCluster.getClusterUrl(), dto.getConnectorName(), dto.getTaskId());
+        String url = String.format(RESTART_TASK_URI, connectCluster.getSuitableRequestUrl(), dto.getConnectorName(), dto.getTaskId());
        try {
            restTool.postObjectWithJsonContent(url, null, String.class);
        } catch (Exception e) {
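Note: every Connect REST call above now goes through connectCluster.getSuitableRequestUrl() instead of getClusterUrl(). The method body is not part of this diff; a plausible sketch of the intended fallback, inferred from replaceAndReturnIdInDB now persisting an empty clusterUrl (hypothetical, field names assumed):

    // Hypothetical ConnectCluster method: prefer the user-configured cluster URL,
    // fall back to the leader URL discovered from the Connect group.
    public String getSuitableRequestUrl() {
        return ValidateUtils.isBlank(this.clusterUrl) ? this.memberLeaderUrl : this.clusterUrl;
    }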
@@ -7,8 +7,8 @@ import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBas
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectWorkerPO;
-import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.connector.ClusterWorkerOverviewVO;
+import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
@@ -50,6 +50,11 @@ public class WorkerServiceImpl implements WorkerService {
                connectWorkerDAO.insert(newPO);
            } else {
                newPO.setId(oldPO.getId());
+                if (JmxEnum.UNKNOWN.getPort().equals(newPO.getJmxPort())) {
+                    // If the fetched JMX port is unknown, do not overwrite the stored JMX port
+                    newPO.setJmxPort(oldPO.getJmxPort());
+                }
+
                connectWorkerDAO.updateById(newPO);
            }
        } catch (DuplicateKeyException dke) {
@@ -12,9 +12,9 @@ import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import org.apache.kafka.common.TopicPartition;

-import java.util.Date;
import java.util.List;
import java.util.Map;
+import java.util.Set;

public interface GroupService {
    /**
@@ -35,10 +35,11 @@ public interface GroupService {

    /**
     * Batch-update the DB
     * @param clusterPhyId cluster ID
     * @param newGroupList list of new groups
+     * @param getFailedGroupSet groups whose metadata could not be fetched
     */
-    void batchReplaceGroupsAndMembers(Long clusterPhyId, List<Group> newGroupList, long updateTime);
-
-    int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime);
+    void batchReplaceGroupsAndMembers(Long clusterPhyId, List<Group> newGroupList, Set<String> getFailedGroupSet);

    /**
     * DB group-related interfaces
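Note: the new getFailedGroupSet parameter lets the sync task distinguish "group is gone" from "group lookup failed", so DB rows are only pruned for groups that were actually fetched. A hypothetical caller (getGroupFromKafka is an assumed accessor, not from this diff):

    Set<String> getFailedGroupSet = new HashSet<>();
    List<Group> newGroupList = new ArrayList<>();
    for (String groupName : groupService.listGroupsFromKafka(clusterPhy)) {
        try {
            newGroupList.add(groupService.getGroupFromKafka(clusterPhy, groupName)); // assumed
        } catch (Exception e) {
            getFailedGroupSet.add(groupName); // fetch failed: keep its DB rows for now
        }
    }
    groupService.batchReplaceGroupsAndMembers(clusterPhy.getId(), newGroupList, getFailedGroupSet);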
@@ -0,0 +1,15 @@
package com.xiaojukeji.know.streaming.km.core.service.group;

import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicPartitionParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;

public interface OpGroupService {
    /**
     * Delete offsets
     */
    Result<Void> deleteGroupOffset(DeleteGroupParam param, String operator);

    Result<Void> deleteGroupTopicOffset(DeleteGroupTopicParam param, String operator);

    Result<Void> deleteGroupTopicPartitionOffset(DeleteGroupTopicPartitionParam param, String operator);
}
@@ -39,7 +39,7 @@ import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafk
 */
@Service("groupMetricService")
public class GroupMetricServiceImpl extends BaseMetricService implements GroupMetricService {
-    private static final ILog LOGGER = LogFactory.getLog( GroupMetricServiceImpl.class);
+    private static final ILog LOGGER = LogFactory.getLog(GroupMetricServiceImpl.class);

    public static final String GROUP_METHOD_GET_JUST_FRO_TEST = "getMetricJustForTest";
    public static final String GROUP_METHOD_GET_HEALTH_SCORE = "getMetricHealthScore";
@@ -54,7 +54,7 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe
    @Override
    protected void initRegisterVCHandler(){
        registerVCHandler( GROUP_METHOD_GET_JUST_FRO_TEST, this::getMetricJustForTest);
-        registerVCHandler( GROUP_METHOD_GET_LAG_RELEVANT_FROM_ADMIN_CLIENT, this::getLagRelevantFromAdminClient );
+        registerVCHandler( GROUP_METHOD_GET_LAG_RELEVANT_FROM_ADMIN_CLIENT, this::getLagRelevantFromAdminClient);
        registerVCHandler( GROUP_METHOD_GET_HEALTH_SCORE, this::getMetricHealthScore);
        registerVCHandler( GROUP_METHOD_GET_STATE, this::getGroupState);
    }
@@ -129,8 +129,14 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe
    @Override
    public Result<List<MetricMultiLinesVO>> listGroupMetricsFromES(Long clusterId, MetricGroupPartitionDTO dto) {
        Table<String/*metric*/, String/*topic&partition*/, List<MetricPointVO>> retTable = groupMetricESDAO.listGroupMetrics(
-                clusterId, dto.getGroup(), dto.getGroupTopics(), dto.getMetricsNames(),
-                dto.getAggType(), dto.getStartTime(), dto.getEndTime());
+                clusterId,
+                dto.getGroup(),
+                dto.getGroupTopics(),
+                dto.getMetricsNames(),
+                dto.getAggType(),
+                dto.getStartTime(),
+                dto.getEndTime()
+        );

        List<MetricMultiLinesVO> multiLinesVOS = metricMap2VO(clusterId, retTable.rowMap());
        return Result.buildSuc(multiLinesVOS);
@@ -140,7 +146,11 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe
    public Result<List<GroupMetrics>> listLatestMetricsAggByGroupTopicFromES(Long clusterPhyId, List<GroupTopic> groupTopicList,
                                                                             List<String> metricNames, AggTypeEnum aggType) {
        List<GroupMetricPO> groupMetricPOS = groupMetricESDAO.listLatestMetricsAggByGroupTopic(
-                clusterPhyId, groupTopicList, metricNames, aggType);
+                clusterPhyId,
+                groupTopicList,
+                metricNames,
+                aggType
+        );

        return Result.buildSuc( ConvertUtil.list2List(groupMetricPOS, GroupMetrics.class));
    }
@@ -149,7 +159,11 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe
    public Result<List<GroupMetrics>> listPartitionLatestMetricsFromES(Long clusterPhyId, String groupName, String topicName,
                                                                       List<String> metricNames) {
        List<GroupMetricPO> groupMetricPOS = groupMetricESDAO.listPartitionLatestMetrics(
-                clusterPhyId, groupName, topicName, metricNames);
+                clusterPhyId,
+                groupName,
+                topicName,
+                metricNames
+        );

        return Result.buildSuc( ConvertUtil.list2List(groupMetricPOS, GroupMetrics.class));
    }
@@ -158,9 +172,7 @@ public class GroupMetricServiceImpl extends BaseMetricService implements GroupMe
    public Result<Integer> countMetricValueOccurrencesFromES(Long clusterPhyId, String groupName,
                                                             SearchTerm term, Long startTime, Long endTime) {
        setQueryMetricFlag(term);
-        int count = groupMetricESDAO.countMetricValue(clusterPhyId, groupName,
-                term, startTime, endTime);
-
+        int count = groupMetricESDAO.countMetricValue(clusterPhyId, groupName, term, startTime, endTime);
        if(count < 0){
            return Result.buildFail();
        }
@@ -49,7 +49,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT

@Service
public class GroupServiceImpl extends BaseKafkaVersionControlService implements GroupService {
-    private static final ILog log = LogFactory.getLog(GroupServiceImpl.class);
+    private static final ILog LOGGER = LogFactory.getLog(GroupServiceImpl.class);

    @Autowired
    private GroupDAO groupDAO;
@@ -92,7 +92,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements

            return groupNameList;
        } catch (Exception e) {
-            log.error("method=listGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
+            LOGGER.error("method=listGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);

            throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
        } finally {
@@ -142,7 +142,8 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
                member.setMemberCount(member.getMemberCount() + 1);
            }
        }
-        group.setTopicMembers(memberMap.values().stream().collect(Collectors.toList()));
+
+        group.setTopicMembers(new ArrayList<>(memberMap.values()));

        return group;
    }
@@ -161,7 +162,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements

            return offsetMap;
        } catch (Exception e) {
-            log.error("method=getGroupOffset||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e);
+            LOGGER.error("method=getGroupOffset||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e);

            throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
        }
@@ -187,7 +188,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements

            return describeGroupsResult.all().get().get(groupName);
        } catch(Exception e){
-            log.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhy.getId(), groupName, e);
+            LOGGER.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhy.getId(), groupName, e);

            throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
        } finally {
@@ -202,12 +203,12 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
    }

    @Override
-    public void batchReplaceGroupsAndMembers(Long clusterPhyId, List<Group> newGroupList, long updateTime) {
+    public void batchReplaceGroupsAndMembers(Long clusterPhyId, List<Group> newGroupList, Set<String> getFailedGroupSet) {
        // Update the group info
-        this.batchReplaceGroups(clusterPhyId, newGroupList, updateTime);
+        this.batchReplaceGroups(clusterPhyId, newGroupList, getFailedGroupSet);

        // Update the group-topic info
-        this.batchReplaceGroupMembers(clusterPhyId, newGroupList, updateTime);
+        this.batchReplaceGroupMembers(clusterPhyId, newGroupList, getFailedGroupSet);
    }

    @Override
@@ -283,21 +284,6 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
        return groupDAO.selectList(lambdaQueryWrapper).stream().map(elem -> GroupConverter.convert2Group(elem)).collect(Collectors.toList());
    }

-    @Override
-    public int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime) {
-        // Delete expired group info
-        LambdaQueryWrapper<GroupPO> groupPOLambdaQueryWrapper = new LambdaQueryWrapper<>();
-        groupPOLambdaQueryWrapper.eq(GroupPO::getClusterPhyId, clusterPhyId);
-        groupPOLambdaQueryWrapper.le(GroupPO::getUpdateTime, beforeTime);
-        groupDAO.delete(groupPOLambdaQueryWrapper);
-
-        // Delete expired group-member info
-        LambdaQueryWrapper<GroupMemberPO> queryWrapper = new LambdaQueryWrapper<>();
-        queryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId);
-        queryWrapper.le(GroupMemberPO::getUpdateTime, beforeTime);
-        return groupMemberDAO.delete(queryWrapper);
-    }
-
    @Override
    public List<String> getGroupsFromDB(Long clusterPhyId) {
        LambdaQueryWrapper<GroupPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
@@ -368,7 +354,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements

            return Result.buildSuc();
        } catch(Exception e){
-            log.error("method=resetGroupOffsets||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e);
+            LOGGER.error("method=resetGroupOffsets||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e);

            throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED);
        }
@@ -378,62 +364,96 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements

    /**************************************************** private method ****************************************************/

-    private void batchReplaceGroupMembers(Long clusterPhyId, List<Group> newGroupList, long updateTime) {
-        if (ValidateUtils.isEmptyList(newGroupList)) {
-            return;
-        }
-
-        List<GroupMemberPO> dbPOList = this.listClusterGroupsMemberPO(clusterPhyId);
-        Map<String, GroupMemberPO> dbPOMap = dbPOList.stream().collect(Collectors.toMap(elem -> elem.getGroupName() + elem.getTopicName(), Function.identity()));
+    private void batchReplaceGroupMembers(Long clusterPhyId, List<Group> newGroupList, Set<String> getFailedGroupSet) {
+        // Data currently in the DB
+        Map<String, GroupMemberPO> dbPOMap = this.listClusterGroupsMemberPO(clusterPhyId)
+                .stream()
+                .collect(Collectors.toMap(elem -> elem.getGroupName() + elem.getTopicName(), Function.identity()));

+        // Apply the updates
        for (Group group: newGroupList) {
            for (GroupTopicMember member : group.getTopicMembers()) {
                try {
-                    GroupMemberPO newPO = new GroupMemberPO(clusterPhyId, member.getTopicName(), group.getName(), group.getState().getState(), member.getMemberCount(), new Date(updateTime));
+                    GroupMemberPO newPO = new GroupMemberPO(clusterPhyId, member.getTopicName(), group.getName(), group.getState().getState(), member.getMemberCount(), new Date());

                    GroupMemberPO dbPO = dbPOMap.remove(newPO.getGroupName() + newPO.getTopicName());
-                    if (dbPO != null) {
+                    if (dbPO == null) {
+                        // Not in the DB yet: insert directly
+                        groupMemberDAO.insert(newPO);
+                    } else if (!dbPO.equal2GroupMemberPO(newPO)) {
+                        // The data changed: update
                        newPO.setId(dbPO.getId());
                        groupMemberDAO.updateById(newPO);
-                        continue;
                    }
-
-                    groupMemberDAO.insert(newPO);
                } catch (Exception e) {
-                    log.error(
+                    LOGGER.error(
                            "method=batchReplaceGroupMembers||clusterPhyId={}||groupName={}||topicName={}||errMsg=exception",
                            clusterPhyId, group.getName(), member.getTopicName(), e
                    );
                }
            }
        }

+        // Delete leftovers that no longer exist
+        dbPOMap.values().forEach(elem -> {
+            try {
+                if (getFailedGroupSet.contains(elem.getGroupName())) {
+                    // Metadata for this group failed to be fetched, so skip deleting its data
+                    return;
+                }
+
+                groupDAO.deleteById(elem.getId());
+            } catch (Exception e) {
+                LOGGER.error(
+                        "method=batchReplaceGroupMembers||clusterPhyId={}||groupName={}||topicName={}||msg=delete expired group data in db failed||errMsg=exception",
+                        clusterPhyId, elem.getGroupName(), elem.getTopicName(), e
+                );
+            }
+        });
    }

-    private void batchReplaceGroups(Long clusterPhyId, List<Group> newGroupList, long updateTime) {
-        if (ValidateUtils.isEmptyList(newGroupList)) {
-            return;
-        }
-
-        List<GroupPO> dbGroupList = this.listClusterGroupsPO(clusterPhyId);
-        Map<String, GroupPO> dbGroupMap = dbGroupList.stream().collect(Collectors.toMap(elem -> elem.getName(), Function.identity()));
+    private void batchReplaceGroups(Long clusterPhyId, List<Group> newGroupList, Set<String> getFailedGroupSet) {
+        // Fetch the data currently in the DB
+        Map<String, GroupPO> dbGroupMap = this.listClusterGroupsPO(clusterPhyId)
+                .stream()
+                .collect(Collectors.toMap(elem -> elem.getName(), Function.identity()));

+        // Apply the updates
        for (Group newGroup: newGroupList) {
            try {
-                GroupPO newPO = GroupConverter.convert2GroupPO(newGroup);
-                newPO.setUpdateTime(new Date(updateTime));
-
                GroupPO dbPO = dbGroupMap.remove(newGroup.getName());
-                if (dbPO != null) {
-                    newPO.setId(dbPO.getId());
-                    groupDAO.updateById(newPO);
+                if (dbPO == null) {
+                    // A brand-new row: insert directly
+                    groupDAO.insert(GroupConverter.convert2GroupPO(newGroup));
                    continue;
                }

-                groupDAO.insert(newPO);
+                GroupPO newPO = GroupConverter.convert2GroupPO(newGroup);
+                if (!newPO.equal2GroupPO(dbPO)) {
+                    // Not equal: update
+                    newPO.setId(dbPO.getId());
+                    groupDAO.updateById(newPO);
+                }
+
+                // Otherwise nothing needs to be done
            } catch (Exception e) {
-                log.error("method=batchGroupReplace||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhyId, newGroup.getName(), e);
+                LOGGER.error("method=batchReplaceGroups||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhyId, newGroup.getName(), e);
            }
        }

+        // Delete leftovers that no longer exist
+        dbGroupMap.values().forEach(elem -> {
+            try {
+                if (getFailedGroupSet.contains(elem.getName())) {
+                    // Metadata for this group failed to be fetched, so skip deleting its data
+                    return;
+                }
+
+                groupDAO.deleteById(elem.getId());
+            } catch (Exception e) {
+                LOGGER.error("method=batchReplaceGroups||clusterPhyId={}||groupName={}||msg=delete expired group data in db failed||errMsg=exception", clusterPhyId, elem.getName(), e);
+            }
+        });
    }

    private List<GroupPO> listClusterGroupsPO(Long clusterPhyId) {
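Note: both rewritten methods share a replace-then-prune shape: index the DB rows by natural key, pop each key while upserting, then delete whatever is left, unless that group's fetch failed. A condensed sketch of the pattern (Row and RowDao are hypothetical stand-ins for the PO and DAO types):

    interface Row<K> {
        K key();
        Long getId();
        void setId(Long id);
        boolean sameContentAs(Row<K> other);
    }

    interface RowDao<K> {
        void insert(Row<K> row);
        void updateById(Row<K> row);
        void deleteById(Long id);
    }

    static <K> void replaceThenPrune(Map<K, Row<K>> dbByKey, List<Row<K>> fresh,
                                     Set<K> fetchFailedKeys, RowDao<K> dao) {
        for (Row<K> newRow : fresh) {
            Row<K> dbRow = dbByKey.remove(newRow.key());   // pop: mark the key as seen
            if (dbRow == null) {
                dao.insert(newRow);                        // new row: insert
            } else if (!dbRow.sameContentAs(newRow)) {
                newRow.setId(dbRow.getId());
                dao.updateById(newRow);                    // changed row: update
            }                                              // unchanged row: no-op
        }
        // Rows still in dbByKey were not reported this round: prune them,
        // except groups whose metadata fetch failed (they may still exist).
        dbByKey.values().stream()
               .filter(row -> !fetchFailedKeys.contains(row.key()))
               .forEach(row -> dao.deleteById(row.getId()));
    }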
@@ -0,0 +1,272 @@
package com.xiaojukeji.know.streaming.km.core.service.group.impl;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicPartitionParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupPO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.group.OpGroupService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupDAO;
import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupMemberDAO;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import java.util.*;
import java.util.stream.Collectors;

import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.VC_HANDLE_NOT_EXIST;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_GROUP;

/**
 * @author didi
 */
@Service
public class OpGroupServiceImpl extends BaseKafkaVersionControlService implements OpGroupService {
    private static final ILog LOGGER = LogFactory.getLog(OpGroupServiceImpl.class);

    private static final String DELETE_GROUP_OFFSET = "deleteGroupOffset";
    private static final String DELETE_GROUP_TOPIC_OFFSET = "deleteGroupTopicOffset";
    private static final String DELETE_GROUP_TP_OFFSET = "deleteGroupTopicPartitionOffset";

    @Autowired
    private GroupDAO groupDAO;

    @Autowired
    private GroupMemberDAO groupMemberDAO;

    @Autowired
    private OpLogWrapService opLogWrapService;

    @Autowired
    private KafkaAdminClient kafkaAdminClient;

    @Override
    protected VersionItemTypeEnum getVersionItemType() {
        return SERVICE_OP_GROUP;
    }

    @PostConstruct
    private void init() {
        registerVCHandler(DELETE_GROUP_OFFSET, V_2_0_0, V_MAX, "deleteGroupOffsetByClient", this::deleteGroupOffsetByClient);
        registerVCHandler(DELETE_GROUP_TOPIC_OFFSET, V_2_4_0, V_MAX, "deleteGroupTopicOffsetByClient", this::deleteGroupTopicOffsetByClient);
        registerVCHandler(DELETE_GROUP_TP_OFFSET, V_2_4_0, V_MAX, "deleteGroupTopicPartitionOffsetByClient", this::deleteGroupTopicPartitionOffsetByClient);
    }

    @Override
    public Result<Void> deleteGroupOffset(DeleteGroupParam param, String operator) {
        // Log the request
        LOGGER.info("method=deleteGroupOffset||param={}||operator={}||msg=delete group offset", ConvertUtil.obj2Json(param), operator);

        try {
            Result<Void> rv = (Result<Void>) doVCHandler(param.getClusterPhyId(), DELETE_GROUP_OFFSET, param);
            if (rv == null || rv.failed()) {
                return rv;
            }

            // Record the operation
            OplogDTO oplogDTO = new OplogDTO(operator,
                    OperationEnum.DELETE.getDesc(),
                    ModuleEnum.KAFKA_GROUP.getDesc(),
                    String.format("集群ID:[%d] Group名称:[%s]", param.getClusterPhyId(), param.getGroupName()),
                    String.format("删除Offset:[%s]", ConvertUtil.obj2Json(param))
            );
            opLogWrapService.saveOplogAndIgnoreException(oplogDTO);

            // Clean up the group's data
            this.deleteGroupInDB(param.getClusterPhyId(), param.getGroupName());
            this.deleteGroupMemberInDB(param.getClusterPhyId(), param.getGroupName());

            return rv;
        } catch (VCHandlerNotExistException e) {
            return Result.buildFailure(VC_HANDLE_NOT_EXIST);
        }
    }

    @Override
    public Result<Void> deleteGroupTopicOffset(DeleteGroupTopicParam param, String operator) {
        // Log the request
        LOGGER.info("method=deleteGroupTopicOffset||param={}||operator={}||msg=delete group topic offset", ConvertUtil.obj2Json(param), operator);

        try {
            Result<Void> rv = (Result<Void>) doVCHandler(param.getClusterPhyId(), DELETE_GROUP_TOPIC_OFFSET, param);
            if (rv == null || rv.failed()) {
                return rv;
            }

            // Clean up the data in the DB
            // Record the operation
            OplogDTO oplogDTO = new OplogDTO(operator,
                    OperationEnum.DELETE.getDesc(),
                    ModuleEnum.KAFKA_GROUP.getDesc(),
                    String.format("集群ID:[%d] Group名称:[%s] Topic名称:[%s]", param.getClusterPhyId(), param.getGroupName(), param.getTopicName()),
                    String.format("删除Offset:[%s]", ConvertUtil.obj2Json(param))
            );
            opLogWrapService.saveOplogAndIgnoreException(oplogDTO);

            // Clean up the group + topic data
            this.deleteGroupMemberInDB(param.getClusterPhyId(), param.getGroupName(), param.getTopicName());

            return rv;
        } catch (VCHandlerNotExistException e) {
            return Result.buildFailure(VC_HANDLE_NOT_EXIST);
        }
    }

    @Override
    public Result<Void> deleteGroupTopicPartitionOffset(DeleteGroupTopicPartitionParam param, String operator) {
        // Log the request
        LOGGER.info("method=deleteGroupTopicPartitionOffset||param={}||operator={}||msg=delete group topic partition offset", ConvertUtil.obj2Json(param), operator);

        try {
            Result<Void> rv = (Result<Void>) doVCHandler(param.getClusterPhyId(), DELETE_GROUP_TP_OFFSET, param);
            if (rv == null || rv.failed()) {
                return rv;
            }

            // Record the operation
            OplogDTO oplogDTO = new OplogDTO(operator,
                    OperationEnum.DELETE.getDesc(),
                    ModuleEnum.KAFKA_GROUP.getDesc(),
                    String.format("集群ID:[%d] Group名称:[%s] Topic名称:[%s] PartitionID:[%d]", param.getClusterPhyId(), param.getGroupName(), param.getTopicName(), param.getPartitionId()),
                    String.format("删除Offset:[%s]", ConvertUtil.obj2Json(param))
            );
            opLogWrapService.saveOplogAndIgnoreException(oplogDTO);

            return rv;
        } catch (VCHandlerNotExistException e) {
            return Result.buildFailure(VC_HANDLE_NOT_EXIST);
        }
    }

    /**************************************************** private method ****************************************************/

    private Result<Void> deleteGroupOffsetByClient(VersionItemParam itemParam) {
        DeleteGroupParam param = (DeleteGroupParam) itemParam;
        try {
            AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId());

            DeleteConsumerGroupsResult deleteConsumerGroupsResult = adminClient.deleteConsumerGroups(
                    Collections.singletonList(param.getGroupName()),
                    new DeleteConsumerGroupsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
            );

            deleteConsumerGroupsResult.all().get();
        } catch (Exception e) {
            LOGGER.error(
                    "method=deleteGroupOffsetByClient||clusterPhyId={}||groupName={}||errMsg=delete group failed||msg=exception!",
                    param.getClusterPhyId(), param.getGroupName(), e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
        }

        return Result.buildSuc();
    }

    private Result<Void> deleteGroupTopicOffsetByClient(VersionItemParam itemParam) {
        DeleteGroupTopicParam param = (DeleteGroupTopicParam) itemParam;
        try {
            AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId());

            DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(
                    Collections.singletonList(param.getTopicName()),
                    new DescribeTopicsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
            );

            List<TopicPartition> tpList = describeTopicsResult
                    .all()
                    .get()
                    .get(param.getTopicName())
                    .partitions()
                    .stream()
                    .map(elem -> new TopicPartition(param.getTopicName(), elem.partition()))
                    .collect(Collectors.toList());

            DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsetsResult = adminClient.deleteConsumerGroupOffsets(
                    param.getGroupName(),
                    new HashSet<>(tpList),
                    new DeleteConsumerGroupOffsetsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
            );

            deleteConsumerGroupOffsetsResult.all().get();
        } catch (Exception e) {
            LOGGER.error(
                    "method=deleteGroupTopicOffsetByClient||clusterPhyId={}||groupName={}||topicName={}||errMsg=delete group failed||msg=exception!",
                    param.getClusterPhyId(), param.getGroupName(), param.getTopicName(), e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
        }

        return Result.buildSuc();
    }

    private Result<Void> deleteGroupTopicPartitionOffsetByClient(VersionItemParam itemParam) {
        DeleteGroupTopicPartitionParam param = (DeleteGroupTopicPartitionParam) itemParam;
        try {
            AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId());

            DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsetsResult = adminClient.deleteConsumerGroupOffsets(
                    param.getGroupName(),
                    new HashSet<>(Arrays.asList(new TopicPartition(param.getTopicName(), param.getPartitionId()))),
                    new DeleteConsumerGroupOffsetsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS)
            );

            deleteConsumerGroupOffsetsResult.all().get();
        } catch (Exception e) {
            LOGGER.error(
                    "method=deleteGroupTopicPartitionOffsetByClient||clusterPhyId={}||groupName={}||topicName={}||partitionId={}||errMsg=delete group failed||msg=exception!",
                    param.getClusterPhyId(), param.getGroupName(), param.getTopicName(), param.getPartitionId(), e
            );

            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
        }

        return Result.buildSuc();
    }

    private int deleteGroupInDB(Long clusterPhyId, String groupName) {
        LambdaQueryWrapper<GroupPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
        lambdaQueryWrapper.eq(GroupPO::getClusterPhyId, clusterPhyId);
        lambdaQueryWrapper.eq(GroupPO::getName, groupName);

        return groupDAO.delete(lambdaQueryWrapper);
    }

    private int deleteGroupMemberInDB(Long clusterPhyId, String groupName) {
        LambdaQueryWrapper<GroupMemberPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
        lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId);
        lambdaQueryWrapper.eq(GroupMemberPO::getGroupName, groupName);

        return groupMemberDAO.delete(lambdaQueryWrapper);
    }

    private int deleteGroupMemberInDB(Long clusterPhyId, String groupName, String topicName) {
        LambdaQueryWrapper<GroupMemberPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
        lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId);
        lambdaQueryWrapper.eq(GroupMemberPO::getGroupName, groupName);
        lambdaQueryWrapper.eq(GroupMemberPO::getTopicName, topicName);

        return groupMemberDAO.delete(lambdaQueryWrapper);
    }
}
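Note: the version gates in init() track the client APIs: per-partition offset deletion (AdminClient#deleteConsumerGroupOffsets, KIP-496) needs brokers at 2.4 or newer, matching V_2_4_0, while whole-group deletion is gated at V_2_0_0. A minimal standalone example of the 2.4+ call (broker address, group, and topic are placeholders):

    import java.util.Collections;
    import java.util.Map;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.common.TopicPartition;

    public class DeleteGroupOffsetExample {
        public static void main(String[] args) throws Exception {
            Map<String, Object> conf = Collections.singletonMap(
                    AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:9092");
            try (AdminClient admin = AdminClient.create(conf)) {
                // Deletes the committed offset of one partition for one group.
                admin.deleteConsumerGroupOffsets(
                        "my-group",
                        Collections.singleton(new TopicPartition("my-topic", 0))
                ).all().get();
            }
        }
    }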
@@ -102,6 +102,10 @@ public class HealthCheckZookeeperService extends AbstractHealthCheckService {
        );

        long value = infoList.stream().filter(elem -> ZKRoleEnum.LEADER.getRole().equals(elem.getRole())).count();
+        if (value == 0) {
+            // In standalone mode, ZK reports the leader role as "standalone"
+            value = infoList.stream().filter(elem -> ZKRoleEnum.STANDALONE.getRole().equals(elem.getRole())).count();
+        }
+
        checkResult.setPassed(value == 1 ? Constant.YES : Constant.NO);
        return checkResult;
@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.core.service.topic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicTruncateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
@@ -21,4 +22,9 @@ public interface OpTopicService {
     * Expand partitions
     */
    Result<Void> expandTopic(TopicPartitionExpandParam expandParam, String operator);
+
+    /**
+     * Truncate (clear) the topic's messages
+     */
+    Result<Void> truncateTopic(TopicTruncateParam param, String operator);
}
@@ -8,6 +8,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemPara
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicTruncateParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
@@ -33,6 +34,7 @@ import kafka.zk.AdminZkClient;
import kafka.zk.KafkaZkClient;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.TopicPartitionInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import scala.Option;
@@ -57,6 +59,7 @@ public class OpTopicServiceImpl extends BaseKafkaVersionControlService implement
    private static final String TOPIC_CREATE = "createTopic";
    private static final String TOPIC_DELETE = "deleteTopic";
    private static final String TOPIC_EXPAND = "expandTopic";
+    private static final String TOPIC_TRUNCATE = "truncateTopic";

    @Autowired
    private TopicService topicService;
@@ -92,6 +95,8 @@ public class OpTopicServiceImpl extends BaseKafkaVersionControlService implement

        registerVCHandler(TOPIC_EXPAND, V_0_10_0_0, V_0_11_0_3, "expandTopicByZKClient", this::expandTopicByZKClient);
        registerVCHandler(TOPIC_EXPAND, V_0_11_0_3, V_MAX, "expandTopicByKafkaClient", this::expandTopicByKafkaClient);
+
+        registerVCHandler(TOPIC_TRUNCATE, V_0_11_0_0, V_MAX, "truncateTopicByKafkaClient", this::truncateTopicByKafkaClient);
    }

    @Override
@@ -203,9 +208,58 @@ public class OpTopicServiceImpl extends BaseKafkaVersionControlService implement
        return rv;
    }

+    @Override
+    public Result<Void> truncateTopic(TopicTruncateParam param, String operator) {
+        try {
+            // Truncate the topic's data
+            Result<Void> rv = (Result<Void>) doVCHandler(param.getClusterPhyId(), TOPIC_TRUNCATE, param);
+
+            if (rv == null || rv.failed()) {
+                return rv;
+            }
+
+            // Record the operation
+            OplogDTO oplogDTO = new OplogDTO(operator,
+                    OperationEnum.TRUNCATE.getDesc(),
+                    ModuleEnum.KAFKA_TOPIC.getDesc(),
+                    MsgConstant.getTopicBizStr(param.getClusterPhyId(), param.getTopicName()),
+                    String.format("清空Topic:[%s]", param.toString()));
+            opLogWrapService.saveOplogAndIgnoreException(oplogDTO);
+            return rv;
+        } catch (VCHandlerNotExistException e) {
+            return Result.buildFailure(VC_HANDLE_NOT_EXIST);
+        }
+    }
+
    /**************************************************** private method ****************************************************/

+    private Result<Void> truncateTopicByKafkaClient(VersionItemParam itemParam) {
+        TopicTruncateParam param = (TopicTruncateParam) itemParam;
+        try {
+            AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId());
+
+            // Fetch the topic's partition info
+            DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Arrays.asList(param.getTopicName()), new DescribeTopicsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS));
+            Map<String, TopicDescription> descriptionMap = describeTopicsResult.all().get();
+
+            Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
+            RecordsToDelete recordsToDeleteOffset = RecordsToDelete.beforeOffset(param.getOffset());
+
+            descriptionMap.forEach((topicName, topicDescription) -> {
+                for (TopicPartitionInfo topicPartition : topicDescription.partitions()) {
+                    recordsToDelete.put(new TopicPartition(topicName, topicPartition.partition()), recordsToDeleteOffset);
+                }
+            });
+
+            DeleteRecordsResult deleteRecordsResult = adminClient.deleteRecords(recordsToDelete, new DeleteRecordsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS));
+            deleteRecordsResult.all().get();
+        } catch (Exception e) {
+            log.error("truncate topic by kafka-client failed, clusterPhyId:{} topicName:{} offset:{}", param.getClusterPhyId(), param.getTopicName(), param.getOffset(), e);
+
+            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
+        }
+
+        return Result.buildSuc();
+    }
+
    private Result<Void> deleteByKafkaClient(VersionItemParam itemParam) {
        TopicParam param = (TopicParam) itemParam;
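Note: "truncating" here is Kafka's DeleteRecords API; RecordsToDelete.beforeOffset(offset) removes everything below the given offset in each partition, and an offset of -1 means up to the current high watermark. A standalone sketch of the same call (placeholders throughout):

    import java.util.Collections;
    import java.util.Map;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.RecordsToDelete;
    import org.apache.kafka.common.TopicPartition;

    public class TruncatePartitionExample {
        public static void main(String[] args) throws Exception {
            Map<String, Object> conf = Collections.singletonMap(
                    AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:9092");
            try (AdminClient admin = AdminClient.create(conf)) {
                Map<TopicPartition, RecordsToDelete> plan = Collections.singletonMap(
                        new TopicPartition("my-topic", 0),
                        RecordsToDelete.beforeOffset(-1L)); // -1L: up to the high watermark
                admin.deleteRecords(plan).all().get();
            }
        }
    }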
@@ -36,7 +36,15 @@ public class FrontEndControlVersionItems extends BaseMetricVersionMetric {
    private static final String FE_HA_CREATE_MIRROR_TOPIC = "FEHaCreateMirrorTopic";
    private static final String FE_HA_DELETE_MIRROR_TOPIC = "FEHaDeleteMirrorTopic";

-    public FrontEndControlVersionItems(){}
+    private static final String FE_TRUNCATE_TOPIC = "FETruncateTopic";
+
+    private static final String FE_DELETE_GROUP_OFFSET = "FEDeleteGroupOffset";
+    private static final String FE_DELETE_GROUP_TOPIC_OFFSET = "FEDeleteGroupTopicOffset";
+    private static final String FE_DELETE_GROUP_TOPIC_PARTITION_OFFSET = "FEDeleteGroupTopicPartitionOffset";
+
+    public FrontEndControlVersionItems() {
+        // ignore
+    }

    @Override
    public int versionItemType() {
@@ -89,6 +97,17 @@ public class FrontEndControlVersionItems extends BaseMetricVersionMetric {
        itemList.add(buildItem().minVersion(VersionEnum.V_2_5_0_D_300).maxVersion(VersionEnum.V_2_5_0_D_MAX)
                .name(FE_HA_DELETE_MIRROR_TOPIC).desc("HA-取消Topic复制"));

+        // Truncate topic
+        itemList.add(buildItem().minVersion(VersionEnum.V_0_11_0_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_TRUNCATE_TOPIC).desc("清空Topic"));
+
+        // Delete offsets
+        itemList.add(buildItem().minVersion(VersionEnum.V_2_0_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_DELETE_GROUP_OFFSET).desc("删除GroupOffset"));
+        itemList.add(buildItem().minVersion(VersionEnum.V_2_4_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_DELETE_GROUP_TOPIC_OFFSET).desc("删除GroupTopicOffset"));
+        itemList.add(buildItem().minVersion(VersionEnum.V_2_4_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_DELETE_GROUP_TOPIC_PARTITION_OFFSET).desc("删除GroupTopicPartitionOffset"));
        return itemList;
    }
}
@@ -161,7 +161,7 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo

        // Convert the format
        List<MetricLineVO> voList = new ArrayList<>();
-        pointVOMap.entrySet().stream().forEach(entry ->
+        pointVOMap.entrySet().forEach(entry ->
                voList.add(new MetricLineVO(String.valueOf(clusterPhyId), entry.getKey(), entry.getValue()))
        );
        return Result.buildSuc(voList);
@@ -208,11 +208,11 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo
        metrics.putMetric(ZOOKEEPER_METRIC_AVG_REQUEST_LATENCY, cmdData.getZkAvgLatency());
        metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency());
        metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency());
-        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent().floatValue());
+        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests());
+        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent());

        return Result.buildSuc(metrics);
    }
@@ -257,16 +257,16 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo
        metrics.putMetric(ZOOKEEPER_METRIC_AVG_REQUEST_LATENCY, cmdData.getZkAvgLatency());
        metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency());
        metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency());
-        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_WATCH_COUNT, cmdData.getZkWatchCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_EPHEMERALS_COUNT, cmdData.getZkEphemeralsCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_APPROXIMATE_DATA_SIZE, cmdData.getZkApproximateDataSize().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_OPEN_FILE_DESCRIPTOR_COUNT, cmdData.getZkOpenFileDescriptorCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_MAX_FILE_DESCRIPTOR_COUNT, cmdData.getZkMaxFileDescriptorCount().floatValue());
+        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests());
+        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_WATCH_COUNT, cmdData.getZkWatchCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent());
+        metrics.putMetric(ZOOKEEPER_METRIC_EPHEMERALS_COUNT, cmdData.getZkEphemeralsCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_APPROXIMATE_DATA_SIZE, cmdData.getZkApproximateDataSize());
+        metrics.putMetric(ZOOKEEPER_METRIC_OPEN_FILE_DESCRIPTOR_COUNT, cmdData.getZkOpenFileDescriptorCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_MAX_FILE_DESCRIPTOR_COUNT, cmdData.getZkMaxFileDescriptorCount());

        return Result.buildSuc(metrics);
    }
@@ -6,6 +6,7 @@ import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
+import java.util.List;
import java.util.concurrent.Callable;

/**
@@ -21,7 +22,7 @@ public class ApiCallThreadPoolService {
    @Value(value = "${thread-pool.api.queue-size:500}")
    private Integer queueSize;

-    private static FutureWaitUtil<Object> apiFutureUtil;
+    private static FutureWaitUtil<Boolean> apiFutureUtil;

    @PostConstruct
    private void init() {
@@ -33,11 +34,21 @@ public class ApiCallThreadPoolService {
        );
    }

-    public static void runnableTask(String taskName, Integer timeoutUnisMs, Callable<Object> callable) {
+    public static void runnableTask(String taskName, Integer timeoutUnisMs, Callable<Boolean> callable) {
        apiFutureUtil.runnableTask(taskName, timeoutUnisMs, callable);
    }

-    public static void waitResult(Integer stepWaitTimeUnitMs) {
-        apiFutureUtil.waitResult(stepWaitTimeUnitMs);
+    public static void runnableTask(String taskName, Integer timeoutUnisMs, Runnable runnable) {
+        apiFutureUtil.runnableTask(taskName, timeoutUnisMs, runnable);
+    }
+
+    public static void waitResult() {
+        apiFutureUtil.waitResult(0);
+    }
+
+    public static boolean waitResultAndReturnFinished(int taskNum) {
+        List<Boolean> resultList = apiFutureUtil.waitResult(0);
+
+        return resultList != null && resultList.size() == taskNum;
    }
}
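Note: the reworked pool reports completion by count: each Callable<Boolean> task contributes one result, and waitResultAndReturnFinished(n) returns true only when all n results arrived. A hypothetical caller (the input list and task body are illustrative):

    int taskNum = clusterIdList.size();                              // assumed input
    for (Long clusterId : clusterIdList) {
        // Declared as Callable<Boolean> to pick the value-returning overload.
        Callable<Boolean> task = () -> collectOneCluster(clusterId); // assumed helper
        ApiCallThreadPoolService.runnableTask("collect-" + clusterId,
                ksConfigUtils.getApiCallLeftTimeUnitMs(0L), task);
    }
    boolean allFinished = ApiCallThreadPoolService.waitResultAndReturnFinished(taskNum);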