v2.1版本更新

This commit is contained in:
zengqiao
2020-12-19 00:27:16 +08:00
parent 3fea5c9c8c
commit 49280a8617
75 changed files with 1098 additions and 148 deletions

View File

@@ -76,6 +76,7 @@ public class AccountServiceImpl implements AccountService {
} catch (Exception e) {
LOGGER.error("create account failed, operate mysql failed, accountDO:{}.", accountDO, e);
}
LOGGER.warn("class=AccountServiceImpl||method=createAccount||accountDO={}||msg=add account fail{}!", accountDO,ResultStatus.MYSQL_ERROR.getMessage());
return ResultStatus.MYSQL_ERROR;
}
@@ -88,6 +89,7 @@ public class AccountServiceImpl implements AccountService {
} catch (Exception e) {
LOGGER.error("delete account failed, username:{}.", username, e);
}
LOGGER.warn("class=AccountServiceImpl||method=deleteByName||username={}||msg=delete account fail,{}!", username,ResultStatus.MYSQL_ERROR.getMessage());
return ResultStatus.MYSQL_ERROR;
}
@@ -110,6 +112,7 @@ public class AccountServiceImpl implements AccountService {
} catch (Exception e) {
LOGGER.error("update account failed, accountDO:{}.", accountDO, e);
}
LOGGER.warn("class=AccountServiceImpl||method=updateAccount||accountDO={}||msg=update account fail,{}!", accountDO,ResultStatus.MYSQL_ERROR.getMessage());
return ResultStatus.MYSQL_ERROR;
}

View File

@@ -6,23 +6,24 @@ package com.xiaojukeji.kafka.manager.bpm.common;
* @date 19/6/23
*/
public enum OrderTypeEnum {
APPLY_TOPIC (00, "Topic申请", "applyTopicOrder"),
DELETE_TOPIC (10, "Topic下线", "deleteTopicOrder"),
APPLY_TOPIC (00, "Topic申请", "applyTopicOrder"),
DELETE_TOPIC (10, "Topic下线", "deleteTopicOrder"),
THIRD_PART_DELETE_TOPIC (20, "第三方Topic下线申请", "thirdPartDeleteTopicOrder"),
APPLY_APP (01, "应用申请", "applyAppOrder"),
DELETE_APP (11, "应用下线", "deleteAppOrder"),
APPLY_APP (01, "应用申请", "applyAppOrder"),
DELETE_APP (11, "应用下线", "deleteAppOrder"),
APPLY_QUOTA (02, "配额申请", "applyQuotaOrder"),
APPLY_PARTITION (12, "分区申请", "applyPartitionOrder"),
APPLY_QUOTA (02, "配额申请", "applyQuotaOrder"),
APPLY_PARTITION (12, "分区申请", "applyPartitionOrder"),
APPLY_AUTHORITY (03, "权限申请", "applyAuthorityOrder"),
DELETE_AUTHORITY (13, "权限删除", "deleteAuthorityOrder"),
APPLY_AUTHORITY (03, "权限申请", "applyAuthorityOrder"),
DELETE_AUTHORITY (13, "权限删除", "deleteAuthorityOrder"),
APPLY_CLUSTER (04, "集群申请", "applyClusterOrder"),
DELETE_CLUSTER (14, "集群下线", "deleteClusterOrder"),
APPLY_CLUSTER (04, "集群申请", "applyClusterOrder"),
DELETE_CLUSTER (14, "集群下线", "deleteClusterOrder"),
APPLY_EXPAND_CLUSTER(05, "集群扩容", "modifyClusterOrder"),
APPLY_REDUCE_CLUSTER(15, "集群缩容", "modifyClusterOrder"),
APPLY_EXPAND_CLUSTER (05, "集群扩容", "modifyClusterOrder"),
APPLY_REDUCE_CLUSTER (15, "集群缩容", "modifyClusterOrder"),
;

View File

@@ -0,0 +1,69 @@
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
/**
 * Extension payload of a third-part "delete topic" order.
 *
 * <p>Deserialized (via fastjson) from the order's extension JSON; carries the
 * target cluster/topic plus the owning app's credentials used to authorize
 * the deletion.
 *
 * @author zengqiao
 * @date 20/12/2
 */
public class OrderExtensionThirdPartDeleteTopicDTO {
    // ID of the (logical) cluster the topic lives on
    private Long clusterId;

    // name of the topic to be deleted
    private String topicName;

    // ID of the app that owns the topic
    private String appId;

    // app password supplied by the caller, checked against the stored app password
    private String password;

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public String getTopicName() {
        return topicName;
    }

    public void setTopicName(String topicName) {
        this.topicName = topicName;
    }

    public String getAppId() {
        return appId;
    }

    public void setAppId(String appId) {
        this.appId = appId;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    @Override
    public String toString() {
        // SECURITY FIX: mask the password so this DTO is safe to log
        // (the original printed the plaintext password).
        return "OrderExtensionThirdPartDeleteTopicDTO{" +
                "clusterId=" + clusterId +
                ", topicName='" + topicName + '\'' +
                ", appId='" + appId + '\'' +
                ", password='******'" +
                '}';
    }

    /**
     * @return true when every required field is present and non-blank.
     */
    public boolean paramLegal() {
        return !ValidateUtils.isNull(clusterId)
                && !ValidateUtils.isBlank(topicName)
                && !ValidateUtils.isBlank(appId)
                && !ValidateUtils.isBlank(password);
    }
}

View File

@@ -3,11 +3,8 @@ package com.xiaojukeji.kafka.manager.bpm.component;
import com.xiaojukeji.kafka.manager.bpm.common.OrderStatusEnum;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import com.xiaojukeji.kafka.manager.common.events.OrderApplyEvent;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.OrderDao;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -27,17 +24,12 @@ public class LocalStorageService extends AbstractOrderStorageService {
@Autowired
private OrderDao orderDao;
@Autowired
private ConfigUtils configUtils;
@Override
public ResultStatus directSaveHandledOrder(OrderDO orderDO) {
try {
if (orderDao.directSaveHandledOrder(orderDO) <= 0) {
return ResultStatus.MYSQL_ERROR;
}
// 无需进行通知
// SpringTool.publish(new OrderApplyEvent(this, orderDO, configUtils.getIdc()));
return ResultStatus.SUCCESS;
} catch (Exception e) {
LOGGER.error("add order failed, orderDO:{}.", orderDO, e);
@@ -52,7 +44,6 @@ public class LocalStorageService extends AbstractOrderStorageService {
return false;
}
SpringTool.publish(new OrderApplyEvent(this, orderDO, configUtils.getIdc()));
return true;
} catch (Exception e) {
LOGGER.error("add order failed, orderDO:{}.", orderDO, e);

View File

@@ -261,6 +261,14 @@ public class OrderServiceImpl implements OrderService {
resultList.add(new OrderResult(id, Result.buildFrom(ResultStatus.ORDER_NOT_EXIST)));
continue;
}
// topic申请、topic分区申请不支持批量审批通过.
if (orderDO.getType().equals(OrderTypeEnum.APPLY_TOPIC.getCode())
|| orderDO.getType().equals(OrderTypeEnum.APPLY_PARTITION.getCode())) {
if (OrderStatusEnum.PASSED.getCode().equals(reqObj.getStatus())) {
continue;
}
}
orderDOList.add(orderDO);
}
// 根据创建时间排序

View File

@@ -0,0 +1,164 @@
package com.xiaojukeji.kafka.manager.bpm.order.impl;
import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.bpm.common.OrderTypeEnum;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.OrderExtensionThirdPartDeleteTopicDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.OrderDetailDeleteTopicDTO;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractTopicOrder;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.topic.TopicConnection;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.AdminService;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.TopicManagerService;
import com.xiaojukeji.kafka.manager.service.service.gateway.AppService;
import com.xiaojukeji.kafka.manager.service.service.gateway.TopicConnectionService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.Date;
import java.util.List;
/**
 * Order handler for "third-part topic deletion" requests (bean name
 * "thirdPartDeleteTopicOrder"). Validates the request, authenticates the
 * owning app by password, and performs the actual topic deletion on approval.
 *
 * @author zengqiao
 * @date 20/12/2
 */
@Component("thirdPartDeleteTopicOrder")
public class ThirdPartDeleteTopicOrder extends AbstractTopicOrder {
    @Autowired
    private LogicalClusterMetadataManager logicalClusterMetadataManager;

    @Autowired
    private AppService appService;

    @Autowired
    private ClusterService clusterService;

    @Autowired
    private AdminService adminService;

    @Autowired
    private TopicManagerService topicManagerService;

    @Autowired
    private TopicConnectionService connectionService;

    /**
     * Build the detail view of an order from its stored extension JSON.
     * Each lookup (cluster, connections, topic, app) is best-effort: the DTO
     * is returned with whatever could be resolved so far.
     *
     * @param extensions extension JSON stored on the order
     * @return detail DTO, never null
     */
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        OrderDetailDeleteTopicDTO orderDetailDTO = new OrderDetailDeleteTopicDTO();
        OrderExtensionThirdPartDeleteTopicDTO orderExtensionDTO = JSONObject.parseObject(
                extensions,
                OrderExtensionThirdPartDeleteTopicDTO.class);
        orderDetailDTO.setTopicName(orderExtensionDTO.getTopicName());

        ClusterDO clusterDO = clusterService.getById(orderExtensionDTO.getClusterId());
        if (ValidateUtils.isNull(clusterDO)) {
            // FIX: the original code only guarded the two setters below and then
            // dereferenced clusterDO anyway, throwing an NPE for unknown clusters.
            return orderDetailDTO;
        }
        orderDetailDTO.setPhysicalClusterId(clusterDO.getId());
        orderDetailDTO.setPhysicalClusterName(clusterDO.getClusterName());

        // Connections seen on the topic within the recent window.
        List<TopicConnection> connectionDTOList = connectionService.getByTopicName(
                clusterDO.getId(),
                orderExtensionDTO.getTopicName(),
                new Date(System.currentTimeMillis() - Constant.TOPIC_CONNECTION_LATEST_TIME_MS),
                new Date());
        orderDetailDTO.setConnectionList(connectionDTOList);

        TopicDO topicDO = topicManagerService.getByTopicName(clusterDO.getId(), orderExtensionDTO.getTopicName());
        if (ValidateUtils.isNull(topicDO)) {
            return orderDetailDTO;
        }

        AppDO appDO = appService.getByAppId(topicDO.getAppId());
        if (ValidateUtils.isNull(appDO)) {
            return orderDetailDTO;
        }
        orderDetailDTO.setAppId(appDO.getAppId());
        orderDetailDTO.setAppName(appDO.getName());
        orderDetailDTO.setAppPrincipals(appDO.getPrincipals());
        return orderDetailDTO;
    }

    /**
     * Validate the extension fields of a newly submitted order and, when they
     * are legal, generate the order title.
     *
     * @param extensions extension JSON submitted with the order
     * @return the generated title on success, otherwise the failing status
     */
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        OrderExtensionThirdPartDeleteTopicDTO orderExtensionDTO = JSONObject.parseObject(
                extensions,
                OrderExtensionThirdPartDeleteTopicDTO.class);
        if (!orderExtensionDTO.paramLegal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        Long physicalClusterId = logicalClusterMetadataManager.getPhysicalClusterId(orderExtensionDTO.getClusterId(), true);
        if (ValidateUtils.isNull(physicalClusterId)) {
            return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
        }
        if (!PhysicalClusterMetadataManager.isTopicExist(physicalClusterId, orderExtensionDTO.getTopicName())) {
            return Result.buildFrom(ResultStatus.TOPIC_NOT_EXIST);
        }
        AppDO appDO = appService.getByAppId(orderExtensionDTO.getAppId());
        if (ValidateUtils.isNull(appDO)) {
            return Result.buildFrom(ResultStatus.APP_NOT_EXIST);
        }
        if (!appDO.getPassword().equals(orderExtensionDTO.getPassword())) {
            return Result.buildFrom(ResultStatus.USER_WITHOUT_AUTHORITY);
        }
        // NOTE(review): the title reuses DELETE_TOPIC's message rather than
        // THIRD_PART_DELETE_TOPIC's — confirm this is intended.
        String title = String.format(
                "%s-%d-%s",
                OrderTypeEnum.DELETE_TOPIC.getMessage(),
                orderExtensionDTO.getClusterId(),
                orderExtensionDTO.getTopicName()
        );
        return new Result<>(title);
    }

    /**
     * Execute the approved order: after re-validating cluster/topic existence,
     * absence of recent client connections, and app credentials/ownership,
     * delete the topic.
     *
     * @param orderDO            the order being handled
     * @param orderHandleBaseDTO approval info (not used by this handler)
     * @param userName           operator performing the approval
     * @return SUCCESS, or the status of the first failed check
     */
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO,
                                          OrderHandleBaseDTO orderHandleBaseDTO,
                                          String userName) {
        OrderExtensionThirdPartDeleteTopicDTO extensionDTO = JSONObject.parseObject(orderDO.getExtensions(),
                OrderExtensionThirdPartDeleteTopicDTO.class);
        Long physicalClusterId = logicalClusterMetadataManager.getPhysicalClusterId(extensionDTO.getClusterId(), true);
        if (ValidateUtils.isNull(physicalClusterId)) {
            return ResultStatus.CLUSTER_NOT_EXIST;
        }
        ClusterDO clusterDO = clusterService.getById(physicalClusterId);
        if (!PhysicalClusterMetadataManager.isTopicExistStrictly(physicalClusterId, extensionDTO.getTopicName())) {
            return ResultStatus.TOPIC_NOT_EXIST;
        }
        // Refuse deletion while the topic still had client connections recently.
        if (connectionService.isExistConnection(
                physicalClusterId,
                extensionDTO.getTopicName(),
                new Date(System.currentTimeMillis() - Constant.TOPIC_CONNECTION_LATEST_TIME_MS),
                new Date())
        ) {
            return ResultStatus.OPERATION_FORBIDDEN;
        }
        // 检查申请人是否在应用负责人里面 (check the applicant is an app principal)
        AppDO appDO = appService.getByAppId(extensionDTO.getAppId());
        if (ValidateUtils.isNull(appDO)) {
            return ResultStatus.APP_NOT_EXIST;
        }
        if (!appDO.getPassword().equals(extensionDTO.getPassword())
                || !ListUtils.string2StrList(appDO.getPrincipals()).contains(orderDO.getApplicant())) {
            // wrong password, or the applicant is not a principal of the app
            return ResultStatus.USER_WITHOUT_AUTHORITY;
        }
        // FIX: the original checked for non-success and then returned the same
        // status in both branches — collapsed to a single return.
        return adminService.deleteTopic(clusterDO, extensionDTO.getTopicName(), userName);
    }
}

View File

@@ -94,6 +94,7 @@ public class N9e extends AbstractAgent {
);
N9eResult zr = JSON.parseObject(response, N9eResult.class);
if (!ValidateUtils.isBlank(zr.getErr())) {
LOGGER.warn("class=N9e||method=createTask||param={}||errMsg={}||msg=call create task fail", JsonUtils.toJSONString(param),zr.getErr());
return null;
}
return Long.valueOf(zr.getDat().toString());
@@ -110,7 +111,7 @@ public class N9e extends AbstractAgent {
String response = null;
try {
response = HttpUtils.postForString(
response = HttpUtils.putForString(
baseUrl + ACTION_TASK_URI.replace("{taskId}", taskId.toString()),
JSON.toJSONString(param),
buildHeader()
@@ -119,6 +120,7 @@ public class N9e extends AbstractAgent {
if (ValidateUtils.isBlank(zr.getErr())) {
return true;
}
LOGGER.warn("class=N9e||method=actionTask||param={}||errMsg={}||msg=call action task fail", JSON.toJSONString(param),zr.getErr());
return false;
} catch (Exception e) {
LOGGER.error("action task failed, taskId:{}, action:{}.", taskId, action, e);
@@ -134,7 +136,7 @@ public class N9e extends AbstractAgent {
String response = null;
try {
response = HttpUtils.postForString(
response = HttpUtils.putForString(
baseUrl + ACTION_HOST_TASK_URI.replace("{taskId}", taskId.toString()),
JSON.toJSONString(param),
buildHeader()
@@ -143,6 +145,7 @@ public class N9e extends AbstractAgent {
if (ValidateUtils.isBlank(zr.getErr())) {
return true;
}
LOGGER.warn("class=N9e||method=actionHostTask||param={}||errMsg={}||msg=call action host task fail", JSON.toJSONString(param),zr.getErr());
return false;
} catch (Exception e) {
LOGGER.error("action task failed, taskId:{} action:{} hostname:{}.", taskId, action, hostname, e);
@@ -265,6 +268,7 @@ public class N9e extends AbstractAgent {
while ((line = bufferedReader.readLine()) != null) {
stringBuilder.append(line);
stringBuilder.append("\n");
}
return stringBuilder.toString();
} catch (IOException e) {

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.kcm.component.storage.local;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import org.springframework.web.multipart.MultipartFile;
@@ -12,6 +13,9 @@ import org.springframework.web.multipart.MultipartFile;
*/
@Service("storageService")
public class Local extends AbstractStorageService {
@Value("${kcm.storage.base-url}")
private String baseUrl;
@Override
public boolean upload(String fileName, String fileMd5, MultipartFile uploadFile) {
return false;
@@ -24,6 +28,6 @@ public class Local extends AbstractStorageService {
@Override
public String getDownloadBaseUrl() {
return "";
return baseUrl;
}
}

View File

@@ -56,6 +56,7 @@ public class KafkaFileServiceImpl implements KafkaFileService {
}
return ResultStatus.SUCCESS;
} catch (DuplicateKeyException e) {
LOGGER.error("class=KafkaFileServiceImpl||method=uploadKafkaFile||errMsg={}||kafkaFileDTO={}||username={}", e.getMessage(), kafkaFileDTO, username, e);
return ResultStatus.RESOURCE_ALREADY_EXISTED;
} catch (Exception e) {
LOGGER.error("upload kafka file failed, kafkaFileDTO:{}.", kafkaFileDTO, e);
@@ -93,6 +94,7 @@ public class KafkaFileServiceImpl implements KafkaFileService {
return ResultStatus.MYSQL_ERROR;
}
} catch (DuplicateKeyException e) {
LOGGER.error("class=KafkaFileServiceImpl||method=modifyKafkaFile||errMsg={}||kafkaFileDTO={}||userName={}", e.getMessage(), kafkaFileDTO, userName, e);
return ResultStatus.RESOURCE_NAME_DUPLICATED;
} catch (Exception e) {
LOGGER.error("modify kafka file failed, kafkaFileDTO:{}.", kafkaFileDTO, e);

View File

@@ -24,7 +24,7 @@ public class ClusterHostTaskService extends AbstractClusterTaskService {
CreationTaskData dto = new CreationTaskData();
for (String hostname: clusterHostTaskDTO.getHostList()) {
if (!NetUtils.hostnameLegal(hostname)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
return Result.buildFrom(ResultStatus.CLUSTER_TASK_HOST_LIST_ILLEGAL);
}
}
dto.setHostList(clusterHostTaskDTO.getHostList());

View File

@@ -19,12 +19,13 @@ p_kafka_server_properties_md5=${8} #server配置MD5
p_kafka_server_properties_url=${9} #server配置文件下载地址
#----------------------------------------配置信息------------------------------------------------------#
g_hostname=`hostname`
g_base_dir='/home/km'
g_base_dir='/home'
g_cluster_task_dir=${g_base_dir}"/kafka_cluster_task/task_${p_task_id}" #部署升级路径
g_rollback_version=${g_cluster_task_dir}"/rollback_version" #回滚版本
g_new_kafka_package_name='' #最终的包名
g_kafka_manager_addr='' #kafka-manager地址
g_local_ip=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
g_hostname=${g_local_ip}
#----------------------------------------操作函数------------------------------------------------------#
@@ -71,11 +72,11 @@ function check_and_init_env() {
# 检查并等待集群所有的副本处于同步的状态
function check_and_wait_broker_stabled() {
under_replication_count=`curl -s -G -d "hostname="#{g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
while [ "$under_replication_count" -ne 1 ]; do
ECHO_LOG "存在${under_replication_count}个副本未同步, sleep 10s"
sleep 10
under_replication_count=`curl -s ${g_kafka_manager_addr}/api/v1/${p_cluster_id}/overview | python -m json.tool | grep false |wc -l`
under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
done
ECHO_LOG "集群副本都已经处于同步的状态, 可以进行集群升级"
}
@@ -137,6 +138,9 @@ function prepare_cluster_task_files() {
exit 1
fi
# listeners配置换成当前机器的IP写到server.properties最后一行
echo "listeners=SASL_PLAINTEXT://${g_local_ip}:9093,PLAINTEXT://${g_local_ip}:9092" >> "${g_cluster_task_dir}/${p_kafka_package_name}/config/server.properties"
# 将MD5信息写到包中
echo "package_md5:${p_kafka_package_md5} server_properties_md5:${p_kafka_package_md5}" > "${g_cluster_task_dir}/${p_kafka_package_name}/package_and_properties.md5"
}

View File

@@ -132,14 +132,12 @@ public class ThirdPartServiceImpl implements ThirdPartService {
if (ValidateUtils.isNull(dto)) {
return null;
}
List<PartitionOffsetDTO> offsetDTOList = dto.getPartitionOffsetDTOList();
if (ValidateUtils.isEmptyList(offsetDTOList)) {
offsetDTOList = topicService.getPartitionOffsetList(
clusterDO, dto.getTopicName(), dto.getTimestamp());
}
List<PartitionOffsetDTO> offsetDTOList = this.getPartitionOffsetDTOList(clusterDO, dto);
if (ValidateUtils.isEmptyList(offsetDTOList)) {
return null;
}
OffsetLocationEnum offsetLocation = dto.getLocation().equals(
OffsetLocationEnum.ZOOKEEPER.location) ? OffsetLocationEnum.ZOOKEEPER : OffsetLocationEnum.BROKER;
ResultStatus result = checkConsumerGroupExist(clusterDO, dto.getTopicName(), dto.getConsumerGroup(), offsetLocation, dto.getCreateIfAbsent());
@@ -160,6 +158,39 @@ public class ThirdPartServiceImpl implements ThirdPartService {
);
}
/**
 * Resolve the partition offsets to reset to, trying three sources in order:
 * <ol>
 *   <li>offsets explicitly supplied in the request;</li>
 *   <li>offsets looked up by the request timestamp;</li>
 *   <li>the end offsets — but only when the topic is fully drained
 *       (begin == end for every partition).</li>
 * </ol>
 *
 * @param clusterDO cluster the topic belongs to
 * @param dto       reset request
 * @return resolved offsets; an empty list when none could be determined
 */
private List<PartitionOffsetDTO> getPartitionOffsetDTOList(ClusterDO clusterDO, OffsetResetDTO dto) {
    // 1. Offsets explicitly supplied by the caller win.
    List<PartitionOffsetDTO> suppliedOffsets = dto.getPartitionOffsetDTOList();
    if (!ValidateUtils.isEmptyList(suppliedOffsets)) {
        return suppliedOffsets;
    }

    // 2. Otherwise try to resolve offsets from the requested timestamp.
    List<PartitionOffsetDTO> timestampOffsets =
            topicService.getPartitionOffsetList(clusterDO, dto.getTopicName(), dto.getTimestamp());
    if (!ValidateUtils.isEmptyList(timestampOffsets)) {
        return timestampOffsets;
    }

    // 3. Fall back to the end offsets, provided the topic holds no data.
    Map<TopicPartition, Long> endOffsetMap =
            topicService.getPartitionOffset(clusterDO, dto.getTopicName(), OffsetPosEnum.END);
    if (ValidateUtils.isEmptyMap(endOffsetMap)) {
        return new ArrayList<>();
    }
    Map<TopicPartition, Long> beginOffsetMap =
            topicService.getPartitionOffset(clusterDO, dto.getTopicName(), OffsetPosEnum.BEGINNING);
    if (ValidateUtils.isEmptyMap(beginOffsetMap)) {
        return new ArrayList<>();
    }

    List<PartitionOffsetDTO> drainedOffsets = new ArrayList<>();
    for (Map.Entry<TopicPartition, Long> endEntry : endOffsetMap.entrySet()) {
        Long beginOffset = beginOffsetMap.get(endEntry.getKey());
        if (ValidateUtils.isNull(beginOffset) || !beginOffset.equals(endEntry.getValue())) {
            // begin != end: 表示还有数据 (some partition still holds data) — give up.
            return new ArrayList<>();
        }
        drainedOffsets.add(new PartitionOffsetDTO(endEntry.getKey().partition(), endEntry.getValue()));
    }
    return drainedOffsets;
}
private ResultStatus checkConsumerGroupExist(ClusterDO clusterDO,
String topicName,
String consumerGroup,