Merge pull request #172 from 17hao/issue-153

Tracking changes applied to Kafka cluster
EricZeng committed on 2021-02-07 14:38:38 +08:00 (via GitHub)
5 changed files with 43 additions and 7 deletions
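
In short, each cluster add, edit, and delete now writes an operate record before the database change is applied. A minimal sketch of the pattern the diffs below introduce (identifiers are taken from the diffs; the injected operateRecordService and the ClusterDO instance in scope are assumed):

// Hedged sketch: collect the key fields of the change ...
Map<String, String> content = new HashMap<>();
content.put("zk address", clusterDO.getZookeeper());
content.put("bootstrap servers", clusterDO.getBootstrapServers());
content.put("security properties", clusterDO.getSecurityProperties());
content.put("jmx properties", clusterDO.getJmxProperties());
// ... then record who did what to which cluster; the map is serialized to JSON
// and stored as the record's content (see OperateRecordServiceImpl below).
operateRecordService.insert(operator, ModuleEnum.CLUSTER,
        clusterDO.getClusterName(), OperateEnum.ADD, content);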

ClusterService.java

@@ -43,7 +43,7 @@ public interface ClusterService {
ClusterNameDTO getClusterName(Long logicClusterId);
ResultStatus deleteById(Long clusterId);
ResultStatus deleteById(Long clusterId, String operator);
/**
* Get the broker that is preferred to be elected as controller

OperateRecordService.java

@@ -1,9 +1,12 @@
package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.OperateRecordDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OperateRecordDO;
import java.util.List;
import java.util.Map;
/**
* @author zhongyuankai
@@ -12,5 +15,7 @@ import java.util.List;
public interface OperateRecordService {
int insert(OperateRecordDO operateRecordDO);
int insert(String operator, ModuleEnum module, String resourceName, OperateEnum operate, Map<String, String> content);
List<OperateRecordDO> queryByCondt(OperateRecordDTO dto);
}
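
For callers, the new overload pairs with the existing queryByCondt for reading records back. A hedged usage sketch: the argument values ("admin", "test-cluster", "1") are made up for illustration, and the OperateRecordDTO setter shown (setModuleId) is an assumption not confirmed by this diff:

// Record an operation without building an OperateRecordDO by hand.
operateRecordService.insert("admin", ModuleEnum.CLUSTER, "test-cluster",
        OperateEnum.DELETE, Collections.singletonMap("cluster id", "1"));
// Query records back by condition (setModuleId is hypothetical; getCode() is shown in the impl below).
OperateRecordDTO condition = new OperateRecordDTO();
condition.setModuleId(ModuleEnum.CLUSTER.getCode());
List<OperateRecordDO> records = operateRecordService.queryByCondt(condition);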

ClusterServiceImpl.java

@@ -1,6 +1,8 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.DBStatusEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
@@ -15,10 +17,7 @@ import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
import com.xiaojukeji.kafka.manager.service.service.RegionService;
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
@@ -65,6 +64,9 @@ public class ClusterServiceImpl implements ClusterService {
@Autowired
private ZookeeperService zookeeperService;
@Autowired
private OperateRecordService operateRecordService;
@Override
public ResultStatus addNew(ClusterDO clusterDO, String operator) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isNull(operator)) {
@@ -74,6 +76,12 @@ public class ClusterServiceImpl implements ClusterService {
return ResultStatus.ZOOKEEPER_CONNECT_FAILED;
}
try {
Map<String, String> content = new HashMap<>();
content.put("zk address", clusterDO.getZookeeper());
content.put("bootstrap servers", clusterDO.getBootstrapServers());
content.put("security properties", clusterDO.getSecurityProperties());
content.put("jmx properties", clusterDO.getJmxProperties());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, clusterDO.getClusterName(), OperateEnum.ADD, content);
if (clusterDao.insert(clusterDO) <= 0) {
LOGGER.error("add new cluster failed, clusterDO:{}.", clusterDO);
return ResultStatus.MYSQL_ERROR;
@@ -104,6 +112,11 @@ public class ClusterServiceImpl implements ClusterService {
return ResultStatus.CHANGE_ZOOKEEPER_FORBIDDEN;
}
clusterDO.setStatus(originClusterDO.getStatus());
Map<String, String> content = new HashMap<>();
content.put("cluster id", clusterDO.getId().toString());
content.put("security properties", clusterDO.getSecurityProperties());
content.put("jmx properties", clusterDO.getJmxProperties());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, clusterDO.getClusterName(), OperateEnum.EDIT, content);
return updateById(clusterDO);
}
@@ -254,12 +267,15 @@ public class ClusterServiceImpl implements ClusterService {
}
@Override
public ResultStatus deleteById(Long clusterId) {
public ResultStatus deleteById(Long clusterId, String operator) {
List<RegionDO> regionDOList = regionService.getByClusterId(clusterId);
if (!ValidateUtils.isEmptyList(regionDOList)) {
return ResultStatus.OPERATION_FORBIDDEN;
}
try {
Map<String, String> content = new HashMap<>();
content.put("cluster id", clusterId.toString());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, getClusterName(clusterId).getPhysicalClusterName(), OperateEnum.DELETE, content);
if (clusterDao.deleteById(clusterId) <= 0) {
LOGGER.error("delete cluster failed, clusterId:{}.", clusterId);
return ResultStatus.MYSQL_ERROR;

OperateRecordServiceImpl.java

@@ -1,7 +1,10 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.OperateRecordDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OperateRecordDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.OperateRecordDao;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
@@ -10,6 +13,7 @@ import org.springframework.stereotype.Service;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* @author zhongyuankai
@@ -25,6 +29,17 @@ public class OperateRecordServiceImpl implements OperateRecordService {
return operateRecordDao.insert(operateRecordDO);
}
@Override
public int insert(String operator, ModuleEnum module, String resourceName, OperateEnum operate, Map<String, String> content) {
OperateRecordDO operateRecordDO = new OperateRecordDO();
operateRecordDO.setOperator(operator);
operateRecordDO.setModuleId(module.getCode());
operateRecordDO.setResource(resourceName);
operateRecordDO.setOperateId(operate.getCode());
operateRecordDO.setContent(JsonUtils.toJSONString(content));
return insert(operateRecordDO);
}
@Override
public List<OperateRecordDO> queryByCondt(OperateRecordDTO dto) {
return operateRecordDao.queryByCondt(

OpClusterController.java

@@ -43,7 +43,7 @@ public class OpClusterController {
@RequestMapping(value = "clusters", method = RequestMethod.DELETE)
@ResponseBody
public Result delete(@RequestParam(value = "clusterId") Long clusterId) {
return Result.buildFrom(clusterService.deleteById(clusterId));
return Result.buildFrom(clusterService.deleteById(clusterId, SpringTool.getUserName()));
}
@ApiOperation(value = "Edit cluster information")