Merge master branch

This commit is contained in:
qiao.zeng
2023-11-12 15:30:08 +08:00
73 changed files with 1561 additions and 902 deletions

View File

@@ -2,9 +2,9 @@ name: KnowStreaming Build
on:
  push:
-    branches: [ "master", "ve_3.x", "ve_demo_3.x" ]
+    branches: [ "*" ]
  pull_request:
-    branches: [ "master", "ve_3.x", "ve_demo_3.x" ]
+    branches: [ "*" ]
jobs:
  build:

View File

@@ -101,7 +101,9 @@
**Click [here](https://doc.knowstreaming.com/product) for more documentation on the official site**
**`Product links`**
- [Official website: https://knowstreaming.com](https://knowstreaming.com)
- [Demo environment: https://demo.knowstreaming.com](https://demo.knowstreaming.com), login account: admin/admin

View File

@@ -53,6 +53,11 @@ INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `l
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming');
-- Multi-cluster management permissions, added 2023-07-18
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2052', 'Security-User查看密码', '1593', '1', '2', 'Security-User查看密码', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2052', '0', 'know-streaming');
```
### Upgrading to version `3.3.0`

View File

@@ -7,7 +7,7 @@
- [1. Which Kafka versions are supported?](#1支持哪些-kafka-版本)
- [1. What are the differences between the 2.x and 3.0 versions?](#12x-版本和-30-版本有什么差异)
- [3. The page shows no data for traffic and other metrics](#3页面流量信息等无数据)
-- [8.4. How do I fix `Jmx` connection failures?](#84jmx连接失败如何解决)
+- [4. How do I fix `Jmx` connection failures?](#4jmx连接失败如何解决)
- [5. Is there API documentation?](#5有没有-api-文档)
- [6. Why does a Topic reappear a while after it was successfully deleted?](#6删除-topic-成功后为何过段时间又出现了)
- [7. How to call the APIs without logging in](#7如何在不登录的情况下调用接口)
@@ -21,6 +21,7 @@
- [15. Notes on using Testcontainers for testing](#15测试时使用testcontainers的说明)
- [16. What to do if the JMX connection fails](#16jmx连接失败怎么办)
- [17. ZK monitoring shows no data](#17zk监控无数据问题)
- [18. How to fix NoClassDefFoundError on startup](#18启动失败报noclassdeffounderror如何解决)
## 1. Which Kafka versions are supported?
@@ -57,7 +58,7 @@
 
-## 8.4. How do I fix `Jmx` connection failures?
+## 4. How do I fix `Jmx` connection failures?
- See the [JMX connection configuration & troubleshooting](https://doc.knowstreaming.com/product/9-attachment#91jmx-%E8%BF%9E%E6%8E%A5%E5%A4%B1%E8%B4%A5%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3) guide.
@@ -278,3 +279,31 @@ The zookeeper cluster is healthy, but all monitoring metrics on the KS zk page show no data. `KnowStrea
```
4lw.commands.whitelist=*
```
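These metrics are gathered through ZooKeeper's four-letter-word commands, which is exactly what this whitelist gates. A minimal probe sketch of such a check (host and port are placeholders; whether KnowStreaming issues `mntr` specifically is an assumption):

```java
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;

public class ZkFourLetterProbe {
    public static void main(String[] args) throws Exception {
        // "mntr" is one of ZooKeeper's four-letter-word commands; if it is not
        // whitelisted, the server replies with an error line instead of metrics.
        try (Socket socket = new Socket("zk-host", 2181)) { // hypothetical address
            OutputStream out = socket.getOutputStream();
            out.write("mntr".getBytes());
            out.flush();
            InputStream in = socket.getInputStream();
            byte[] buf = new byte[4096];
            for (int n; (n = in.read(buf)) > 0; ) {
                System.out.print(new String(buf, 0, n));
            }
        }
    }
}
```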
## 18. How to fix NoClassDefFoundError on startup
**Symptom:**
```log
# Startup fails with: nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
2023-08-11 22:54:29.842 [main] ERROR class=org.springframework.boot.SpringApplication||Application run failed
org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'quartzScheduler' defined in class path resource [com/didiglobal/logi/job/LogIJobAutoConfiguration.class]: Bean instantiation via factory method failed; nested exception is org.springframework.beans.BeanInstantiationException: Failed to instantiate [com.didiglobal.logi.job.core.Scheduler]: Factory method 'quartzScheduler' threw exception; nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
at org.springframework.beans.factory.support.ConstructorResolver.instantiate(ConstructorResolver.java:657)
```
**Cause:**
1. `Logi-Job`, a dependency of `KnowStreaming`, fails to initialize `WorkerSingleton$Singleton`.
2. During initialization, `WorkerSingleton$Singleton` gathers some operating-system information; if that probe throws an exception, the initialization of `WorkerSingleton$Singleton` fails (see the sketch below).
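A minimal, self-contained demo of this failure mode (the class and the failing probe are illustrative, not Logi-Job's actual code): the first touch of the class throws `ExceptionInInitializerError`, and every later touch reports the `NoClassDefFoundError: Could not initialize class ...` seen in the stack trace above.

```java
public class InitFailureDemo {
    static class WorkerSingleton {
        static {
            // Stand-in for the OS-info gathering: any exception thrown here
            // aborts class initialization for good.
            if (Boolean.parseBoolean("true")) {
                throw new IllegalStateException("simulated failure reading OS info");
            }
        }
        static final WorkerSingleton INSTANCE = new WorkerSingleton();
    }

    public static void main(String[] args) {
        try {
            Object first = WorkerSingleton.INSTANCE;
        } catch (Throwable t) {
            System.out.println("first access: " + t); // java.lang.ExceptionInInitializerError
        }
        // The second access fails with:
        // java.lang.NoClassDefFoundError: Could not initialize class InitFailureDemo$WorkerSingleton
        Object second = WorkerSingleton.INSTANCE;
    }
}
```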
**Temporary workaround:**
It is hard to predict when the `Logi-Job` issue will be fixed. Our earlier tests showed that `KnowStreaming` generally runs fine on `Windows`, `Mac`, and `CentOS`.
So, if possible, deploy `KnowStreaming` on one of those systems for now.
If startup also fails on `Windows`, `Mac`, or `CentOS`, retry 2-3 times to see whether the failure persists, or try a different machine.

View File

@@ -12,6 +12,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.connector.ConnectorStateVO;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import org.apache.kafka.connect.runtime.AbstractStatus;
@@ -30,6 +31,9 @@ public class ConnectorManagerImpl implements ConnectorManager {
@Autowired
private ConnectorService connectorService;
@Autowired
private OpConnectorService opConnectorService;
@Autowired
private WorkerConnectorService workerConnectorService;
@@ -44,24 +48,24 @@ public class ConnectorManagerImpl implements ConnectorManager {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "Connector参数错误");
}
-        return connectorService.updateConnectorConfig(connectClusterId, connectorName, configs, operator);
+        return opConnectorService.updateConnectorConfig(connectClusterId, connectorName, configs, operator);
}
@Override
public Result<Void> createConnector(ConnectorCreateDTO dto, String operator) {
dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName());
-        Result<KSConnectorInfo> createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
+        Result<KSConnectorInfo> createResult = opConnectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
if (createResult.failed()) {
return Result.buildFromIgnoreData(createResult);
}
-        Result<KSConnector> ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(dto.getConnectClusterId(), dto.getConnectorName());
+        Result<KSConnector> ksConnectorResult = connectorService.getConnectorFromKafka(dto.getConnectClusterId(), dto.getConnectorName());
if (ksConnectorResult.failed()) {
return Result.buildFromRSAndMsg(ResultStatus.SUCCESS, "创建成功但是获取元信息失败页面元信息会存在1分钟延迟");
}
-        connectorService.addNewToDB(ksConnectorResult.getData());
+        opConnectorService.addNewToDB(ksConnectorResult.getData());
return Result.buildSuc();
}
@@ -69,12 +73,12 @@ public class ConnectorManagerImpl implements ConnectorManager {
public Result<Void> createConnector(ConnectorCreateDTO dto, String heartbeatName, String checkpointName, String operator) {
dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName());
-        Result<KSConnectorInfo> createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
+        Result<KSConnectorInfo> createResult = opConnectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
if (createResult.failed()) {
return Result.buildFromIgnoreData(createResult);
}
-        Result<KSConnector> ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(dto.getConnectClusterId(), dto.getConnectorName());
+        Result<KSConnector> ksConnectorResult = connectorService.getConnectorFromKafka(dto.getConnectClusterId(), dto.getConnectorName());
if (ksConnectorResult.failed()) {
return Result.buildFromRSAndMsg(ResultStatus.SUCCESS, "创建成功但是获取元信息失败页面元信息会存在1分钟延迟");
}
@@ -83,7 +87,7 @@ public class ConnectorManagerImpl implements ConnectorManager {
connector.setCheckpointConnectorName(checkpointName);
connector.setHeartbeatConnectorName(heartbeatName);
-        connectorService.addNewToDB(connector);
+        opConnectorService.addNewToDB(connector);
return Result.buildSuc();
}

View File

@@ -37,6 +37,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
@@ -67,6 +68,9 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
@Autowired
private ConnectorService connectorService;
@Autowired
private OpConnectorService opConnectorService;
@Autowired
private WorkerConnectorService workerConnectorService;
@@ -156,20 +160,20 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
Result<Void> rv = Result.buildSuc();
if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
-            rv = connectorService.deleteConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
+            rv = opConnectorService.deleteConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
-            rv = connectorService.deleteConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
+            rv = opConnectorService.deleteConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
-        return connectorService.deleteConnector(connectClusterId, sourceConnectorName, operator);
+        return opConnectorService.deleteConnector(connectClusterId, sourceConnectorName, operator);
}
@Override
@@ -181,20 +185,20 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
Result<Void> rv = Result.buildSuc();
if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName()) && dto.getCheckpointConnectorConfigs() != null) {
-            rv = connectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getCheckpointConnectorName(), dto.getCheckpointConnectorConfigs(), operator);
+            rv = opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getCheckpointConnectorName(), dto.getCheckpointConnectorConfigs(), operator);
}
if (rv.failed()) {
return rv;
}
if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName()) && dto.getHeartbeatConnectorConfigs() != null) {
-            rv = connectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getHeartbeatConnectorName(), dto.getHeartbeatConnectorConfigs(), operator);
+            rv = opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getHeartbeatConnectorName(), dto.getHeartbeatConnectorConfigs(), operator);
}
if (rv.failed()) {
return rv;
}
-        return connectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
+        return opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
}
@Override
@@ -206,20 +210,20 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
Result<Void> rv = Result.buildSuc();
if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
-            rv = connectorService.restartConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
+            rv = opConnectorService.restartConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
-            rv = connectorService.restartConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
+            rv = opConnectorService.restartConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
-        return connectorService.restartConnector(connectClusterId, sourceConnectorName, operator);
+        return opConnectorService.restartConnector(connectClusterId, sourceConnectorName, operator);
}
@Override
@@ -231,20 +235,20 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
Result<Void> rv = Result.buildSuc();
if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
-            rv = connectorService.stopConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
+            rv = opConnectorService.stopConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
-            rv = connectorService.stopConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
+            rv = opConnectorService.stopConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
-        return connectorService.stopConnector(connectClusterId, sourceConnectorName, operator);
+        return opConnectorService.stopConnector(connectClusterId, sourceConnectorName, operator);
}
@Override
@@ -256,20 +260,20 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
Result<Void> rv = Result.buildSuc();
if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
-            rv = connectorService.resumeConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
+            rv = opConnectorService.resumeConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
-            rv = connectorService.resumeConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
+            rv = opConnectorService.resumeConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
}
if (rv.failed()) {
return rv;
}
-        return connectorService.resumeConnector(connectClusterId, sourceConnectorName, operator);
+        return opConnectorService.resumeConnector(connectClusterId, sourceConnectorName, operator);
}
@Override

View File

@@ -44,7 +44,7 @@ public class ConnectConnectorMetricCollector extends AbstractConnectMetricCollec
Long connectClusterId = connectCluster.getId();
List<VersionControlItem> items = versionControlService.listVersionControlItem(this.getClusterVersion(connectCluster), collectorType().getCode());
-        Result<List<String>> connectorList = connectorService.listConnectorsFromCluster(connectClusterId);
+        Result<List<String>> connectorList = connectorService.listConnectorsFromCluster(connectCluster);
FutureWaitUtil<Void> future = this.getFutureUtilByClusterPhyId(connectClusterId);

View File

@@ -1,7 +1,6 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
-import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;
@@ -12,20 +11,18 @@ import lombok.ToString;
*/
@Data
@NoArgsConstructor
-@AllArgsConstructor
@ToString
public class ConnectClusterMetrics extends BaseMetrics {
-    private Long connectClusterId;
+    protected Long connectClusterId;

-    public ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId){
+    public ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId ){
         super(clusterPhyId);
         this.connectClusterId = connectClusterId;
     }
-    public static ConnectClusterMetrics initWithMetric(Long connectClusterId, String metric, Float value) {
-        ConnectClusterMetrics brokerMetrics = new ConnectClusterMetrics(connectClusterId, connectClusterId);
-        brokerMetrics.putMetric(metric, value);
-        return brokerMetrics;
-    }
+    public ConnectClusterMetrics(Long connectClusterId, String metricName, Float metricValue) {
+        this(null, connectClusterId);
+        this.putMetric(metricName, metricValue);
+    }
@Override

View File

@@ -1,7 +1,5 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
-import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;
@@ -11,25 +9,19 @@ import lombok.ToString;
* @date 2022/11/2
*/
@Data
-@AllArgsConstructor
@NoArgsConstructor
@ToString
-public class ConnectWorkerMetrics extends BaseMetrics {
-    private Long connectClusterId;
+public class ConnectWorkerMetrics extends ConnectClusterMetrics {
private String workerId;
-    public static ConnectWorkerMetrics initWithMetric(Long connectClusterId, String workerId, String metric, Float value) {
-        ConnectWorkerMetrics connectWorkerMetrics = new ConnectWorkerMetrics();
-        connectWorkerMetrics.setConnectClusterId(connectClusterId);
-        connectWorkerMetrics.setWorkerId(workerId);
-        connectWorkerMetrics.putMetric(metric, value);
-        return connectWorkerMetrics;
-    }
+    public ConnectWorkerMetrics(Long connectClusterId, String workerId, String metricName, Float metricValue) {
+        super(null, connectClusterId);
+        this.workerId = workerId;
+        this.putMetric(metricName, metricValue);
+    }
@Override
public String unique() {
return "KCC@" + clusterPhyId + "@" + connectClusterId + "@" + workerId;
return "KCW@" + clusterPhyId + "@" + connectClusterId + "@" + workerId;
}
}

View File

@@ -1,6 +1,5 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;
@@ -12,24 +11,21 @@ import lombok.ToString;
@Data
@NoArgsConstructor
@ToString
-public class ConnectorMetrics extends BaseMetrics {
-    private Long connectClusterId;
-    private String connectorName;
-    private String connectorNameAndClusterId;
+public class ConnectorMetrics extends ConnectClusterMetrics {
+    protected String connectorName;
+    protected String connectorNameAndClusterId;
public ConnectorMetrics(Long connectClusterId, String connectorName) {
-        super(null);
-        this.connectClusterId = connectClusterId;
+        super(null, connectClusterId);
this.connectorName = connectorName;
this.connectorNameAndClusterId = connectorName + "#" + connectClusterId;
}
-    public static ConnectorMetrics initWithMetric(Long connectClusterId, String connectorName, String metricName, Float value) {
-        ConnectorMetrics metrics = new ConnectorMetrics(connectClusterId, connectorName);
-        metrics.putMetric(metricName, value);
-        return metrics;
-    }
+    public ConnectorMetrics(Long connectClusterId, String connectorName, String metricName, Float metricValue) {
+        this(connectClusterId, connectorName);
+        this.putMetric(metricName, metricValue);
+    }
@Override

View File

@@ -1,6 +1,5 @@
package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;
@@ -12,11 +11,7 @@ import lombok.ToString;
@Data
@NoArgsConstructor
@ToString
-public class ConnectorTaskMetrics extends BaseMetrics {
-    private Long connectClusterId;
-    private String connectorName;
+public class ConnectorTaskMetrics extends ConnectorMetrics {
private Integer taskId;
public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer taskId) {
@@ -25,14 +20,13 @@ public class ConnectorTaskMetrics extends BaseMetrics {
this.taskId = taskId;
}
-    public static ConnectorTaskMetrics initWithMetric(Long connectClusterId, String connectorName, Integer taskId, String metricName, Float value) {
-        ConnectorTaskMetrics metrics = new ConnectorTaskMetrics(connectClusterId, connectorName, taskId);
-        metrics.putMetric(metricName,value);
-        return metrics;
-    }
+    public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer taskId, String metricName, Float metricValue) {
+        this(connectClusterId, connectorName, taskId);
+        this.putMetric(metricName, metricValue);
+    }
@Override
public String unique() {
return "KCOR@" + connectClusterId + "@" + connectorName + "@" + taskId;
return "KCORT@" + connectClusterId + "@" + connectorName + "@" + taskId;
}
}
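Taken together, the four metric classes above move from flat `BaseMetrics` subclasses with static `initWithMetric` factories to a single inheritance chain with telescoping constructors, and each level's `unique()` key now carries its own prefix (`KCW@` for workers, `KCORT@` for tasks). A simplified sketch of the resulting shape (fields and Lombok annotations trimmed):

```java
import java.util.HashMap;
import java.util.Map;

class BaseMetrics {
    protected Long clusterPhyId;
    private final Map<String, Float> metrics = new HashMap<>();
    BaseMetrics(Long clusterPhyId) { this.clusterPhyId = clusterPhyId; }
    void putMetric(String name, Float value) { metrics.put(name, value); }
}

class ConnectClusterMetrics extends BaseMetrics {
    protected Long connectClusterId;
    ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId) {
        super(clusterPhyId);
        this.connectClusterId = connectClusterId;
    }
    // Convenience constructor replacing the old static initWithMetric(...) factory.
    ConnectClusterMetrics(Long connectClusterId, String metricName, Float metricValue) {
        this(null, connectClusterId);
        putMetric(metricName, metricValue);
    }
}

class ConnectWorkerMetrics extends ConnectClusterMetrics {
    private final String workerId;
    ConnectWorkerMetrics(Long connectClusterId, String workerId, String metricName, Float metricValue) {
        super(null, connectClusterId);
        this.workerId = workerId;
        putMetric(metricName, metricValue);
    }
    // Worker-level key; tasks use "KCORT@..." in the same spirit.
    String unique() { return "KCW@" + clusterPhyId + "@" + connectClusterId + "@" + workerId; }
}
```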

View File

@@ -0,0 +1,16 @@
package com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyBaseEvent;
import lombok.Getter;
/**
* Cluster deletion event
* @author zengqiao
* @date 23/08/15
*/
@Getter
public class ClusterPhyDeletedEvent extends ClusterPhyBaseEvent {
public ClusterPhyDeletedEvent(Object source, Long clusterPhyId) {
super(source, clusterPhyId);
}
}
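Assuming `ClusterPhyBaseEvent` follows the usual Spring `ApplicationEvent` pattern (which its `(Object source, Long clusterPhyId)` constructor suggests), publishing and consuming the new event would look roughly like this sketch; the service and listener bodies are hypothetical:

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;

@Service
class ClusterPhyDeleteService {
    @Autowired
    private ApplicationEventPublisher publisher;

    public void deleteCluster(Long clusterPhyId) {
        // ... remove the cluster from the DB, then notify interested modules:
        publisher.publishEvent(new ClusterPhyDeletedEvent(this, clusterPhyId));
    }
}

@Component
class ClusterPhyDeletedListener implements ApplicationListener<ClusterPhyDeletedEvent> {
    @Override
    public void onApplicationEvent(ClusterPhyDeletedEvent event) {
        // e.g. drop cached Connect/ACL metadata tied to this physical cluster.
    }
}
```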

View File

@@ -16,6 +16,8 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiL
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Triple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import java.util.ArrayList;
import java.util.HashMap;
@@ -24,6 +26,9 @@ import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
public class ConnectConverter {
public static ConnectorBasicCombineExistVO convert2BasicVO(ConnectCluster connectCluster, ConnectorPO connectorPO) {
ConnectorBasicCombineExistVO vo = new ConnectorBasicCombineExistVO();
@@ -153,6 +158,66 @@ public class ConnectConverter {
return ksConnector;
}
public static List<KSConnector> convertAndSupplyMirrorMakerInfo(ConnectCluster connectCluster, List<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> connectorFullInfoList) {
// <connectorName, targetBootstrapServers + "@" + sourceBootstrapServers>
Map<String, String> sourceMap = new HashMap<>();
// <targetBootstrapServers + "@" + sourceBootstrapServers, connectorName>
Map<String, String> heartbeatMap = new HashMap<>();
Map<String, String> checkpointMap = new HashMap<>();
// build the lookup map for each connector type
connectorFullInfoList.forEach(connector -> {
Map<String, String> mm2Map = null;
if (KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) {
mm2Map = sourceMap;
} else if (KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) {
mm2Map = heartbeatMap;
} else if (KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) {
mm2Map = checkpointMap;
}
String targetBootstrapServers = connector.v1().getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceBootstrapServers = connector.v1().getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetBootstrapServers, sourceBootstrapServers) || mm2Map == null) {
return;
}
if (KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) {
// the source type uses a different key format from heartbeat & checkpoint
mm2Map.put(connector.v1().getName(), targetBootstrapServers + "@" + sourceBootstrapServers);
} else {
mm2Map.put(targetBootstrapServers + "@" + sourceBootstrapServers, connector.v1().getName());
}
});
List<KSConnector> connectorList = new ArrayList<>();
connectorFullInfoList.forEach(connector -> {
// convert and add to the list
KSConnector ksConnector = ConnectConverter.convert2KSConnector(
connectCluster.getKafkaClusterPhyId(),
connectCluster.getId(),
connector.v1(),
connector.v3(),
connector.v2()
);
connectorList.add(ksConnector);
// fill in the MM2 info
String targetAndSource = sourceMap.get(ksConnector.getConnectorName());
if (ValidateUtils.isBlank(targetAndSource)) {
return;
}
ksConnector.setHeartbeatConnectorName(heartbeatMap.getOrDefault(targetAndSource, ""));
ksConnector.setCheckpointConnectorName(checkpointMap.getOrDefault(targetAndSource, ""));
});
return connectorList;
}
private static String genConnectorKey(Long connectorId, String connectorName){
return connectorId + "#" + connectorName;
}
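For intuition about `convertAndSupplyMirrorMakerInfo`: heartbeat and checkpoint connectors are keyed by `target@source`, while source connectors map name → key, so the three pieces of one MirrorMaker flow can find each other. A tiny worked example (hypothetical connector names and bootstrap servers):

```java
import java.util.Map;

class Mm2MatchingExample {
    public static void main(String[] args) {
        // One MM2 flow replicating kafka-a -> kafka-b, split across three connectors:
        Map<String, String> sourceMap     = Map.of("mm2-src", "kafka-b:9092@kafka-a:9092");
        Map<String, String> heartbeatMap  = Map.of("kafka-b:9092@kafka-a:9092", "mm2-hb");
        Map<String, String> checkpointMap = Map.of("kafka-b:9092@kafka-a:9092", "mm2-ckp");

        // When the source connector "mm2-src" is converted, its key resolves the
        // sibling connector names that get written onto the KSConnector:
        String key = sourceMap.get("mm2-src");                    // "kafka-b:9092@kafka-a:9092"
        String heartbeat  = heartbeatMap.getOrDefault(key, "");   // "mm2-hb"
        String checkpoint = checkpointMap.getOrDefault(key, "");  // "mm2-ckp"
        System.out.println(heartbeat + " / " + checkpoint);
    }
}
```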

View File

@@ -0,0 +1,50 @@
package com.xiaojukeji.know.streaming.km.common.enums.connect;
import org.apache.kafka.connect.runtime.AbstractStatus;
/**
* Connector running state
* @see AbstractStatus
*/
public enum ConnectStatusEnum {
UNASSIGNED(0, "UNASSIGNED"),
RUNNING(1,"RUNNING"),
PAUSED(2,"PAUSED"),
FAILED(3, "FAILED"),
DESTROYED(4, "DESTROYED"),
UNKNOWN(-1, "UNKNOWN")
;
ConnectStatusEnum(int status, String value) {
this.status = status;
this.value = value;
}
private final int status;
private final String value;
public static ConnectStatusEnum getByValue(String value) {
for (ConnectStatusEnum statusEnum: ConnectStatusEnum.values()) {
if (statusEnum.value.equals(value)) {
return statusEnum;
}
}
return ConnectStatusEnum.UNKNOWN;
}
public int getStatus() {
return status;
}
public String getValue() {
return value;
}
}
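A quick usage sketch: `getByValue` maps the state string reported by Kafka Connect onto the enum, degrading to `UNKNOWN` rather than throwing on unexpected input.

```java
public class ConnectStatusEnumDemo {
    public static void main(String[] args) {
        // State strings come from Kafka Connect's AbstractStatus.State names.
        System.out.println(ConnectStatusEnum.getByValue("RUNNING").getStatus());        // 1
        // Unrecognized input falls back to UNKNOWN instead of raising an exception.
        System.out.println(ConnectStatusEnum.getByValue("SOMETHING_ELSE").getStatus()); // -1
    }
}
```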

View File

@@ -96,7 +96,7 @@ const RoleDetailAndUpdate = forwardRef((props, ref): JSX.Element => {
arr.push(permissions[i].id);
}
});
-    formData.permissionIdList = formData.permissionIdList.flat();
+    formData.permissionIdList = formData.permissionIdList.flat().filter((item) => item !== undefined);
setConfirmLoading(true);
request(api.editRole, {
method: type === RoleOperate.Add ? 'POST' : 'PUT',
@@ -250,7 +250,7 @@ const RoleDetailAndUpdate = forwardRef((props, ref): JSX.Element => {
<CheckboxGroupContainer
key={i}
formInstance={form}
fieldName="permissionIdList"
fieldName={`permissionIdList`}
options={permission.options}
initSelectedOptions={initSelectedPermissions[permission.id] || []}
groupIdx={i}

View File

@@ -34,11 +34,11 @@ module.exports = {
proxy: {
'/ks-km/api/v3': {
changeOrigin: true,
-      target: 'https://api-kylin-xg02.intra.xiaojukeji.com/ks-km/',
+      target: 'http://127.0.0.1/',
},
'/logi-security/api/v1': {
changeOrigin: true,
-      target: 'https://api-kylin-xg02.intra.xiaojukeji.com/ks-km/',
+      target: 'http://127.0.0.1/',
},
},
},

View File

@@ -10004,6 +10004,12 @@
}
}
},
"pubsub-js": {
"version": "1.9.4",
"resolved": "https://registry.npmmirror.com/pubsub-js/-/pubsub-js-1.9.4.tgz",
"integrity": "sha512-hJYpaDvPH4w8ZX/0Fdf9ma1AwRgU353GfbaVfPjfJQf1KxZ2iHaHl3fAUw1qlJIR5dr4F3RzjGaWohYUEyoh7A==",
"dev": true
},
"pump": {
"version": "3.0.0",
"resolved": "https://registry.npmmirror.com/pump/-/pump-3.0.0.tgz",

View File

@@ -82,6 +82,7 @@
"@types/lodash": "^4.14.171",
"@types/node": "^12.12.25",
"@types/pubsub-js": "^1.5.18",
"pubsub-js": "^1.5.18",
"@typescript-eslint/eslint-plugin": "4.13.0",
"@typescript-eslint/parser": "4.13.0",
"babel-eslint": "10.1.0",

View File

@@ -95,7 +95,7 @@ const api = {
getApi(`/clusters/${clusterPhyId}/groups/${groupName}/partitions`),
resetGroupOffset: () => getApi('/group-offsets'),
getGroupOverview: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/groups-overview`),
deleteGroupOffset: () => getApi('/group-offsets'),
// topics list
getTopicsList: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/topics-overview`),
getReassignmentList: () => getApi(`/reassignment/topics-overview`),
@@ -108,6 +108,7 @@ const api = {
getTopicState: (clusterPhyId: number, topicName: string) => getApi(`/clusters/${clusterPhyId}/topics/${topicName}/state`),
getTopicMetadata: (clusterPhyId: number, topicName: string) =>
getApi(`/clusters/${clusterPhyId}/topics/${topicName}/metadata-combine-exist`),
deleteTopicData: () => getApi(`/topics/truncate-topic`),
// latest metric values
getMetricPointsLatest: (clusterPhyId: number) => getApi(`/physical-clusters/${clusterPhyId}/latest-metrics`),

View File

@@ -20,6 +20,7 @@ import { getLicenseInfo } from './constants/common';
import api from './api';
import ClusterContainer from './pages/index';
import ksLogo from './assets/ks-logo.png';
import {ClustersPermissionMap} from "./pages/CommonConfig";
interface ILocaleMap {
[index: string]: any;
@@ -78,6 +79,9 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => {
const userInfo = localStorage.getItem('userInfo');
const [curActiveAppName, setCurActiveAppName] = useState('');
const [versionInfo, setVersionInfo] = useState<VersionInfo>();
const [global] = AppContainer.useGlobalValue();
const quickEntries=[];
useEffect(() => {
if (pathname.startsWith('/config')) {
setCurActiveAppName('config');
@@ -93,6 +97,23 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => {
});
}, []);
if (global.hasPermission && global.hasPermission(ClustersPermissionMap.CLUSTERS_MANAGE_VIEW)){
quickEntries.push({
icon: <IconFont type="icon-duojiqunguanli"/>,
txt: '多集群管理',
ident: '',
active: curActiveAppName === 'cluster',
});
}
if (global.hasPermission && global.hasPermission(ClustersPermissionMap.SYS_MANAGE_VIEW)){
quickEntries.push({
icon: <IconFont type="icon-xitongguanli" />,
txt: '系统管理',
ident: 'config',
active: curActiveAppName === 'config',
});
}
return (
<DProLayout.Container
headerProps={{
@@ -103,20 +124,7 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => {
),
username: userInfo ? JSON.parse(userInfo)?.userName : '',
icon: <DotChartOutlined />,
-          quickEntries: [
-            {
-              icon: <IconFont type="icon-duojiqunguanli" />,
-              txt: '多集群管理',
-              ident: '',
-              active: curActiveAppName === 'cluster',
-            },
-            {
-              icon: <IconFont type="icon-xitongguanli" />,
-              txt: '系统管理',
-              ident: 'config',
-              active: curActiveAppName === 'config',
-            },
-          ],
+          quickEntries: quickEntries,
isFixed: false,
userDropMenuItems: [
<Menu.Item key={0}>

View File

@@ -0,0 +1,10 @@
import { useCallback, useState } from 'react';
export function useForceRefresh() {
const [refreshKey, setRefresh] = useState<number>(0);
const forceRefresh: () => void = useCallback(() => {
setRefresh((x) => x + 1);
}, []);
return [refreshKey, forceRefresh];
}

View File

@@ -7,6 +7,9 @@ import { goLogin } from '@src/constants/axiosConfig';
export enum ClustersPermissionMap {
CLUSTERS_MANAGE = '多集群管理',
CLUSTERS_MANAGE_VIEW = '多集群管理查看',
// only used to hide the system management menu
SYS_MANAGE = '系统管理',
SYS_MANAGE_VIEW = '系统管理查看',
// Cluster
CLUSTER_ADD = '接入集群',
CLUSTER_DEL = '删除集群',
@@ -39,6 +42,12 @@ export enum ClustersPermissionMap {
MM2_DELETE = 'MM2-删除',
MM2_RESTART = 'MM2-重启',
MM2_STOP_RESUME = 'MM2-暂停&恢复',
// Connector
CONNECTOR_ADD = 'Connector-新增',
CONNECTOR_CHANGE_CONFIG = 'Connector-编辑',
CONNECTOR_DELETE = 'Connector-删除',
CONNECTOR_RESTART = 'Connector-重启',
CONNECTOR_STOP_RESUME = 'Connector-暂停&恢复',
}
export interface PermissionNode {
@@ -88,6 +97,13 @@ const CommonConfig = () => {
clustersPermissions &&
clustersPermissions.childList.forEach((node: PermissionNode) => node.has && userPermissions.push(node.permissionName));
// collect the user's permissions under system management
const configPermissions = userPermissionTree.find(
(sys: PermissionNode) => sys.permissionName === ClustersPermissionMap.SYS_MANAGE
);
configPermissions &&
configPermissions.childList.forEach((node: PermissionNode) => node.has && userPermissions.push(node.permissionName));
const hasPermission = (permissionName: ClustersPermissionMap) => permissionName && userPermissions.includes(permissionName);
setGlobal((curState: any) => ({ ...curState, permissions: allPermissions, userPermissions, hasPermission, userInfo }));

View File

@@ -189,7 +189,14 @@ const StepFormFirst = (props: SubFormProps) => {
const result: FormConnectorConfigs = {
pluginConfig: {},
};
// take a copy of the default plugin config
const defaultPluginConfig: any = {};
pluginConfig.configs.forEach(({ definition }) => {
// record each field's default value
defaultPluginConfig[definition.name] = definition?.defaultValue;
if (!getExistFormItems(pluginType).includes(definition.name)) {
const pluginConfigs = result.pluginConfig;
const group = definition.group || 'Others';
@@ -205,7 +212,7 @@ const StepFormFirst = (props: SubFormProps) => {
Object.keys(result).length &&
form.setFieldsValue({
-          configs: result,
+          configs: { ...result, defaultPluginConfig, editConnectorConfig: result.connectorConfig },
});
})
.finally(() => props.setSubmitLoading(false));
@@ -816,6 +823,8 @@ const StepFormFifth = (props: SubFormProps) => {
<InputNumber />
) : type.toUpperCase() === 'BOOLEAN' ? (
<Switch size="small" />
) : type.toUpperCase() === 'PASSWORD' ? (
<Input.Password />
) : (
<Input />
)}
@@ -947,7 +956,7 @@ export default forwardRef(
success?: {
connectClusterId: number;
connectorName: string;
-        configs: {
+        config: {
[key: string]: any;
};
};
@@ -955,6 +964,7 @@ export default forwardRef(
}) => void
) => {
const promises: Promise<any>[] = [];
const compareConfig = stepsFormRef.current[0].getFieldValue('configs'); // form values from step one
Object.values(stepsFormRef.current).forEach((form, i) => {
const promise = form
.validateFields()
@@ -985,11 +995,22 @@ export default forwardRef(
const [k, ...v] = l.split('=');
result[k] = v.join('=');
});
const editConnectorConfig = operateInfo.type === 'edit' ? compareConfig.editConnectorConfig : {}; // in edit mode, take the existing connector config
const newCompareConfig = { ...compareConfig.defaultPluginConfig, ...editConnectorConfig, ...result }; // the merged form submission
Object.keys(newCompareConfig).forEach((item) => {
if (
newCompareConfig[item] === compareConfig.defaultPluginConfig[item] ||
newCompareConfig[item]?.toString() === compareConfig.defaultPluginConfig[item]?.toString()
) {
delete newCompareConfig[item]; // drop values that equal the defaults
}
});
callback({
success: {
connectClusterId: res[0].connectClusterId,
connectorName: result['name'],
-            configs: result,
+            config: newCompareConfig,
},
});
},
@@ -1013,7 +1034,7 @@ export default forwardRef(
curClusterName = cluster.label;
}
});
-        (jsonRef as any)?.onOpen(operateInfo.type, curClusterName, info.success.configs);
+        (jsonRef as any)?.onOpen(operateInfo.type, curClusterName, info.success.config);
onClose();
}
});
@@ -1026,9 +1047,9 @@ export default forwardRef(
setCurrentStep(info.error);
} else {
setSubmitLoading(true);
-        Object.entries(info.success.configs).forEach(([key, val]) => {
+        Object.entries(info.success.config).forEach(([key, val]) => {
          if (val === null) {
-            delete info.success.configs[key];
+            delete info.success.config[key];
}
});
Utils.put(api.validateConnectorConfig, info.success).then(

View File

@@ -10,7 +10,7 @@ const PLACEHOLDER = `配置格式如下
{
"connectClusterName": "", // Connect Cluster 名称
"configs": { // 具体配置项
"config": { // 具体配置项
"name": "",
"connector.class": "",
"tasks.max": 1,
@@ -47,7 +47,7 @@ export default forwardRef((props: any, ref) => {
configs: JSON.stringify(
{
connectClusterName,
-      configs: defaultConfigs,
+      config: defaultConfigs,
},
null,
2
@@ -63,13 +63,13 @@ export default forwardRef((props: any, ref) => {
form.validateFields().then(
(data) => {
const postData = JSON.parse(data.configs);
-        postData.connectorName = postData.configs.name;
+        postData.connectorName = postData.config.name;
postData.connectClusterId = connectClusters.find((cluster) => cluster.label === postData.connectClusterName).value;
delete postData.connectClusterName;
-        Object.entries(postData.configs).forEach(([key, val]) => {
+        Object.entries(postData.config).forEach(([key, val]) => {
          if (val === null) {
-            delete postData.configs[key];
+            delete postData.config[key];
}
});
Utils.put(api.validateConnectorConfig, postData).then(
@@ -198,20 +198,20 @@ export default forwardRef((props: any, ref) => {
}
}
-      if (!v.configs || typeof v.configs !== 'object') {
-        return Promise.reject('内容缺少 configs 字段或字段格式错误');
+      if (!v.config || typeof v.config !== 'object') {
+        return Promise.reject('内容缺少 config 字段或字段格式错误');
} else {
// validate the connectorName field
-        if (!v.configs.name) {
-          return Promise.reject('configs 字段下缺少 name 项');
+        if (!v.config.name) {
+          return Promise.reject('config 字段下缺少 name 项');
} else {
-          if (type === 'edit' && v.configs.name !== defaultConfigs.name) {
+          if (type === 'edit' && v.config.name !== defaultConfigs.name) {
return Promise.reject('编辑模式下不允许修改 name 字段');
}
}
-        if (!v.configs['connector.class']) {
-          return Promise.reject('configs 字段下缺少 connector.class 项');
-        } else if (type === 'edit' && v.configs['connector.class'] !== defaultConfigs['connector.class']) {
+        if (!v.config['connector.class']) {
+          return Promise.reject('config 字段下缺少 connector.class 项');
+        } else if (type === 'edit' && v.config['connector.class'] !== defaultConfigs['connector.class']) {
return Promise.reject('编辑模式下不允许修改 connector.class 字段');
}
}
@@ -219,13 +219,13 @@ export default forwardRef((props: any, ref) => {
if (type === 'create') {
// asynchronously check whether the connector name already exists and whether the className is available
return Promise.all([
-          Utils.request(api.isConnectorExist(connectClusterId, v.configs.name)),
+          Utils.request(api.isConnectorExist(connectClusterId, v.config.name)),
Utils.request(api.getConnectorPlugins(connectClusterId)),
]).then(
([data, plugins]: [any, ConnectorPlugin[]]) => {
return data?.exist
? Promise.reject('name 与已有 Connector 重复')
-              : plugins.every((plugin) => plugin.className !== v.configs['connector.class'])
+              : plugins.every((plugin) => plugin.className !== v.config['connector.class'])
? Promise.reject('该 connectCluster 下不存在 connector.class 项配置的插件')
: Promise.resolve();
},

View File

@@ -1,8 +1,9 @@
import SmallChart from '@src/components/SmallChart';
import TagsWithHide from '@src/components/TagsWithHide';
import { Button, Tag, Tooltip, Utils, Popconfirm } from 'knowdesign';
import { Button, Tag, Tooltip, Utils, Popconfirm, AppContainer } from 'knowdesign';
import React from 'react';
import Delete from './Delete';
import { ClustersPermissionMap } from '../CommonConfig';
export const defaultPagination = {
current: 1,
pageSize: 10,
@@ -93,7 +94,8 @@ const renderLine = (record: any, metricName: string) => {
};
export const getConnectorsColumns = (arg?: any) => {
-  const columns = [
+  const [global] = AppContainer.useGlobalValue();
+  const columns: any = [
{
title: 'Connector Name',
dataIndex: 'connectorName',
@@ -213,7 +215,10 @@ export const getConnectorsColumns = (arg?: any) => {
return t && t.length > 0 ? <TagsWithHide placement="bottom" list={t} expandTagContent={(num: any) => `共有${num}`} /> : '-';
},
},
-    {
+  ];
+  if (global.hasPermission) {
+    columns.push({
title: '操作',
dataIndex: 'options',
key: 'options',
@@ -224,20 +229,24 @@ export const getConnectorsColumns = (arg?: any) => {
render: (_t: any, r: any) => {
return (
<div>
-              <Popconfirm
-                title="是否重启当前任务?"
-                onConfirm={() => arg?.optionConnect(r, 'restart')}
-                // onCancel={cancel}
-                okText="是"
-                cancelText="否"
-                overlayClassName="connect-popconfirm"
-              >
-                <Button key="restart" type="link" size="small">
-                </Button>
-              </Popconfirm>
+              {global.hasPermission(ClustersPermissionMap.CONNECTOR_RESTART) ? (
+                <Popconfirm
+                  title="是否重启当前任务?"
+                  onConfirm={() => arg?.optionConnect(r, 'restart')}
+                  // onCancel={cancel}
+                  okText="是"
+                  cancelText="否"
+                  overlayClassName="connect-popconfirm"
+                >
+                  <Button key="restart" type="link" size="small">
+                  </Button>
+                </Popconfirm>
+              ) : (
+                <></>
+              )}
-              {(r.state === 'RUNNING' || r.state === 'PAUSED') && (
+              {global.hasPermission(ClustersPermissionMap.CONNECTOR_STOP_RESUME) && (r.state === 'RUNNING' || r.state === 'PAUSED') && (
<Popconfirm
title={`是否${r.state === 'RUNNING' ? '暂停' : '继续'}当前任务?`}
onConfirm={() => arg?.optionConnect(r, r.state === 'RUNNING' ? 'stop' : 'resume')}
@@ -252,16 +261,24 @@ export const getConnectorsColumns = (arg?: any) => {
</Button>
</Popconfirm>
)}
+              {global.hasPermission(ClustersPermissionMap.CONNECTOR_CHANGE_CONFIG) ? (
+                <Button type="link" size="small" onClick={() => arg?.editConnector(r)}>
+                </Button>
+              ) : (
+                <></>
+              )}
-              <Button type="link" size="small" onClick={() => arg?.editConnector(r)}>
-              </Button>
-              <Delete record={r} onConfirm={arg?.deleteTesk}></Delete>
+              {global.hasPermission(ClustersPermissionMap.CONNECTOR_DELETE) ? (
+                <Delete record={r} onConfirm={arg?.deleteTesk}></Delete>
+              ) : (
+                <></>
+              )}
</div>
);
},
},
-  ];
+    });
+  }
return columns;
};

View File

@@ -12,6 +12,7 @@ import notification from '@src/components/Notification';
import './index.less';
import AddConnectorUseJSON from './AddConnectorUseJSON';
import HasConnector from './HasConnector';
import { ClustersPermissionMap } from '../CommonConfig';
const { request } = Utils;
const rateMap: any = {
@@ -174,21 +175,25 @@ const Connectors: React.FC = () => {
maxLength: 128,
}}
/>
<span className="add-connect">
<Button
className="add-connect-btn"
icon={<IconFont type="icon-jiahao" />}
type="primary"
onClick={() => addConnectorRef.current?.onOpen('create', addConnectorJsonRef.current)}
>
Connector
</Button>
<Dropdown overlayClassName="add-connect-dropdown-menu" overlay={menu}>
<Button className="add-connect-json" type="primary">
<IconFont type="icon-guanwangxiala" />
{global.hasPermission && global.hasPermission(ClustersPermissionMap.CONNECTOR_ADD) ? (
<span className="add-connect">
<Button
className="add-connect-btn"
icon={<IconFont type="icon-jiahao" />}
type="primary"
onClick={() => addConnectorRef.current?.onOpen('create', addConnectorJsonRef.current)}
>
Connector
</Button>
</Dropdown>
</span>
<Dropdown overlayClassName="add-connect-dropdown-menu" overlay={menu}>
<Button className="add-connect-json" type="primary">
<IconFont type="icon-guanwangxiala" />
</Button>
</Dropdown>
</span>
) : (
<></>
)}
</div>
</div>
<ProTable

View File

@@ -0,0 +1,104 @@
import React, { useState } from 'react';
import { useParams } from 'react-router-dom';
import { Button, Form, Input, Modal, Utils } from 'knowdesign';
import notification from '@src/components/Notification';
import Api from '@src/api/index';
// eslint-disable-next-line react/display-name
export default (props: { record: any; onConfirm?: () => void }) => {
const { record, onConfirm } = props;
const routeParams = useParams<{
clusterId: string;
}>();
const [form] = Form.useForm();
const [delDialogVisible, setDelDialogVisble] = useState(false);
const handleDelOk = () => {
form.validateFields().then((e) => {
const formVal = form.getFieldsValue();
formVal.clusterPhyId = Number(routeParams.clusterId);
formVal.deleteType = 0;
Utils.delete(Api.deleteGroupOffset(), { data: formVal }).then((res: any) => {
if (res === null) {
notification.success({
message: '删除消费组成功',
});
setDelDialogVisble(false);
onConfirm && onConfirm();
} else {
notification.error({
message: '删除消费组失败',
});
}
});
});
};
return (
<>
<Button
style={{ paddingLeft: 0 }}
type="link"
onClick={(_) => {
setDelDialogVisble(true);
}}
>
</Button>
<Modal
className="custom-modal"
title="确定要删除此Topic吗"
centered={true}
visible={delDialogVisible}
wrapClassName="del-topic-modal"
destroyOnClose={true}
maskClosable={false}
onOk={handleDelOk}
onCancel={(_) => {
setDelDialogVisble(false);
}}
okText="删除"
okButtonProps={{
danger: true,
size: 'small',
style: {
paddingLeft: '16px',
paddingRight: '16px',
},
}}
cancelButtonProps={{
size: 'small',
style: {
paddingLeft: '16px',
paddingRight: '16px',
},
}}
>
{/* <div className="tip-info">
<IconFont type="icon-warning-circle"></IconFont>
<span>会删除Topic的全部消息数据和ACL权限请再次输入Topic名称进行确认</span>
</div> */}
<Form form={form} labelCol={{ span: 5 }} style={{ marginTop: 18 }}>
<Form.Item label="TopicName">{record.name}</Form.Item>
<Form.Item
name="groupName"
label="GroupName"
rules={[
// { required: true },
() => ({
validator(_, value) {
if (!value) {
return Promise.reject(new Error('请输入Group名称'));
} else if (value !== record.name) {
return Promise.reject(new Error('请输入正确的Group名称'));
}
return Promise.resolve();
},
}),
]}
>
<Input placeholder="请输入" size="small"></Input>
</Form.Item>
</Form>
</Modal>
</>
);
};

View File

@@ -1,12 +1,13 @@
import React, { useState, useEffect } from 'react';
import { useParams, useHistory } from 'react-router-dom';
import { Drawer, ProTable, Utils } from 'knowdesign';
import { Button, Space, Divider, Drawer, ProTable, Utils, notification } from 'knowdesign';
import { IconFont } from '@knowdesign/icons';
import API from '@src/api/index';
import { defaultPagination, hashDataParse } from '@src/constants/common';
import { getGtoupTopicColumns } from './config';
import { ExpandedRow } from './ExpandedRow';
import ResetOffsetDrawer from './ResetOffsetDrawer';
import { useForceRefresh } from '@src/components/utils';
const { request } = Utils;
export interface MetricLine {
@@ -63,6 +64,7 @@ const GroupDetail = (props: any) => {
const [openKeys, setOpenKeys] = useState();
const [resetOffsetVisible, setResetOffsetVisible] = useState(false);
const [resetOffsetArg, setResetOffsetArg] = useState({});
const [refreshKey, forceRefresh] = useForceRefresh();
const genData = async ({ pageNo, pageSize, groupName }: any) => {
if (urlParams?.clusterId === undefined) return;
@@ -110,6 +112,23 @@ const GroupDetail = (props: any) => {
groupName: record?.groupName,
});
};
// delete a Topic from the consumer group
const deleteOffset = (record: any) => {
const params = {
clusterPhyId: +urlParams?.clusterId,
deleteType: 1, // 0: group level; 1: Topic level; 2: Partition level
groupName: record.groupName,
topicName: record.topicName,
};
Utils.delete(API.deleteGroupOffset(), { data: params }).then((data: any) => {
if (data === null) {
notification.success({
message: '删除Topic成功!',
});
genData({ pageNo: 1, pageSize: pagination.pageSize, groupName: hashData.groupName });
}
});
};
const onTableChange = (pagination: any, filters: any, sorter: any) => {
genData({ pageNo: pagination.current, pageSize: pagination.pageSize, filters, sorter, groupName: hashData.groupName });
@@ -160,7 +179,7 @@ const GroupDetail = (props: any) => {
// // 获取Consumer列表 表格模式
// getTopicGroupMetric(hashData);
// });
-  }, [hashDataParse(location.hash).groupName]);
+  }, [hashDataParse(location.hash).groupName, refreshKey]);
return (
<Drawer
@@ -182,6 +201,14 @@ const GroupDetail = (props: any) => {
// <Divider type="vertical" />
// </Space>
// }
extra={
<Space>
<span style={{ display: 'inline-block', fontSize: '15px' }} onClick={forceRefresh as () => void}>
<i className="iconfont icon-shuaxin1" style={{ cursor: 'pointer' }} />
</span>
<Divider type="vertical" />
</Space>
}
>
<ProTable
showQueryForm={false}
@@ -189,7 +216,7 @@ const GroupDetail = (props: any) => {
showHeader: false,
rowKey: 'key',
loading: loading,
-        columns: getGtoupTopicColumns({ resetOffset }),
+        columns: getGtoupTopicColumns({ resetOffset, deleteOffset }),
dataSource: topicData,
paginationProps: { ...pagination },
// noPagination: true,

View File

@@ -8,6 +8,7 @@ import { IconFont } from '@knowdesign/icons';
import API from '@src/api/index';
import { hashDataParse } from '@src/constants/common';
const { Option } = Select;
import PubSub from 'pubsub-js'
export interface MetricLine {
createTime?: number;
@@ -214,6 +215,11 @@ export const ExpandedRow: any = ({ record, groupName }: any) => {
// getTopicGroupMetric();
// }, [sortObj]);
// subscribe to the reset-offset success message
PubSub.subscribe('ConsumerGroup-ResetOffset', function(data){
getTopicGroupMetric({});
})
useEffect(() => {
const hashData = hashDataParse(location.hash);
// if (!hashData.groupName) return;

View File

@@ -4,6 +4,7 @@ import { useParams } from 'react-router-dom';
import EditTable from '../TestingProduce/component/EditTable';
import Api from '@src/api/index';
import moment from 'moment';
import PubSub from 'pubsub-js'
const CustomSelectResetTime = (props: { value?: string; onChange?: (val: Number | String) => void }) => {
const { value, onChange } = props;
@@ -106,6 +107,8 @@ export default (props: any) => {
message: '重置offset成功',
});
setVisible(false);
// publish the reset-offset success message
PubSub.publish('ConsumerGroup-ResetOffset', '1');
} else {
notification.error({
message: '重置offset失败',

View File

@@ -1,8 +1,9 @@
/* eslint-disable @typescript-eslint/explicit-module-boundary-types */
import React from 'react';
import { AppContainer } from 'knowdesign';
import { AppContainer, Button, Popconfirm } from 'knowdesign';
import TagsWithHide from '@src/components/TagsWithHide';
import { ClustersPermissionMap } from '../CommonConfig';
import Delete from './Delete';
export const runningStatusEnum: any = {
1: 'Doing',
@@ -62,6 +63,21 @@ export const getGroupColumns = (arg?: any) => {
width: 200,
render: (t: number) => (t ? t.toLocaleString() : '-'),
},
{
title: '操作',
dataIndex: 'options',
key: 'options',
width: 200,
filterTitle: true,
fixed: 'right',
render: (_t: any, r: any) => {
return (
<div>
<Delete record={r} onConfirm={arg?.deleteTesk}></Delete>
</div>
);
},
},
];
return columns;
};
@@ -103,11 +119,20 @@ export const getGtoupTopicColumns = (arg?: any) => {
title: '操作',
dataIndex: 'desc',
key: 'desc',
-      width: 150,
+      width: 200,
render: (value: any, record: any) => {
return (
<div>
<a onClick={() => arg.resetOffset(record)}>Offset</a>
<Popconfirm
placement="top"
title={`是否要删除当前Topic`}
onConfirm={() => arg.deleteOffset(record)}
okText="是"
cancelText="否"
>
<Button type="link"></Button>
</Popconfirm>
</div>
);
},

View File

@@ -58,6 +58,11 @@ const BrokerList: React.FC = (props: any) => {
genData({ pageNo: pagination.current, pageSize: pagination.pageSize, filters, sorter });
};
// delete Group
const deleteTesk = () => {
genData({ pageNo: 1, pageSize: pagination.pageSize });
};
useEffect(() => {
genData({
pageNo: 1,
@@ -115,7 +120,7 @@ const BrokerList: React.FC = (props: any) => {
showHeader: false,
rowKey: 'group_list',
loading: loading,
-        columns: getGroupColumns(),
+        columns: getGroupColumns(deleteTesk),
dataSource: data,
paginationProps: { ...pagination },
attrs: {

View File

@@ -522,21 +522,15 @@ const ConnectorForm = (props: {
const params = {
...values,
id: initFieldsValue?.id,
-      jmxProperties: values.jmxProperties ? `{ "jmxProperties": "${values.jmxProperties}" }` : undefined,
+      jmxProperties: values.jmxProperties ? `{ "jmxPort": "${values.jmxProperties}" }` : undefined,
};
-    Utils.put(api.batchConnectClusters, [params])
-      .then((res) => {
-        // setSelectedTabKey(undefined);
-        getConnectClustersList();
-        notification.success({
-          message: '修改Connect集群成功',
-        });
-      })
-      .catch((error) => {
-        notification.success({
-          message: '修改Connect集群失败',
-        });
-      });
+    Utils.put(api.batchConnectClusters, [params]).then((res) => {
+      // setSelectedTabKey(undefined);
+      getConnectClustersList();
+      notification.success({
+        message: '修改Connect集群成功',
+      });
+    });
};
const onCancel = () => {

View File

@@ -135,6 +135,7 @@ const AddDrawer = forwardRef((_, ref) => {
if (configType === 'custom') {
// 1. custom permission
// TODO: needs joint debugging with the backend
const { resourceType, resourcePatternType, aclPermissionType, aclOperation, aclClientHost } = formData;
submitData.push({
clusterId,
@@ -281,6 +282,42 @@ const AddDrawer = forwardRef((_, ref) => {
</Form.Item>
<Form.Item dependencies={['configType']} style={{ marginBottom: 0 }}>
{({ getFieldValue }) => {
const SelectFormItems = (props: { type: string }) => {
const { type } = props;
return (
<Form.Item
name={`${type}Name`}
dependencies={[`${type}PatternType`]}
validateTrigger="onBlur"
rules={[
({ getFieldValue }) => ({
validator: (rule: any, value: string) => {
if (!value) {
return Promise.reject(`${type}Name 不能为空`);
}
if (type === 'topic' && getFieldValue(`${type}PatternType`) === ACL_PATTERN_TYPE['Literal']) {
return Utils.request(api.getTopicMetadata(clusterId as any, value)).then((res: any) => {
return res?.exist ? Promise.resolve() : Promise.reject('该 Topic 不存在');
});
}
return Promise.resolve();
},
}),
]}
>
<AutoComplete
filterOption={(value, option) => {
if (option?.value.includes(value)) {
return true;
}
return false;
}}
options={type === 'topic' ? topicMetaData : groupMetaData}
placeholder={`请输入 ${type}Name`}
/>
</Form.Item>
);
};
const PatternTypeFormItems = (props: { type: string }) => {
const { type } = props;
const UpperCaseType = type[0].toUpperCase() + type.slice(1);
@@ -388,6 +425,27 @@ const AddDrawer = forwardRef((_, ref) => {
}))}
/>
</Form.Item>
<Form.Item dependencies={['resourceType']}>
{({ getFieldValue }) => {
const type = getFieldValue('resourceType');
if (type === ACL_RESOURCE_TYPE['Cluster'] || type === ACL_RESOURCE_TYPE['TransactionalId']) {
// TODO: needs joint debugging with the backend cluster and transaction APIs
return (
<Form.Item
name={`${type === 4 ? 'cluster' : 'transactionalId'}`}
rules={[{ required: true, message: `${type === 4 ? 'Cluster名称' : 'TransactionalId'} 不能为空` }]}
>
<Input placeholder={`请输入${type === 4 ? 'Cluster名称' : 'TransactionalId'}`}></Input>
</Form.Item>
);
} else if (type === ACL_RESOURCE_TYPE['Topic']) {
return <PatternTypeFormItems type="topic" />;
} else if (type === ACL_RESOURCE_TYPE['Group']) {
return <PatternTypeFormItems type="group" />;
}
return null;
}}
</Form.Item>
<Form.Item dependencies={['resourceType']} style={{ marginBottom: 0 }}>
{({ getFieldValue }) => {
form.resetFields(['aclOperation']);

View File

@@ -8,6 +8,7 @@ import { useParams } from 'react-router-dom';
import TagsWithHide from '@src/components/TagsWithHide';
import SwitchTab from '@src/components/SwitchTab';
import RenderEmpty from '@src/components/RenderEmpty';
import { useForceRefresh } from '@src/components/utils';
interface PropsType {
hashData: any;
@@ -401,11 +402,18 @@ export default (props: PropsType) => {
const { hashData } = props;
const [showMode, setShowMode] = useState<string>('card');
const [refreshKey, forceRefresh] = useForceRefresh();
return (
<>
<div className="brokers-tab-container">
<div className="brokers-tab-container" key={`${refreshKey}`}>
<div className="overview">
<div className="left">
<span
style={{ display: 'inline-block', padding: '0 10px', marginRight: '10px', borderRight: '1px solid #ccc', fontSize: '15px' }}
onClick={forceRefresh as () => void}
>
<i className="iconfont icon-shuaxin1" style={{ cursor: 'pointer' }} />
</span>
<PartitionSummary clusterId={clusterId} topicName={hashData.topicName} />
</div>
<div className="cases-box">

View File

@@ -10,6 +10,7 @@ import { ClustersPermissionMap } from '../CommonConfig';
import ResetOffsetDrawer from './ResetOffsetDrawer';
import SwitchTab from '@src/components/SwitchTab';
import ContentWithCopy from '@src/components/CopyContent';
import PubSub from "pubsub-js";
const { Option } = Select;
@@ -335,6 +336,11 @@ export default (props: any) => {
});
}, [visible]);
// subscribe to the reset-offset success message
PubSub.subscribe('TopicDetail-ResetOffset', function(message, data){
getTopicGroupMetric({hashData: data});
})
useEffect(() => {
if (partitionList.length === 0) return;
getTopicGroupMetricHistory(partitionList, hashData);

View File

@@ -4,6 +4,7 @@ import { useParams } from 'react-router-dom';
import EditTable from '../TestingProduce/component/EditTable';
import Api from '@src/api/index';
import moment from 'moment';
import PubSub from "pubsub-js";
const CustomSelectResetTime = (props: { value?: string; onChange?: (val: Number | String) => void }) => {
const { value, onChange } = props;
@@ -106,6 +107,13 @@ export default (props: any) => {
message: '重置offset成功',
});
setResetOffsetVisible(false);
// publish the reset-offset success message
PubSub.publish('TopicDetail-ResetOffset',
{
groupName: record.groupName,
topicName: record.topicName
}
);
} else {
notification.error({
message: '重置offset失败',

View File

@@ -81,7 +81,8 @@ export const getTopicMessagesColmns = () => {
title: 'Offset',
dataIndex: 'offset',
key: 'offset',
-      render: (t: number) => (t ? t.toLocaleString() : '-'),
+      sorter: true,
+      render: (t: number) => (+t ? t.toLocaleString() : '-'),
},
{
title: 'Timestamp',

View File

@@ -26,6 +26,7 @@
.left {
display: flex;
align-items: center;
.info-box {
display: flex;
height: 36px;

View File

@@ -15,9 +15,21 @@ import Replicator from './Replicator';
import './index.less';
import TopicDetailHealthCheck from '@src/components/CardBar/TopicDetailHealthCheck';
import { hashDataParse } from '@src/constants/common';
import { useForceRefresh } from '@src/components/utils';
const { TabPane } = Tabs;
const Reload = (props: any) => {
return (
<span
style={{ display: 'inline-block', padding: '0 10px', marginRight: '10px', borderRight: '1px solid #ccc', fontSize: '15px' }}
onClick={props.forceRefresh as () => void}
>
<i className="iconfont icon-shuaxin1" style={{ cursor: 'pointer' }} />
</span>
);
};
const OperationsSlot: any = {
// eslint-disable-next-line react/display-name
// ['Partitions']: (arg: any) => {
@@ -70,17 +82,20 @@ const OperationsSlot: any = {
// eslint-disable-next-line react/display-name
['ConsumerGroups']: (arg: any) => {
return (
-      <SearchInput
-        onSearch={arg.setSearchKeywords}
-        attrs={{
-          value: arg.searchValue,
-          onChange: arg.setSearchValue,
-          placeholder: '请输入Consumer Group',
-          size: 'small',
-          style: { width: '210px', marginRight: '2px' },
-          maxLength: 128,
-        }}
-      />
+      <>
+        <Reload {...arg} />
+        <SearchInput
+          onSearch={arg.setSearchKeywords}
+          attrs={{
+            value: arg.searchValue,
+            onChange: arg.setSearchValue,
+            placeholder: '请输入Consumer Group',
+            size: 'small',
+            style: { width: '210px', marginRight: '2px' },
+            maxLength: 128,
+          }}
+        />
+      </>
);
},
};
@@ -94,6 +109,7 @@ const TopicDetail = (props: any) => {
const [searchValue, setSearchValue] = useState<string>('');
const [visible, setVisible] = useState(false);
const [hashData, setHashData] = useState<any>({});
const [refreshKey, forceRefresh] = useForceRefresh();
const callback = (key: any) => {
setSearchValue('');
@@ -184,7 +200,7 @@ const TopicDetail = (props: any) => {
onChange={callback}
tabBarExtraContent={
OperationsSlot[positionType] &&
-            OperationsSlot[positionType]({ ...props, setSearchKeywords, setSearchValue, searchValue, positionType })
+            OperationsSlot[positionType]({ ...props, setSearchKeywords, setSearchValue, searchValue, positionType, forceRefresh })
}
destroyInactiveTabPane
>
@@ -196,7 +212,7 @@ const TopicDetail = (props: any) => {
</TabPane>
<TabPane tab="ConsumerGroups" key="ConsumerGroups">
{positionType === 'ConsumerGroups' && (
<Consumers searchKeywords={searchKeywords} positionType={positionType} hashData={hashData} />
<Consumers searchKeywords={searchKeywords} positionType={positionType} hashData={hashData} key={`${refreshKey}`} />
)}
</TabPane>
<TabPane tab="ACLs" key="ACLs">

View File

@@ -1,7 +1,22 @@
/* eslint-disable react/display-name */
import React, { useState, useEffect } from 'react';
import { useHistory, useParams } from 'react-router-dom';
import { AppContainer, Input, ProTable, Select, Switch, Tooltip, Utils, Dropdown, Menu, Button, Divider, Tag } from 'knowdesign';
import {
AppContainer,
Input,
ProTable,
Select,
Switch,
Tooltip,
Utils,
Dropdown,
Menu,
Button,
Divider,
Tag,
Popconfirm,
notification,
} from 'knowdesign';
import { IconFont } from '@knowdesign/icons';
import Create from './Create';
import './index.less';
@@ -85,6 +100,21 @@ const AutoPage = (props: any) => {
setTopicListLoading(false);
});
};
const deleteTopicData = (record: any) => {
const params = {
clusterId: Number(routeParams.clusterId),
topicName: record.topicName,
};
Utils.post(Api.deleteTopicData(), params).then((data: any) => {
if (data === null) {
notification.success({
message: '清除数据成功',
});
getTopicsList();
}
});
};
useEffect(() => {
getTopicsList();
}, [sortObj, showInternalTopics, searchKeywords, pageIndex, pageSize]);
@@ -247,7 +277,7 @@ const AutoPage = (props: any) => {
dataIndex: 'desc',
key: 'desc',
fixed: 'right',
width: 140,
width: 200,
render: (value: any, record: any) => {
return (
<div className="operation-list">
@@ -257,6 +287,19 @@ const AutoPage = (props: any) => {
<></>
)}
{global.hasPermission(ClustersPermissionMap.TOPIC_DEL) ? <Delete record={record} onConfirm={getTopicsList}></Delete> : <></>}
{global.hasPermission(ClustersPermissionMap.TOPIC_DEL) ? ( // TODO: replace with a dedicated clear-data permission
<Popconfirm
placement="topRight"
title={`是否要清空当前Topic的数据`}
onConfirm={() => deleteTopicData(record)}
okText="是"
cancelText="否"
>
<Button type="link">清空</Button>
</Popconfirm>
) : (
<></>
)}
</div>
);
},

View File

@@ -1,15 +1,13 @@
package com.xiaojukeji.know.streaming.km.core.service.acl;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.resource.ResourceType;
import java.util.List;
public interface KafkaAclService {
Result<List<AclBinding>> getAclFromKafka(Long clusterPhyId);
public interface KafkaAclService extends MetaDataService<AclBinding> {
List<KafkaAclPO> getKafkaAclFromDB(Long clusterPhyId);
Integer countKafkaAclFromDB(Long clusterPhyId);
@@ -17,10 +15,5 @@ public interface KafkaAclService {
Integer countResTypeAndDistinctFromDB(Long clusterPhyId, ResourceType resourceType);
Integer countKafkaUserAndDistinctFromDB(Long clusterPhyId);
List<KafkaAclPO> getKafkaResTypeAclFromDB(Long clusterPhyId, Integer resType);
List<KafkaAclPO> getTopicAclFromDB(Long clusterPhyId, String topicName);
List<KafkaAclPO> getGroupAclFromDB(Long clusterPhyId, String groupName);
}
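
The `MetaDataService` interface that `KafkaAclService` now extends is not included in this diff. The sketch below is a hypothetical reconstruction inferred from `KafkaAclServiceImpl` further down; the Connect-side services use different fetch/write signatures (a `ConnectCluster` parameter, a `Tuple` result), so the real interface presumably declares those as additional variants.

```java
import java.util.List;

import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;

// Hypothetical sketch of MetaDataService<T>, inferred from its implementors
// in this commit; the actual interface lives elsewhere in the repository.
public interface MetaDataService<T> {
    // fetch the current metadata snapshot from the live cluster
    Result<List<T>> getDataFromKafka(ClusterPhy clusterPhy);

    // sync the snapshot into the DB: insert new rows, delete stale ones
    void writeToDB(Long clusterPhyId, List<T> dataList);

    // purge all rows belonging to a deleted Kafka cluster
    int deleteInDBByKafkaClusterId(Long clusterPhyId);
}
```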

View File

@@ -3,10 +3,6 @@ package com.xiaojukeji.know.streaming.km.core.service.acl;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.acl.ACLAtomParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import org.apache.kafka.common.resource.ResourceType;
import java.util.Date;
import java.util.List;
public interface OpKafkaAclService {
/**
@@ -19,14 +15,5 @@ public interface OpKafkaAclService {
*/
Result<Void> deleteKafkaAcl(ACLAtomParam aclAtomParam, String operator);
/**
* 删除ACL
*/
Result<Void> deleteKafkaAclByResName(ResourceType resourceType, String resourceName, String operator);
Result<Void> insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO);
void batchUpdateAcls(Long clusterPhyId, List<KafkaAclPO> poList);
int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime);
}

View File

@@ -11,6 +11,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter;
import com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
@@ -18,8 +19,6 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.KafkaAclDAO;
@@ -36,11 +35,13 @@ import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.utils.SecurityUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import scala.jdk.javaapi.CollectionConverters;
@@ -77,18 +78,49 @@ public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implemen
}
@Override
public Result<List<AclBinding>> getAclFromKafka(Long clusterPhyId) {
if (LoadedClusterPhyCache.getByPhyId(clusterPhyId) == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
}
public Result<List<AclBinding>> getDataFromKafka(ClusterPhy clusterPhy) {
try {
return (Result<List<AclBinding>>) versionControlService.doHandler(getVersionItemType(), getMethodName(clusterPhyId, ACL_GET_FROM_KAFKA), new ClusterPhyParam(clusterPhyId));
Result<List<AclBinding>> dataResult = (Result<List<AclBinding>>) versionControlService.doHandler(getVersionItemType(), getMethodName(clusterPhy.getId(), ACL_GET_FROM_KAFKA), new ClusterPhyParam(clusterPhy.getId()));
if (dataResult.failed()) {
// propagate the failure instead of silently discarding it
return Result.buildFromIgnoreData(dataResult);
}
return Result.buildSuc(dataResult.getData());
} catch (VCHandlerNotExistException e) {
return Result.buildFailure(e.getResultStatus());
}
}
@Override
public void writeToDB(Long clusterPhyId, List<AclBinding> dataList) {
Map<String, KafkaAclPO> dbPOMap = this.getKafkaAclFromDB(clusterPhyId).stream().collect(Collectors.toMap(KafkaAclPO::getUniqueField, Function.identity()));
long now = System.currentTimeMillis();
for (AclBinding aclBinding: dataList) {
KafkaAclPO newPO = KafkaAclConverter.convert2KafkaAclPO(clusterPhyId, aclBinding, now);
KafkaAclPO oldPO = dbPOMap.remove(newPO.getUniqueField());
if (oldPO == null) {
// newly added ACL, insert it
this.insertAndIgnoreDuplicate(newPO);
}
// unchanged ACLs need no update
}
// delete ACLs that no longer exist on the cluster
for (KafkaAclPO dbPO: dbPOMap.values()) {
kafkaAclDAO.deleteById(dbPO);
}
}
@Override
public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
LambdaQueryWrapper<KafkaAclPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId);
return kafkaAclDAO.delete(lambdaQueryWrapper);
}
@Override
public List<KafkaAclPO> getKafkaAclFromDB(Long clusterPhyId) {
LambdaQueryWrapper<KafkaAclPO> queryWrapper = new LambdaQueryWrapper<>();
@@ -116,7 +148,7 @@ public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implemen
return 0;
}
return (int)poList.stream().map(elem -> elem.getResourceName()).distinct().count();
return (int)poList.stream().map(KafkaAclPO::getResourceName).distinct().count();
}
@Override
@@ -130,15 +162,7 @@ public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implemen
return 0;
}
return (int)poList.stream().map(elem -> elem.getPrincipal()).distinct().count();
}
@Override
public List<KafkaAclPO> getKafkaResTypeAclFromDB(Long clusterPhyId, Integer resType) {
LambdaQueryWrapper<KafkaAclPO> queryWrapper = new LambdaQueryWrapper<>();
queryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId);
queryWrapper.eq(KafkaAclPO::getResourceType, resType);
return kafkaAclDAO.selectList(queryWrapper);
return (int)poList.stream().map(KafkaAclPO::getPrincipal).distinct().count();
}
@Override
@@ -152,15 +176,6 @@ public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implemen
return kafkaAclDAO.selectList(queryWrapper);
}
@Override
public List<KafkaAclPO> getGroupAclFromDB(Long clusterPhyId, String groupName) {
LambdaQueryWrapper<KafkaAclPO> queryWrapper = new LambdaQueryWrapper<>();
queryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId);
queryWrapper.eq(KafkaAclPO::getResourceType, ResourceType.GROUP.code());
queryWrapper.eq(KafkaAclPO::getResourceName, groupName);
return kafkaAclDAO.selectList(queryWrapper);
}
/**************************************************** private method ****************************************************/
private Result<List<AclBinding>> getAclByZKClient(VersionItemParam itemParam){
@@ -170,7 +185,7 @@ public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implemen
for (ZkAclStore store: CollectionConverters.asJava(ZkAclStore.stores())) {
Result<List<AclBinding>> rl = this.getSpecifiedTypeAclByZKClient(param.getClusterPhyId(), store.patternType());
if (rl.failed()) {
return rl;
return Result.buildFromIgnoreData(rl);
}
aclList.addAll(rl.getData());
@@ -229,4 +244,19 @@ public class KafkaAclServiceImpl extends BaseKafkaVersionControlService implemen
return Result.buildSuc(kafkaAclList);
}
private Result<Void> insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO) {
try {
kafkaAclDAO.insert(kafkaAclPO);
return Result.buildSuc();
} catch (DuplicateKeyException dke) {
// Insert directly; a duplicate key means the row already exists in full, so the conflict can be safely ignored
return Result.buildSuc();
} catch (Exception e) {
log.error("method=insertAndIgnoreDuplicate||kafkaAclPO={}||errMsg=exception", kafkaAclPO, e);
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
}
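
`getDataFromKafka` and `writeToDB` now form a fetch-then-sync pair. A minimal sketch of how a periodic task might drive them, assuming a `SyncKafkaAclTask` class and per-cluster iteration that are not shown in this diff:

```java
import java.util.List;

import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import org.apache.kafka.common.acl.AclBinding;

// Hypothetical sync task; the real scheduler wiring is not part of this diff.
public class SyncKafkaAclTask {
    private final KafkaAclService kafkaAclService;

    public SyncKafkaAclTask(KafkaAclService kafkaAclService) {
        this.kafkaAclService = kafkaAclService;
    }

    // assumed to be invoked periodically for every loaded cluster
    public void processClusterPhy(ClusterPhy clusterPhy) {
        Result<List<AclBinding>> dataResult = kafkaAclService.getDataFromKafka(clusterPhy);
        if (dataResult.failed()) {
            return; // keep the previous DB snapshot when the cluster is unreachable
        }
        kafkaAclService.writeToDB(clusterPhy.getId(), dataResult.getData());
    }
}
```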

View File

@@ -20,7 +20,6 @@ import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistExcept
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.mysql.KafkaAclDAO;
@@ -32,7 +31,6 @@ import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.acl.*;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
@@ -41,8 +39,6 @@ import scala.jdk.javaapi.CollectionConverters;
import javax.annotation.PostConstruct;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*;
@@ -169,11 +165,6 @@ public class OpKafkaAclServiceImpl extends BaseKafkaVersionControlService implem
return rv;
}
@Override
public Result<Void> deleteKafkaAclByResName(ResourceType resourceType, String resourceName, String operator) {
return Result.buildSuc();
}
@Override
public Result<Void> insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO) {
try {
@@ -190,34 +181,6 @@ public class OpKafkaAclServiceImpl extends BaseKafkaVersionControlService implem
}
}
@Override
public void batchUpdateAcls(Long clusterPhyId, List<KafkaAclPO> poList) {
LambdaQueryWrapper<KafkaAclPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId);
Map<String, KafkaAclPO> dbPOMap = kafkaAclDAO.selectList(lambdaQueryWrapper).stream().collect(Collectors.toMap(KafkaAclPO::getUniqueField, Function.identity()));
for (KafkaAclPO po: poList) {
KafkaAclPO dbPO = dbPOMap.remove(po.getUniqueField());
if (dbPO == null) {
// newly added ACL
this.insertAndIgnoreDuplicate(po);
}
}
// delete ACLs that no longer exist
for (KafkaAclPO dbPO: dbPOMap.values()) {
kafkaAclDAO.deleteById(dbPO);
}
}
@Override
public int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime) {
LambdaQueryWrapper<KafkaAclPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId);
lambdaQueryWrapper.le(KafkaAclPO::getUpdateTime, beforeTime);
return kafkaAclDAO.delete(lambdaQueryWrapper);
}
/**************************************************** private method ****************************************************/
private Result<Void> deleteInDB(KafkaAclPO kafkaAclPO) {

View File

@@ -8,6 +8,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyAddedEvent;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
import com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
@@ -146,6 +147,9 @@ public class ClusterPhyServiceImpl implements ClusterPhyService {
String.format("删除集群:%s",clusterPhy.toString()));
opLogWrapService.saveOplogAndIgnoreException(oplogDTO);
// 发布删除集群事件
SpringTool.publish(new ClusterPhyDeletedEvent(this, clusterPhyId));
return Result.buildSuc();
} catch (Exception e) {
log.error("method=removeClusterPhyById||clusterPhyId={}||operator={}||msg=remove cluster failed||errMsg=exception!",
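
The `ClusterPhyDeletedEvent` class and its consumers are not shown in this commit. A minimal listener sketch of how the event can pair with the new `deleteInDBByKafkaClusterId` methods; the listener class and the event's accessor are assumptions:

```java
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;

// Hypothetical listener: each metadata service cleans up its own table when a
// Kafka cluster is removed.
@Component
public class ClusterPhyDeletedListener implements ApplicationListener<ClusterPhyDeletedEvent> {
    private final KafkaAclService kafkaAclService;
    private final ConnectClusterService connectClusterService;

    public ClusterPhyDeletedListener(KafkaAclService kafkaAclService,
                                     ConnectClusterService connectClusterService) {
        this.kafkaAclService = kafkaAclService;
        this.connectClusterService = connectClusterService;
    }

    @Override
    public void onApplicationEvent(ClusterPhyDeletedEvent event) {
        Long clusterPhyId = event.getClusterPhyId(); // assumed accessor
        kafkaAclService.deleteInDBByKafkaClusterId(clusterPhyId);
        connectClusterService.deleteInDBByKafkaClusterId(clusterPhyId);
    }
}
```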

View File

@@ -4,14 +4,16 @@ package com.xiaojukeji.know.streaming.km.core.service.connect.cluster;
import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSGroupDescription;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import java.util.List;
/**
* Connect-Cluster
*/
public interface ConnectClusterService {
public interface ConnectClusterService extends MetaDataService<KSGroupDescription> {
Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata);
List<ConnectCluster> listByKafkaCluster(Long kafkaClusterPhyId);

View File

@@ -24,9 +24,9 @@ import com.xiaojukeji.know.streaming.km.core.cache.CollectedMetricsLocalCache;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.ConnectClusterMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.cluster.ConnectClusterMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
@@ -43,7 +43,7 @@ import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultS
* @author didi
*/
@Service
public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService implements ConnectClusterMetricService {
public class ConnectClusterMetricServiceImpl extends BaseConnectMetricService implements ConnectClusterMetricService {
protected static final ILog LOGGER = LogFactory.getLog(ConnectClusterMetricServiceImpl.class);
public static final String CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG = "getWorkerMetricAvg";
@@ -86,8 +86,7 @@ public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService
String connectClusterMetricKey = CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterPhyId, metric);
Float keyValue = CollectedMetricsLocalCache.getConnectClusterMetrics(connectClusterMetricKey);
if (keyValue != null) {
ConnectClusterMetrics connectClusterMetrics = ConnectClusterMetrics.initWithMetric(connectClusterPhyId,metric,keyValue);
return Result.buildSuc(connectClusterMetrics);
return Result.buildSuc(new ConnectClusterMetrics(connectClusterPhyId, metric, keyValue));
}
Result<ConnectClusterMetrics> ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, metric);
@@ -209,8 +208,7 @@ public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService
try {
// 2. fetch the JMX metric
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxInfo.getJmxObjectName()), jmxInfo.getJmxAttribute()).toString();
ConnectWorkerMetrics connectWorkerMetrics = ConnectWorkerMetrics.initWithMetric(connectClusterId, workerId, metric, Float.valueOf(value));
return Result.buildSuc(connectWorkerMetrics);
return Result.buildSuc(new ConnectWorkerMetrics(connectClusterId, workerId, metric, Float.valueOf(value)));
} catch (Exception e) {
LOGGER.error("method=getConnectWorkerMetricsByJMX||connectClusterId={}||workerId={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, metric, jmxInfo.getJmxObjectName(), e.getClass().getName());
@@ -231,8 +229,8 @@ public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService
.collect(Collectors.toList());
}
protected List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Long, List<MetricPointVO>>> map){
private List<MetricMultiLinesVO> metricMap2VO(Long connectClusterId,
Map<String/*metric*/, Map<Long, List<MetricPointVO>>> map){
List<MetricMultiLinesVO> multiLinesVOS = new ArrayList<>();
if (map == null || map.isEmpty()) {
// return immediately if the map is empty

View File

@@ -38,6 +38,14 @@ public class ConnectClusterServiceImpl implements ConnectClusterService {
@Autowired
private OpLogWrapService opLogWrapService;
@Override
public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
LambdaQueryWrapper<ConnectClusterPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId);
// delete(...) takes a query wrapper; deleteById expects a primary key
return connectClusterDAO.delete(lambdaQueryWrapper);
}
@Override
public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) {
ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName());

View File

@@ -4,49 +4,30 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluste
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import java.util.List;
import java.util.Properties;
import java.util.Set;
/**
* View connectors
*/
public interface ConnectorService {
Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator);
public interface ConnectorService extends MetaDataService<KSConnector> {
/**
* List the names of all connectors
*/
Result<List<String>> listConnectorsFromCluster(Long connectClusterId);
Result<List<String>> listConnectorsFromCluster(ConnectCluster connectCluster);
/**
* Get a single connector's info
*/
Result<KSConnectorInfo> getConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName);
Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName);
Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName);
Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);
void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet);
void addNewToDB(KSConnector connector);
Result<KSConnector> getConnectorFromKafka(Long connectClusterId, String connectorName);
List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId);
@@ -57,6 +38,4 @@ public interface ConnectorService {
ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName);
ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName);
void completeMirrorMakerInfo(ConnectCluster connectCluster, List<KSConnector> connectorList);
}

View File

@@ -0,0 +1,26 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import java.util.Properties;
/**
* Operate on connectors (create / pause / resume / restart / delete / update)
*/
public interface OpConnectorService {
Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator);
Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator);
Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator);
void addNewToDB(KSConnector connector);
}
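
This commit splits the connector API: `ConnectorService` keeps the read/metadata side, while the new `OpConnectorService` owns the mutating REST calls. A minimal caller sketch of the split; the facade class and the follow-up re-read are assumptions, while the method signatures come from the two interfaces:

```java
import java.util.Properties;

import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;

// Hypothetical facade showing the read/write split introduced by this commit.
public class ConnectorFacade {
    private final ConnectorService connectorService;     // read / metadata side
    private final OpConnectorService opConnectorService; // write / operations side

    public ConnectorFacade(ConnectorService connectorService, OpConnectorService opConnectorService) {
        this.connectorService = connectorService;
        this.opConnectorService = opConnectorService;
    }

    public Result<KSConnectorInfo> create(Long connectClusterId, String connectorName,
                                          Properties configs, String operator) {
        Result<KSConnectorInfo> created =
                opConnectorService.createConnector(connectClusterId, connectorName, configs, operator);
        if (created.failed()) {
            return created;
        }
        // re-read through the metadata side and persist, so the DB reflects the new connector
        Result<KSConnector> fetched = connectorService.getConnectorFromKafka(connectClusterId, connectorName);
        if (fetched.hasData()) {
            opConnectorService.addNewToDB(fetched.getData());
        }
        return created;
    }
}
```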

View File

@@ -18,6 +18,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.metrice.connect.Connector
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectStatusEnum;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
@@ -32,7 +33,7 @@ import com.xiaojukeji.know.streaming.km.core.service.connect.connector.Connector
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.connector.ConnectorMetricESDAO;
import org.springframework.beans.factory.annotation.Autowired;
@@ -52,7 +53,7 @@ import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultS
* @author didi
*/
@Service
public class ConnectorMetricServiceImpl extends BaseConnectorMetricService implements ConnectorMetricService {
public class ConnectorMetricServiceImpl extends BaseConnectMetricService implements ConnectorMetricService {
protected static final ILog LOGGER = LogFactory.getLog(ConnectorMetricServiceImpl.class);
public static final String CONNECTOR_METHOD_DO_NOTHING = "doNothing";
@@ -67,6 +68,8 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
public static final String CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE = "getMetricHealthScore";
public static final String CONNECTOR_METHOD_GET_METRIC_RUNNING_STATUS = "getMetricRunningStatus";
@Autowired
private ConnectorMetricESDAO connectorMetricESDAO;
@@ -98,11 +101,12 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
@Override
protected void initRegisterVCHandler() {
registerVCHandler(CONNECTOR_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, this::getConnectorTaskMetricsAvg);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, this::getConnectorTaskMetricsMax);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, this::getConnectorTaskMetricsSum);
registerVCHandler(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE, this::getMetricHealthScore);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, this::getConnectorTaskMetricsAvg);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, this::getConnectorTaskMetricsMax);
registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, this::getConnectorTaskMetricsSum);
registerVCHandler(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE, this::getMetricHealthScore);
registerVCHandler(CONNECTOR_METHOD_GET_METRIC_RUNNING_STATUS, this::getMetricRunningStatus);
}
@Override
@@ -111,8 +115,7 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
Float keyValue = CollectedMetricsLocalCache.getConnectorMetrics(connectorMetricKey);
if (null != keyValue) {
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterPhyId, connectorName, metric, keyValue);
return Result.buildSuc(connectorMetrics);
return Result.buildSuc(new ConnectorMetrics(connectClusterPhyId, connectorName, metric, keyValue));
}
Result<ConnectorMetrics> ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, connectorName, metric);
@@ -216,6 +219,20 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
return Result.buildSuc(metrics);
}
private Result<ConnectorMetrics> getMetricRunningStatus(VersionItemParam metricParam) {
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
String connectorName = param.getConnectorName();
String metricName = param.getMetricName();
ConnectorPO connector = connectorService.getConnectorFromDB(connectClusterId, connectorName);
if (connector == null) {
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metricName, (float)ConnectStatusEnum.UNKNOWN.getStatus()));
}
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metricName, (float)ConnectStatusEnum.getByValue(connector.getState()).getStatus()));
}
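
`getMetricRunningStatus` maps the state string stored in the connector table to a numeric metric via `ConnectStatusEnum`, which is not shown in this diff. A sketch of the shape the code above relies on; the concrete states and numeric codes are assumptions:

```java
// Hypothetical shape of ConnectStatusEnum; the real enum and its codes are
// defined elsewhere in the codebase and may differ.
public enum ConnectStatusEnum {
    UNKNOWN(-1, "UNKNOWN"),
    RUNNING(1, "RUNNING"),
    PAUSED(2, "PAUSED"),
    FAILED(3, "FAILED");

    private final int status;
    private final String state;

    ConnectStatusEnum(int status, String state) {
        this.status = status;
        this.state = state;
    }

    public int getStatus() {
        return status;
    }

    // map the stored state string back to an enum constant
    public static ConnectStatusEnum getByValue(String state) {
        for (ConnectStatusEnum e : values()) {
            if (e.state.equals(state)) {
                return e;
            }
        }
        return UNKNOWN; // unrecognized or null state
    }
}
```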
private Result<ConnectorMetrics> getConnectWorkerMetricSum(VersionItemParam metricParam) {
ConnectorMetricParam param = (ConnectorMetricParam) metricParam;
Long connectClusterId = param.getConnectClusterId();
@@ -240,12 +257,16 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
if (!isCollected) {
return Result.buildFailure(NOT_EXIST);
}
return Result.buildSuc(ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum));
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, sum));
}
// metric: kafka.connect:type=connect-worker-metrics,connector="{connector}"
private Result<ConnectorMetrics> getConnectorMetric(Long connectClusterId, String workerId, String connectorName, String metric, ConnectorTypeEnum connectorType) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
if (jmxInfo.getType() != null) {
if (connectorType == null) {
@@ -257,9 +278,6 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
}
}
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
@@ -270,8 +288,7 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
try {
// 2. fetch the JMX metric
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, Float.valueOf(value));
return Result.buildSuc(connectorMetrics);
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, Float.valueOf(value)));
} catch (InstanceNotFoundException e) {
// ignore this error; it occurs when the metric does not exist in JMX
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName));
@@ -296,8 +313,7 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum / ret.getData().size());
return Result.buildSuc(connectorMetrics);
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, sum / ret.getData().size()));
}
private Result<ConnectorMetrics> getConnectorTaskMetricsMax(VersionItemParam metricParam){
@@ -313,8 +329,7 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
}
Float max = ret.getData().stream().max((a, b) -> a.getMetric(metric).compareTo(b.getMetric(metric))).get().getMetric(metric);
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, max);
return Result.buildSuc(connectorMetrics);
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, max));
}
private Result<ConnectorMetrics> getConnectorTaskMetricsSum(VersionItemParam metricParam){
@@ -330,8 +345,7 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
}
Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get();
ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum);
return Result.buildSuc(connectorMetrics);
return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, sum));
}
@@ -358,6 +372,9 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
private Result<ConnectorTaskMetrics> getConnectorTaskMetric(Long connectClusterId, String workerId, String connectorName, Integer taskId, String metric, ConnectorTypeEnum connectorType) {
VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric);
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
if (jmxInfo.getType() != null) {
if (connectorType == null) {
@@ -369,9 +386,6 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
}
}
if (null == jmxInfo) {
return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST);
}
String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName, taskId);
JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId);
@@ -382,8 +396,7 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple
try {
// 2. fetch the JMX metric
String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString();
ConnectorTaskMetrics connectorTaskMetrics = ConnectorTaskMetrics.initWithMetric(connectClusterId, connectorName, taskId, metric, Float.valueOf(value));
return Result.buildSuc(connectorTaskMetrics);
return Result.buildSuc(new ConnectorTaskMetrics(connectClusterId, connectorName, taskId, metric, Float.valueOf(value)));
} catch (Exception e) {
LOGGER.error("method=getConnectorTaskMetric||connectClusterId={}||workerId={}||connectorName={}||taskId={}||metrics={}||jmx={}||msg={}",
connectClusterId, workerId, connectorName, taskId, metric, jmxObjectName, e.getClass().getName());

View File

@@ -3,7 +3,6 @@ package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
@@ -13,19 +12,14 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.converter.ConnectConverter;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.Triple;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
@@ -34,14 +28,9 @@ import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR;
@Service
public class ConnectorServiceImpl extends BaseVersionControlService implements ConnectorService {
public class ConnectorServiceImpl implements ConnectorService {
private static final ILog LOGGER = LogFactory.getLog(ConnectorServiceImpl.class);
@Autowired
@@ -53,79 +42,14 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private OpLogWrapService opLogWrapService;
private static final String LIST_CONNECTORS_URI = "/connectors";
private static final String GET_CONNECTOR_INFO_PREFIX_URI = "/connectors";
private static final String GET_CONNECTOR_TOPICS_URI = "/connectors/%s/topics";
private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status";
private static final String CREATE_CONNECTOR_URI = "/connectors";
private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume";
private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart";
private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause";
private static final String DELETE_CONNECTOR_URI = "/connectors/%s";
private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config";
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_CONNECTOR;
}
@Override
public Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) {
public Result<List<String>> listConnectorsFromCluster(ConnectCluster connectCluster) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// build the request payload
Properties props = new Properties();
props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName);
props.put("config", configs);
ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI,
props,
ConnectorInfo.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ADD.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectClusterId);
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception",
connectClusterId, connectorName, configs, operator, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<List<String>> listConnectorsFromCluster(Long connectClusterId) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
List<String> nameList = restTool.getArrayObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + LIST_CONNECTORS_URI,
new HashMap<>(),
@@ -135,8 +59,8 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
return Result.buildSuc(nameList);
} catch (Exception e) {
LOGGER.error(
"method=listConnectorsFromCluster||connectClusterId={}||errMsg=exception",
connectClusterId, e
"method=listConnectorsFromCluster||connectClusterId={}||connectClusterSuitableUrl={}||errMsg=exception",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
@@ -153,16 +77,6 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
return this.getConnectorInfoFromCluster(connectCluster, connectorName);
}
@Override
public Result<List<String>> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
return this.getConnectorTopicsFromCluster(connectCluster, connectorName);
}
@Override
public Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
@@ -174,270 +88,26 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
}
@Override
public Result<KSConnector> getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName) {
public Result<KSConnector> getConnectorFromKafka(Long connectClusterId, String connectorName) {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
Result<KSConnectorInfo> connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName);
if (connectorResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
return Result.buildFromIgnoreData(connectorResult);
}
Result<List<String>> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName);
if (topicNameListResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
}
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
LOGGER.error(
"method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}",
connectClusterId, connectorName, connectorResult
);
Result<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> fullInfoResult = this.getConnectorFullInfoFromKafka(connectCluster, connectorName);
if (fullInfoResult.failed()) {
return Result.buildFromIgnoreData(fullInfoResult);
}
return Result.buildSuc(ConnectConverter.convert2KSConnector(
connectCluster.getKafkaClusterPhyId(),
connectCluster.getId(),
connectorResult.getData(),
stateInfoResult.getData(),
topicNameListResult.getData()
fullInfoResult.getData().v1(),
fullInfoResult.getData().v3(),
fullInfoResult.getData().v2()
));
}
@Override
public Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ENABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=ConnectorServiceImpl||method=resumeConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.postObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.RESTART.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=restartConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DISABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=stopConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.deleteWithParamsAndHeader(
connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
new HashMap<>(),
new HashMap<>(),
String.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
this.deleteConnectorInDB(connectClusterId, connectorName);
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=deleteConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
ConnectorInfo connectorInfo = restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
configs,
org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=updateConnectorConfig||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List<KSConnector> connectorList, Set<String> allConnectorNameSet) {
List<ConnectorPO> poList = this.listByConnectClusterIdFromDB(connectClusterId);
Map<String, ConnectorPO> oldPOMap = new HashMap<>();
poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem));
for (KSConnector connector: connectorList) {
try {
ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName());
if (oldPO == null) {
oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
connectorDAO.insert(oldPO);
} else {
ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
newPO.setId(oldPO.getId());
connectorDAO.updateById(newPO);
}
} catch (DuplicateKeyException dke) {
// ignore
}
}
try {
oldPOMap.values().forEach(elem -> {
if (allConnectorNameSet.contains(elem.getConnectorName())) {
// this connector still exists on the cluster; keep it
return;
}
// the connector no longer exists; delete its row
connectorDAO.deleteById(elem.getId());
});
} catch (Exception e) {
// ignore
}
}
@Override
public void addNewToDB(KSConnector connector) {
try {
connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class));
} catch (DuplicateKeyException dke) {
// ignore
}
}
@Override
public List<ConnectorPO> listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
@@ -482,53 +152,98 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
}
@Override
public void completeMirrorMakerInfo(ConnectCluster connectCluster, List<KSConnector> connectorList) {
List<KSConnector> sourceConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList());
if (sourceConnectorList.isEmpty()) {
return;
public Result<Tuple<Set<String>, List<KSConnector>>> getDataFromKafka(ConnectCluster connectCluster) {
Result<List<String>> nameListResult = this.listConnectorsFromCluster(connectCluster);
if (nameListResult.failed()) {
return Result.buildFromIgnoreData(nameListResult);
}
List<KSConnector> heartBeatConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE)).collect(Collectors.toList());
List<KSConnector> checkpointConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE)).collect(Collectors.toList());
Map<String, String> heartbeatMap = this.buildMirrorMakerMap(connectCluster, heartBeatConnectorList);
Map<String, String> checkpointMap = this.buildMirrorMakerMap(connectCluster, checkpointConnectorList);
for (KSConnector sourceConnector : sourceConnectorList) {
Result<KSConnectorInfo> ret = this.getConnectorInfoFromCluster(connectCluster, sourceConnector.getConnectorName());
if (!ret.hasData()) {
LOGGER.error(
"method=completeMirrorMakerInfo||connectClusterId={}||connectorName={}||get connectorInfo fail!",
connectCluster.getId(), sourceConnector.getConnectorName()
);
continue;
}
KSConnectorInfo ksConnectorInfo = ret.getData();
String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetServers, sourceServers)) {
// fetch each connector's full info one by one
List<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> connectorFullInfoList = new ArrayList<>();
for (String connectorName: nameListResult.getData()) {
Result<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> ksConnectorResult = this.getConnectorFullInfoFromKafka(connectCluster, connectorName);
if (ksConnectorResult.failed()) {
continue;
}
String[] targetBrokerList = getBrokerList(targetServers);
String[] sourceBrokerList = getBrokerList(sourceServers);
sourceConnector.setHeartbeatConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, heartbeatMap));
sourceConnector.setCheckpointConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, checkpointMap));
connectorFullInfoList.add(ksConnectorResult.getData());
}
// return the result
return Result.buildSuc(new Tuple<>(
new HashSet<>(nameListResult.getData()),
ConnectConverter.convertAndSupplyMirrorMakerInfo(connectCluster, connectorFullInfoList)) // convert and supply MM2-related info
);
}
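
The `Tuple` returned here carries both the complete connector-name set (used below to decide deletions and to preserve MM2 bindings) and the hydrated connector list. A minimal consumer sketch, assuming `Tuple` exposes `v1()`/`v2()` accessors like the `Triple` used elsewhere in this commit, with imports as in the surrounding file:

```java
// Hypothetical consumer of getDataFromKafka; the accessor names are assumed.
public void syncConnectors(ConnectorService connectorService, ConnectCluster connectCluster) {
    Result<Tuple<Set<String>, List<KSConnector>>> dataResult = connectorService.getDataFromKafka(connectCluster);
    if (!dataResult.hasData()) {
        return; // keep the previous DB snapshot on failure
    }
    Set<String> fullNameSet = dataResult.getData().v1();         // every connector name on the cluster
    List<KSConnector> connectorList = dataResult.getData().v2(); // hydrated connector records
    connectorService.writeToDB(connectCluster.getId(), fullNameSet, connectorList);
}
```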
/**************************************************** private method ****************************************************/
private int deleteConnectorInDB(Long connectClusterId, String connectorName) {
@Override
public void writeToDB(Long connectClusterId, Set<String> fullNameSet, List<KSConnector> dataList) {
List<ConnectorPO> poList = this.listByConnectClusterIdFromDB(connectClusterId);
Map<String, ConnectorPO> oldPOMap = new HashMap<>();
poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem));
for (KSConnector connector: dataList) {
try {
ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName());
if (oldPO == null) {
oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
connectorDAO.insert(oldPO);
continue;
}
ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class);
newPO.setId(oldPO.getId());
if (!ValidateUtils.isBlank(oldPO.getCheckpointConnectorName())
&& ValidateUtils.isBlank(newPO.getCheckpointConnectorName())
&& fullNameSet.contains(oldPO.getCheckpointConnectorName())) {
// the new PO lacks checkpoint info, but the DB row has it and the connector still exists on the cluster, so keep the checkpoint name
newPO.setCheckpointConnectorName(oldPO.getCheckpointConnectorName());
}
if (!ValidateUtils.isBlank(oldPO.getHeartbeatConnectorName())
&& ValidateUtils.isBlank(newPO.getHeartbeatConnectorName())
&& fullNameSet.contains(oldPO.getHeartbeatConnectorName())) {
// same for heartbeat: keep the DB value when the new PO lacks it and the connector still exists on the cluster
newPO.setHeartbeatConnectorName(oldPO.getHeartbeatConnectorName());
}
connectorDAO.updateById(newPO);
} catch (DuplicateKeyException dke) {
// ignore
} catch (Exception e) {
LOGGER.error(
"method=writeToDB||connectClusterId={}||connectorName={}||errMsg=exception",
connector.getConnectClusterId(), connector.getConnectorName(), e
);
}
}
try {
oldPOMap.values().forEach(elem -> {
if (fullNameSet.contains(elem.getConnectorName())) {
// this connector still exists on the cluster; keep it
return;
}
// the connector no longer exists; delete its row
connectorDAO.deleteById(elem.getId());
});
} catch (Exception e) {
// ignore
}
}
@Override
public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
lambdaQueryWrapper.eq(ConnectorPO::getKafkaClusterPhyId, clusterPhyId);
return connectorDAO.delete(lambdaQueryWrapper);
}
/**************************************************** private method ****************************************************/
private Result<KSConnectorInfo> getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
ConnectorInfo connectorInfo = restTool.getForObject(
@@ -594,90 +309,37 @@ public class ConnectorServiceImpl extends BaseVersionControlService implements C
}
}
private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) {
try {
// back off for 2 seconds before re-reading the status
BackoffUtils.backoff(2000);
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
return;
}
ConnectorPO po = new ConnectorPO();
po.setConnectClusterId(connectClusterId);
po.setConnectorName(connectorName);
po.setState(stateInfoResult.getData().getConnector().getState());
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
connectorDAO.update(po, lambdaQueryWrapper);
} catch (Exception e) {
private Result<Triple<KSConnectorInfo, List<String>, KSConnectorStateInfo>> getConnectorFullInfoFromKafka(ConnectCluster connectCluster, String connectorName) {
Result<KSConnectorInfo> connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName);
if (connectorResult.failed()) {
LOGGER.error(
"method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception",
connectClusterId, connectorName, e
"method=getConnectorAllInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors info from cluster failed",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), connectorResult
);
return Result.buildFromIgnoreData(connectorResult);
}
Result<List<String>> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName);
if (topicNameListResult.failed()) {
LOGGER.error(
"method=getConnectorAllInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors topics from cluster failed",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), topicNameListResult
);
}
}
private Map<String, String> buildMirrorMakerMap(ConnectCluster connectCluster, List<KSConnector> ksConnectorList) {
Map<String, String> bindMap = new HashMap<>();
for (KSConnector ksConnector : ksConnectorList) {
Result<KSConnectorInfo> ret = this.getConnectorInfoFromCluster(connectCluster, ksConnector.getConnectorName());
if (!ret.hasData()) {
LOGGER.error(
"method=buildMirrorMakerMap||connectClusterId={}||connectorName={}||get connectorInfo fail!",
connectCluster.getId(), ksConnector.getConnectorName()
);
continue;
}
KSConnectorInfo ksConnectorInfo = ret.getData();
String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME);
if (ValidateUtils.anyBlank(targetServers, sourceServers)) {
continue;
}
String[] targetBrokerList = getBrokerList(targetServers);
String[] sourceBrokerList = getBrokerList(sourceServers);
for (String targetBroker : targetBrokerList) {
for (String sourceBroker : sourceBrokerList) {
bindMap.put(targetBroker + "@" + sourceBroker, ksConnector.getConnectorName());
}
}
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
LOGGER.error(
"method=getConnectorAllInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors state from cluster failed",
connectCluster.getId(), connectCluster.getSuitableRequestUrl(), stateInfoResult
);
}
return bindMap;
}
private String findBindConnector(String[] targetBrokerList, String[] sourceBrokerList, Map<String, String> connectorBindMap) {
for (String targetBroker : targetBrokerList) {
for (String sourceBroker : sourceBrokerList) {
String connectorName = connectorBindMap.get(targetBroker + "@" + sourceBroker);
if (connectorName != null) {
return connectorName;
}
}
}
return "";
}
private String[] getBrokerList(String str) {
if (ValidateUtils.isBlank(str)) {
return new String[0];
}
if (str.contains(";")) {
return str.split(";");
}
if (str.contains(",")) {
return str.split(",");
}
return new String[]{str};
return Result.buildSuc(new Triple<>(
connectorResult.getData(),
topicNameListResult.getData(),
stateInfoResult.getData()
));
}
}
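To make the pairing logic above concrete: buildMirrorMakerMap keys every target@source broker pair to a connector name, and findBindConnector probes those keys for a match. A minimal runnable sketch; the broker addresses and connector name are invented for illustration:

```java
import java.util.HashMap;
import java.util.Map;

// Illustration of the target@source keying used by buildMirrorMakerMap/findBindConnector.
public class BindMapDemo {
    static String findBindConnector(String[] targets, String[] sources, Map<String, String> bindMap) {
        for (String t : targets) {
            for (String s : sources) {
                String name = bindMap.get(t + "@" + s);   // same "target@source" key shape as above
                if (name != null) {
                    return name;
                }
            }
        }
        return "";
    }

    public static void main(String[] args) {
        Map<String, String> bindMap = new HashMap<>();
        // A connector whose config binds target-1 to source-1 (made-up addresses):
        bindMap.put("target-1:9092@source-1:9092", "mm2-demo");

        String[] targets = {"target-1:9092", "target-2:9092"};
        String[] sources = {"source-1:9092"};
        System.out.println(findBindConnector(targets, sources, bindMap)); // prints mm2-demo
    }
}
```

Any single overlapping broker pair is enough to bind two MirrorMaker2 connectors together, which is why the map is populated with the full cross product of brokers.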

View File

@@ -0,0 +1,352 @@
package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import java.util.*;
import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR;
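/**
 * Operation-side service for Kafka Connect connectors: create/resume/restart/pause/delete and
 * config updates via the Connect REST API, each followed by a status refresh and an op-log entry.
 */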
@Service
public class OpConnectorServiceImpl extends BaseVersionControlService implements OpConnectorService {
private static final ILog LOGGER = LogFactory.getLog(OpConnectorServiceImpl.class);
@Autowired
private RestTool restTool;
@Autowired
private ConnectorDAO connectorDAO;
@Autowired
private ConnectClusterService connectClusterService;
@Autowired
private OpLogWrapService opLogWrapService;
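// Kafka Connect REST API paths; %s is the connector name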
private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status";
private static final String CREATE_CONNECTOR_URI = "/connectors";
private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume";
private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart";
private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause";
private static final String DELETE_CONNECTOR_URI = "/connectors/%s";
private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config";
@Override
protected VersionItemTypeEnum getVersionItemType() {
return SERVICE_OP_CONNECT_CONNECTOR;
}
@Override
public Result<KSConnectorInfo> createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
// Build the request payload
Properties props = new Properties();
props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName);
props.put("config", configs);
ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI,
props,
ConnectorInfo.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ADD.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
KSConnectorInfo connector = new KSConnectorInfo();
connector.setConnectClusterId(connectClusterId);
connector.setConfig(connectorInfo.config());
connector.setName(connectorInfo.name());
connector.setTasks(connectorInfo.tasks());
connector.setType(connectorInfo.type());
return Result.buildSuc(connector);
} catch (Exception e) {
LOGGER.error(
"method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception",
connectClusterId, connectorName, configs, operator, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> resumeConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ENABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"class=ConnectorServiceImpl||method=resumeConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> restartConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.postObjectWithJsonContent(
connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.RESTART.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=restartConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> stopConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName),
new HashMap<>(),
String.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DISABLE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=stopConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> deleteConnector(Long connectClusterId, String connectorName, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
restTool.deleteWithParamsAndHeader(
connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName),
new HashMap<>(),
new HashMap<>(),
String.class
);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.DELETE.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
""
));
this.deleteConnectorInDB(connectClusterId, connectorName);
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=deleteConnector||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public Result<Void> updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) {
try {
ConnectCluster connectCluster = connectClusterService.getById(connectClusterId);
if (ValidateUtils.isNull(connectCluster)) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId));
}
ConnectorInfo connectorInfo = restTool.putJsonForObject(
connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName),
configs,
ConnectorInfo.class
);
this.updateStatus(connectCluster, connectClusterId, connectorName);
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(),
MsgConstant.getConnectorBizStr(connectClusterId, connectorName),
ConvertUtil.obj2Json(configs)
));
return Result.buildSuc();
} catch (Exception e) {
LOGGER.error(
"method=updateConnectorConfig||connectClusterId={}||errMsg=exception",
connectClusterId, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
@Override
public void addNewToDB(KSConnector connector) {
try {
connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class));
} catch (DuplicateKeyException dke) {
// ignore
}
}
/**************************************************** private method ****************************************************/
private int deleteConnectorInDB(Long connectClusterId, String connectorName) {
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
return connectorDAO.delete(lambdaQueryWrapper);
}
private Result<KSConnectorStateInfo> getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) {
try {
KSConnectorStateInfo connectorStateInfo = restTool.getForObject(
connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName),
new HashMap<>(),
KSConnectorStateInfo.class
);
return Result.buildSuc(connectorStateInfo);
} catch (Exception e) {
LOGGER.error(
"method=getConnectorStateInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception",
connectCluster.getId(), connectorName, e
);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage());
}
}
private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) {
try {
// Back off for 2 seconds so the action has taken effect before reading the status
BackoffUtils.backoff(2000);
Result<KSConnectorStateInfo> stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName);
if (stateInfoResult.failed()) {
return;
}
ConnectorPO po = new ConnectorPO();
po.setConnectClusterId(connectClusterId);
po.setConnectorName(connectorName);
po.setState(stateInfoResult.getData().getConnector().getState());
LambdaQueryWrapper<ConnectorPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId);
lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName);
connectorDAO.update(po, lambdaQueryWrapper);
} catch (Exception e) {
LOGGER.error(
"method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception",
connectClusterId, connectorName, e
);
}
}
}
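For context, createConnector above posts the standard Kafka Connect create-connector body: a JSON object with a "name" field and a "config" map. A small sketch of that payload shape as the code builds it; the connector name and config values are invented, and how RestTool serializes the nested Properties is assumed:

```java
import java.util.Properties;

// Sketch of the body shape sent to POST /connectors (standard Kafka Connect REST API).
public class CreateConnectorPayloadDemo {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.put("connector.class", "org.apache.kafka.connect.mirror.MirrorSourceConnector");
        config.put("tasks.max", "1");

        Properties body = new Properties();
        body.put("name", "demo-mm2-source"); // mirrors {"name": ..., "config": {...}} once JSON-serialized
        body.put("config", config);

        System.out.println(body);
    }
}
```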

View File

@@ -27,7 +27,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectMetricService;
import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.mm2.MirrorMakerMetricESDAO;
@@ -49,7 +49,7 @@ import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemT
* @date 2022/12/15
*/
@Service
public class MirrorMakerMetricServiceImpl extends BaseConnectorMetricService implements MirrorMakerMetricService {
public class MirrorMakerMetricServiceImpl extends BaseConnectMetricService implements MirrorMakerMetricService {
protected static final ILog LOGGER = LogFactory.getLog(MirrorMakerMetricServiceImpl.class);
public static final String MIRROR_MAKER_METHOD_DO_NOTHING = "doNothing";
@@ -190,7 +190,7 @@ public class MirrorMakerMetricServiceImpl extends BaseConnectorMetricService imp
multiLinesVO.setMetricLines(metricLines);
multiLinesVOS.add(multiLinesVO);
}catch (Exception e){
} catch (Exception e){
LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e);
}
}

View File

@@ -78,6 +78,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d||timestamp=%d", clusterPhy.getId(), System.currentTimeMillis()));
adminClient = KSPartialKafkaAdminClient.create(props);
KSListGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups(
@@ -178,6 +179,7 @@ public class GroupServiceImpl extends BaseKafkaVersionControlService implements
}
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d||timestamp=%d", clusterPhy.getId(), System.currentTimeMillis()));
adminClient = KSPartialKafkaAdminClient.create(props);

View File

@@ -0,0 +1,51 @@
package com.xiaojukeji.know.streaming.km.core.service.meta;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Kafka metadata service interface
*/
public interface MetaDataService<T> {
/**
* Fetch data from Kafka
* @param connectCluster the Connect cluster
* @return a tuple of (full resource name set, successfully fetched resources)
*/
default Result<Tuple<Set<String>, List<T>>> getDataFromKafka(ConnectCluster connectCluster) { return Result.buildSuc(new Tuple<>(new HashSet<>(), new ArrayList<>())); }
/**
* Fetch data from Kafka
* @param clusterPhy the Kafka cluster
* @return the list of successfully fetched resources
*/
default Result<List<T>> getDataFromKafka(ClusterPhy clusterPhy) { return Result.buildSuc(new ArrayList<>()); }
/**
* Sync metadata to the DB
* @param clusterId cluster ID
* @param fullResSet full resource name set
* @param dataList successfully fetched resources
*/
default void writeToDB(Long clusterId, Set<String> fullResSet, List<T> dataList) {}
/**
* Sync metadata to the DB
* @param clusterId cluster ID
* @param dataList successfully fetched resources
*/
default void writeToDB(Long clusterId, List<T> dataList) {}
/**
* Delete data by Kafka cluster ID
* @param clusterPhyId Kafka cluster ID
*/
int deleteInDBByKafkaClusterId(Long clusterPhyId);
}
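The contract is easiest to see with a toy implementation. A minimal sketch, assuming the imports shown in this file; DemoMetaDataService and its resource names are hypothetical, not part of this commit:

```java
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;

import java.util.Arrays;
import java.util.List;

// Hypothetical implementation for illustration only.
public class DemoMetaDataService implements MetaDataService<String> {
    @Override
    public Result<List<String>> getDataFromKafka(ClusterPhy clusterPhy) {
        // A real service would query Kafka/ZK here; these names are invented.
        return Result.buildSuc(Arrays.asList("resource-a", "resource-b"));
    }

    @Override
    public void writeToDB(Long clusterId, List<String> dataList) {
        // A real service diffs dataList against the DB: insert new, update changed, delete stale.
    }

    @Override
    public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
        // Called when a cluster is removed; should return the number of affected rows.
        return 0;
    }
}
```

Because TaskClusterDeletedListener (later in this commit) iterates every MetaDataService bean, any implementation registered with Spring gets its deleteInDBByKafkaClusterId invoked automatically when a cluster is deleted.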

View File

@@ -9,7 +9,7 @@ import java.util.stream.Collectors;
* @author wyb
* @date 2022/11/9
*/
public abstract class BaseConnectorMetricService extends BaseConnectorVersionControlService{
public abstract class BaseConnectMetricService extends BaseConnectVersionControlService {
private List<String> metricNames = new ArrayList<>();
@PostConstruct

View File

@@ -14,7 +14,7 @@ import javax.annotation.Nullable;
* @author wyb
* @date 2022/11/8
*/
public abstract class BaseConnectorVersionControlService extends BaseVersionControlService {
public abstract class BaseConnectVersionControlService extends BaseVersionControlService {
@Autowired
ConnectClusterService connectClusterService;

View File

@@ -24,6 +24,8 @@ public class ConnectorMetricVersionItems extends BaseMetricVersionMetric {
public static final String CONNECTOR_METRIC_HEALTH_STATE = "HealthState";
public static final String CONNECTOR_METRIC_RUNNING_STATUS = "RunningStatus";
public static final String CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT = "ConnectorTotalTaskCount";
public static final String CONNECTOR_METRIC_HEALTH_CHECK_PASSED = "HealthCheckPassed";
@@ -128,6 +130,9 @@ public class ConnectorMetricVersionItems extends BaseMetricVersionMetric {
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_HEALTH_STATE).unit("0:好 1:中 2:差 3:宕机").desc("健康状态(0:好 1:中 2:差 3:宕机)").category(CATEGORY_HEALTH)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_RUNNING_STATUS).unit("0:UNASSIGNED 1:RUNNING 2:PAUSED 3:FAILED 4:DESTROYED -1:UNKNOWN").desc("运行状态(0:UNASSIGNED 1:RUNNING 2:PAUSED 3:FAILED 4:DESTROYED -1:UNKNOWN)").category(CATEGORY_PERFORMANCE)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_RUNNING_STATUS));
items.add(buildAllVersionsItem()
.name(CONNECTOR_METRIC_HEALTH_CHECK_PASSED).unit("").desc("健康项检查通过数").category(CATEGORY_HEALTH)
.extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));

View File

@@ -1,19 +1,11 @@
package com.xiaojukeji.know.streaming.km.core.service.zookeeper;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import java.util.List;
public interface ZookeeperService {
/**
* Fetch ZK info from the ZK cluster
*/
Result<List<ZookeeperInfo>> listFromZookeeper(Long clusterPhyId, String zookeeperAddress, ZKConfig zkConfig);
void batchReplaceDataInDB(Long clusterPhyId, List<ZookeeperInfo> infoList);
public interface ZookeeperService extends MetaDataService<ZookeeperInfo> {
List<ZookeeperInfo> listFromDBByCluster(Long clusterPhyId);
/**

View File

@@ -3,6 +3,7 @@ package com.xiaojukeji.know.streaming.km.core.service.zookeeper.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
@@ -22,10 +23,8 @@ import com.xiaojukeji.know.streaming.km.persistence.mysql.zookeeper.ZookeeperDAO
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.stream.Collectors;
@Service
public class ZookeeperServiceImpl implements ZookeeperService {
@@ -35,14 +34,14 @@ public class ZookeeperServiceImpl implements ZookeeperService {
private ZookeeperDAO zookeeperDAO;
@Override
public Result<List<ZookeeperInfo>> listFromZookeeper(Long clusterPhyId, String zookeeperAddress, ZKConfig zkConfig) {
public Result<List<ZookeeperInfo>> getDataFromKafka(ClusterPhy clusterPhy) {
List<Tuple<String, Integer>> addressList = null;
try {
addressList = ZookeeperUtils.connectStringParser(zookeeperAddress);
addressList = ZookeeperUtils.connectStringParser(clusterPhy.getZookeeper());
} catch (Exception e) {
LOGGER.error(
"method=listFromZookeeperCluster||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
clusterPhyId, zookeeperAddress, e
"method=getDataFromKafka||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
clusterPhy.getId(), clusterPhy.getZookeeper(), e
);
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, e.getMessage());
@@ -51,24 +50,25 @@ public class ZookeeperServiceImpl implements ZookeeperService {
List<ZookeeperInfo> aliveZKList = new ArrayList<>();
for (Tuple<String, Integer> hostPort: addressList) {
aliveZKList.add(this.getFromZookeeperCluster(
clusterPhyId,
clusterPhy.getId(),
hostPort.getV1(),
hostPort.getV2(),
zkConfig
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class)
));
}
return Result.buildSuc(aliveZKList);
}
@Override
public void batchReplaceDataInDB(Long clusterPhyId, List<ZookeeperInfo> infoList) {
public void writeToDB(Long clusterId, List<ZookeeperInfo> dataList) {
// Records currently in the DB
List<ZookeeperInfoPO> dbInfoList = this.listRawFromDBByCluster(clusterPhyId);
Map<String, ZookeeperInfoPO> dbMap = new HashMap<>();
dbInfoList.stream().forEach(elem -> dbMap.put(elem.getHost() + elem.getPort(), elem));
Map<String, ZookeeperInfoPO> dbMap = this.listRawFromDBByCluster(clusterId)
.stream()
.collect(Collectors.toMap(elem -> elem.getHost() + elem.getPort(), elem -> elem, (oldValue, newValue) -> newValue));
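// (oldValue, newValue) -> newValue: if two rows share the same host+port key, keep the newest record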
// Newly fetched records
List<ZookeeperInfoPO> newInfoList = ConvertUtil.list2List(infoList, ZookeeperInfoPO.class);
List<ZookeeperInfoPO> newInfoList = ConvertUtil.list2List(dataList, ZookeeperInfoPO.class);
for (ZookeeperInfoPO newInfo: newInfoList) {
try {
ZookeeperInfoPO oldInfo = dbMap.remove(newInfo.getHost() + newInfo.getPort());
@@ -87,7 +87,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
zookeeperDAO.updateById(newInfo);
}
} catch (Exception e) {
LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterPhyId, newInfo, e);
LOGGER.error("method=writeToDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterId, newInfo, e);
}
}
@@ -96,11 +96,19 @@ public class ZookeeperServiceImpl implements ZookeeperService {
try {
zookeeperDAO.deleteById(entry.getValue().getId());
} catch (Exception e) {
LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterPhyId, entry.getValue(), e);
LOGGER.error("method=writeToDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterId, entry.getValue(), e);
}
});
}
@Override
public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
LambdaQueryWrapper<ZookeeperInfoPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ZookeeperInfoPO::getClusterPhyId, clusterPhyId);
return zookeeperDAO.delete(lambdaQueryWrapper);
}
@Override
public List<ZookeeperInfo> listFromDBByCluster(Long clusterPhyId) {
return ConvertUtil.list2List(this.listRawFromDBByCluster(clusterPhyId), ZookeeperInfo.class);

View File

@@ -16,8 +16,8 @@ import com.xiaojukeji.know.streaming.km.account.KmAccountConfig;
import com.xiaojukeji.know.streaming.km.account.common.bizenum.LoginServiceNameEnum;
import com.xiaojukeji.know.streaming.km.account.common.ldap.LdapPrincipal;
import com.xiaojukeji.know.streaming.km.account.login.ldap.remote.LdapAuthentication;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -79,7 +79,11 @@ public class LdapLoginServiceImpl implements LoginExtend {
userService.addUser(userDTO, ldapAttrsInfo.getSAMAccountName());
// Populate user
user = ConvertUtil.obj2Obj(userDTO, User.class);
user = userService.getUserByUserName(ldapAttrsInfo.getSAMAccountName());
} else if (ValidateUtils.isNull(user)) {
// When user is null and auto user registration is disabled, give the temporary user the default id
user = new User();
user.setId(Constant.INVALID_CODE);
}
// Record the login state

View File

@@ -16,6 +16,11 @@ public enum MonitorSinkTagEnum {
CONSUMER_GROUP("consumerGroup"),
REPLICATION("replication"),
CONNECT_CLUSTER_ID("connectClusterId"),
CONNECT_CONNECTOR("connectConnector"),
;
private final String name;

View File

@@ -3,7 +3,9 @@ package com.xiaojukeji.know.streaming.km.monitor.component;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.*;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*;
import com.xiaojukeji.know.streaming.km.common.bean.event.metric.connect.ConnectorMetricEvent;
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
import com.xiaojukeji.know.streaming.km.monitor.common.MetricSinkPoint;
import org.springframework.context.ApplicationListener;
@@ -59,6 +61,10 @@ public abstract class AbstractMonitorSinkService implements ApplicationListener<
} else if(event instanceof ZookeeperMetricEvent) {
ZookeeperMetricEvent zookeeperMetricEvent = (ZookeeperMetricEvent)event;
sinkMetrics(zookeeperMetric2SinkPoint(zookeeperMetricEvent.getZookeeperMetrics()));
} else if (event instanceof ConnectorMetricEvent) {
ConnectorMetricEvent connectorMetricEvent = (ConnectorMetricEvent)event;
sinkMetrics(connectConnectorMetric2SinkPoint(connectorMetricEvent.getConnectorMetricsList()));
}
} );
}
@@ -170,6 +176,21 @@ public abstract class AbstractMonitorSinkService implements ApplicationListener<
return pointList;
}
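// Convert each connector's metrics into sink points tagged with clusterId, connectClusterId and connector name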
private List<MetricSinkPoint> connectConnectorMetric2SinkPoint(List<ConnectorMetrics> connectorMetricsList){
List<MetricSinkPoint> pointList = new ArrayList<>();
for(ConnectorMetrics metrics : connectorMetricsList){
Map<String, Object> tagsMap = new HashMap<>();
tagsMap.put(CLUSTER_ID.getName(), metrics.getClusterPhyId());
tagsMap.put(CONNECT_CLUSTER_ID.getName(), metrics.getConnectClusterId());
tagsMap.put(CONNECT_CONNECTOR.getName(), metrics.getConnectorName());
pointList.addAll(genSinkPoint("ConnectConnector", metrics.getMetrics(), metrics.getTimestamp(), tagsMap));
}
return pointList;
}
private List<MetricSinkPoint> genSinkPoint(String metricPre,
Map<String, Float> metrics,
long timeStamp,

View File

@@ -1,4 +1,4 @@
package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect;
package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.cluster;
import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;

View File

@@ -12,6 +12,7 @@ import org.apache.kafka.clients.admin.AdminClientConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -76,10 +77,12 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
LOGGER.info("close kafka AdminClient starting, clusterPhyId:{}", clusterPhyId);
boolean allSuccess = this.closeAdminClientList(adminClientList);
boolean allSuccess = this.closeAdminClientList(clusterPhyId, adminClientList);
if (allSuccess) {
LOGGER.info("close kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
} else {
LOGGER.error("close kafka AdminClient exist failed and can ignore this error, clusterPhyId:{}", clusterPhyId);
}
} catch (Exception e) {
LOGGER.error("close kafka AdminClient failed, clusterPhyId:{}", clusterPhyId, e);
@@ -116,6 +119,7 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
adminClientList = new ArrayList<>();
for (int i = 0; i < clientCnt; ++i) {
props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("ApacheAdminClient||clusterPhyId=%d||Cnt=%d", clusterPhyId, i));
adminClientList.add(AdminClient.create(props));
}
@@ -125,7 +129,7 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
} catch (Exception e) {
LOGGER.error("create kafka AdminClient failed, clusterPhyId:{} props:{}", clusterPhyId, props, e);
this.closeAdminClientList(adminClientList);
this.closeAdminClientList(clusterPhyId, adminClientList);
} finally {
modifyClientMapLock.unlock();
}
@@ -133,7 +137,7 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
return KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId).get((int)(System.currentTimeMillis() % clientCnt));
}
private boolean closeAdminClientList(List<AdminClient> adminClientList) {
private boolean closeAdminClientList(Long clusterPhyId, List<AdminClient> adminClientList) {
if (adminClientList == null) {
return true;
}
@@ -141,9 +145,11 @@ public class KafkaAdminClient extends AbstractClusterLoadedChangedHandler {
boolean allSuccess = true;
for (AdminClient adminClient: adminClientList) {
try {
adminClient.close();
// Close the client with a 30-second timeout
adminClient.close(Duration.ofSeconds(30));
} catch (Exception e) {
// ignore
LOGGER.error("close kafka AdminClient exist failed, clusterPhyId:{}", clusterPhyId, e);
allSuccess = false;
}
}

View File

@@ -157,3 +157,7 @@ INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `l
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming');
-- Multi-cluster management permission (added 2023-07-18)
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2052', 'Security-User查看密码', '1593', '1', '2', 'Security-User查看密码', '0', 'know-streaming');
INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2052', '0', 'know-streaming');

View File

@@ -15,7 +15,7 @@ import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectActionEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
@@ -31,9 +31,8 @@ import org.springframework.web.bind.annotation.*;
@RestController
@RequestMapping(ApiPrefix.API_V3_CONNECT_PREFIX)
public class KafkaConnectorController {
@Autowired
private ConnectorService connectorService;
private OpConnectorService opConnectorService;
@Autowired
private ConnectorManager connectorManager;
@@ -56,7 +55,7 @@ public class KafkaConnectorController {
@DeleteMapping(value ="connectors")
@ResponseBody
public Result<Void> deleteConnectors(@Validated @RequestBody ConnectorDeleteDTO dto) {
return connectorService.deleteConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
return opConnectorService.deleteConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
}
@ApiOperation(value = "操作Connector", notes = "")
@@ -64,11 +63,11 @@ public class KafkaConnectorController {
@ResponseBody
public Result<Void> operateConnectors(@Validated @RequestBody ConnectorActionDTO dto) {
if (ConnectActionEnum.RESTART.getValue().equals(dto.getAction())) {
return connectorService.restartConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
return opConnectorService.restartConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
} else if (ConnectActionEnum.STOP.getValue().equals(dto.getAction())) {
return connectorService.stopConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
return opConnectorService.stopConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
} else if (ConnectActionEnum.RESUME.getValue().equals(dto.getAction())) {
return connectorService.resumeConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
return opConnectorService.resumeConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
}
return Result.buildFailure(ResultStatus.PARAM_ILLEGAL);

View File

@@ -3,17 +3,15 @@ package com.xiaojukeji.know.streaming.km.task.connect.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@Task(name = "SyncConnectorTask",
@@ -23,40 +21,21 @@ import java.util.List;
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncConnectorTask extends AbstractAsyncMetadataDispatchTask {
private static final ILog LOGGER = LogFactory.getLog(SyncConnectorTask.class);
@Autowired
private ConnectorService connectorService;
@Override
public TaskResult processClusterTask(ConnectCluster connectCluster, long triggerTimeUnitMs) {
Result<List<String>> nameListResult = connectorService.listConnectorsFromCluster(connectCluster.getId());
if (nameListResult.failed()) {
return TaskResult.FAIL;
// Fetch the data
Result<Tuple<Set<String>, List<KSConnector>>> dataResult = connectorService.getDataFromKafka(connectCluster);
if (dataResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, dataResult.getMessage());
}
boolean allSuccess = true;
// Write to the DB
connectorService.writeToDB( connectCluster.getId(), dataResult.getData().v1(), dataResult.getData().v2());
List<KSConnector> connectorList = new ArrayList<>();
for (String connectorName: nameListResult.getData()) {
Result<KSConnector> ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(connectCluster.getId(), connectorName);
if (ksConnectorResult.failed()) {
LOGGER.error(
"method=processClusterTask||connectClusterId={}||connectorName={}||result={}",
connectCluster.getId(), connectorName, ksConnectorResult
);
allSuccess = false;
continue;
}
connectorList.add(ksConnectorResult.getData());
}
// Supplement MM2-related info
connectorService.completeMirrorMakerInfo(connectCluster, connectorList);
connectorService.batchReplace(connectCluster.getKafkaClusterPhyId(), connectCluster.getId(), connectorList, new HashSet<>(nameListResult.getData()));
return allSuccess? TaskResult.SUCCESS: TaskResult.FAIL;
// Return the result: succeed only if every connector in the cluster was fetched successfully
return dataResult.getData().v1().size() == dataResult.getData().v2().size()? TaskResult.SUCCESS: TaskResult.FAIL;
}
}

View File

@@ -3,19 +3,13 @@ package com.xiaojukeji.know.streaming.km.task.kafka.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import org.apache.kafka.common.acl.AclBinding;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
import java.util.stream.Collectors;
@Task(name = "SyncKafkaAclTask",
description = "KafkaAcl信息同步到DB",
@@ -24,32 +18,18 @@ import java.util.stream.Collectors;
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncKafkaAclTask extends AbstractAsyncMetadataDispatchTask {
private static final ILog log = LogFactory.getLog(SyncKafkaAclTask.class);
@Autowired
private KafkaAclService kafkaAclService;
@Autowired
private OpKafkaAclService opKafkaAclService;
@Override
public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getAclFromKafka(clusterPhy.getId());
Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getDataFromKafka(clusterPhy);
if (aclBindingListResult.failed()) {
return TaskResult.FAIL;
}
if (!aclBindingListResult.hasData()) {
return TaskResult.SUCCESS;
}
kafkaAclService.writeToDB(clusterPhy.getId(), aclBindingListResult.getData());
// Update the DB records
List<KafkaAclPO> poList = aclBindingListResult.getData()
.stream()
.map(elem -> KafkaAclConverter.convert2KafkaAclPO(clusterPhy.getId(), elem, triggerTimeUnitMs))
.collect(Collectors.toList());
opKafkaAclService.batchUpdateAcls(clusterPhy.getId(), poList);
return TaskResult.SUCCESS;
}
}

View File

@@ -3,12 +3,8 @@ package com.xiaojukeji.know.streaming.km.task.kafka.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
import org.springframework.beans.factory.annotation.Autowired;
@@ -23,24 +19,17 @@ import java.util.List;
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncZookeeperTask extends AbstractAsyncMetadataDispatchTask {
private static final ILog log = LogFactory.getLog(SyncZookeeperTask.class);
@Autowired
private ZookeeperService zookeeperService;
@Override
public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<List<ZookeeperInfo>> infoResult = zookeeperService.listFromZookeeper(
clusterPhy.getId(),
clusterPhy.getZookeeper(),
ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class)
);
Result<List<ZookeeperInfo>> infoResult = zookeeperService.getDataFromKafka(clusterPhy);
if (infoResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, infoResult.getMessage());
}
zookeeperService.batchReplaceDataInDB(clusterPhy.getId(), infoResult.getData());
zookeeperService.writeToDB(clusterPhy.getId(), infoResult.getData());
return TaskResult.SUCCESS;
}

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.know.streaming.km.task.service.listener;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Service;
@Service
public class TaskClusterDeletedListener implements ApplicationListener<ClusterPhyDeletedEvent> {
private static final ILog LOGGER = LogFactory.getLog(TaskClusterDeletedListener.class);
@Override
public void onApplicationEvent(ClusterPhyDeletedEvent event) {
LOGGER.info("method=onApplicationEvent||clusterPhyId={}||msg=listened delete cluster", event.getClusterPhyId());
// Hand off to KS's custom thread pool for async execution
FutureUtil.quickStartupFutureUtil.submitTask(
() -> {
// Wait 60 seconds so tasks that are still running do not write data back into the DB mid-delete
BackoffUtils.backoff(60000);
for (MetaDataService metaDataService: SpringTool.getBeansOfType(MetaDataService.class).values()) {
LOGGER.info(
"method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db starting",
event.getClusterPhyId(), metaDataService.getClass().getSimpleName()
);
try {
// Delete the data
metaDataService.deleteInDBByKafkaClusterId(event.getClusterPhyId());
LOGGER.info(
"method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db finished",
event.getClusterPhyId(), metaDataService.getClass().getSimpleName()
);
} catch (Exception e) {
LOGGER.error(
"method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db failed||errMsg=exception",
event.getClusterPhyId(), metaDataService.getClass().getSimpleName(), e
);
}
}
}
);
}
}
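A hedged sketch of how this listener gets triggered. The ClusterPhyDeletedEvent constructor signature below is assumed from typical Spring ApplicationEvent usage, not taken from this commit:

```java
import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
import org.springframework.context.ApplicationEventPublisher;

// Hypothetical publisher side; the event constructor (source, clusterPhyId) is an assumption.
public class ClusterDeleteDemo {
    private final ApplicationEventPublisher publisher;

    public ClusterDeleteDemo(ApplicationEventPublisher publisher) {
        this.publisher = publisher;
    }

    public void deleteCluster(Long clusterPhyId) {
        // ... remove the cluster record itself, then broadcast so
        // TaskClusterDeletedListener can purge the cluster's metadata asynchronously.
        publisher.publishEvent(new ClusterPhyDeletedEvent(this, clusterPhyId));
    }
}
```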