Initialize version 3.0.0

Author: zengqiao
Date: 2022-08-18 17:04:05 +08:00
Parent: 462303fca0
Commit: 51832385b1
2446 changed files with 93177 additions and 127211 deletions

km-task/pom.xml Normal file

@@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-task</artifactId>
<version>${km.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>km</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${km.revision}</version>
</parent>
<properties>
</properties>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-core</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-collector</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-biz</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>io.github.zqrferrari</groupId>
<artifactId>logi-job-spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>io.github.zqrferrari</groupId>
<artifactId>logi-security-spring-boot-starter</artifactId>
</dependency>
</dependencies>
</project>

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/AbstractClusterPhyDispatchTask.java Normal file

@@ -0,0 +1,53 @@
package com.xiaojukeji.know.streaming.km.task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
public abstract class AbstractClusterPhyDispatchTask extends AbstractDispatchTask<ClusterPhy> {
private static final ILog log = LogFactory.getLog(AbstractClusterPhyDispatchTask.class);
@Autowired
private ClusterPhyService clusterPhyService;
/**
 * Execute the assigned sub-task
 * @param clusterPhy the cluster to process
 * @param triggerTimeUnitMs task trigger time in ms
 * @return task result
 */
protected abstract TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception;
@Override
public List<ClusterPhy> listAllTasks() {
return clusterPhyService.listAllClusters();
}
@Override
public TaskResult processTask(List<ClusterPhy> subTaskList, long triggerTimeUnitMs) {
boolean allSuccess = true;
for (ClusterPhy elem: subTaskList) {
try {
log.debug("method=processTask||taskName={}||clusterPhyId={}||msg=start", this.taskName, elem.getId());
TaskResult tr = this.processSubTask(elem, triggerTimeUnitMs);
if (TaskResult.SUCCESS.getCode() != tr.getCode()) {
log.warn("method=processTask||taskName={}||clusterPhyId={}||msg=process failed", this.taskName, elem.getId());
allSuccess = false;
continue;
}
log.debug("method=processTask||taskName={}||clusterPhyId={}||msg=finished", this.taskName, elem.getId());
} catch (Exception e) {
log.error("method=processTask||taskName={}||clusterPhyId={}||errMsg=throw exception", this.taskName, elem.getId(), e);
}
}
return allSuccess ? TaskResult.SUCCESS : TaskResult.FAIL;
}
}
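
For orientation, every task in this module extends this base class the same way: annotate with @Task in BROADCAST consensual mode (enforced by AbstractDispatchTask.init()) and implement processSubTask for a single cluster. A minimal hypothetical subclass — the task name, description, and cron below are placeholders, not part of this commit:

package com.xiaojukeji.know.streaming.km.task;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
// Hypothetical example; name and cron are placeholders.
@Task(name = "DemoClusterTask",
description = "demo task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST, // anything else fails init()
timeout = 2 * 60)
public class DemoClusterTask extends AbstractClusterPhyDispatchTask {
@Override
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
// Do per-cluster work here; the dispatcher decides which clusters this worker gets.
return TaskResult.SUCCESS;
}
}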

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/AbstractDispatchTask.java Normal file

@@ -0,0 +1,133 @@
package com.xiaojukeji.know.streaming.km.task;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.job.core.job.Job;
import com.didiglobal.logi.job.core.job.JobContext;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.EntifyIdInterface;
import com.xiaojukeji.know.streaming.km.common.exception.AdminTaskCodeException;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public abstract class AbstractDispatchTask<E extends Comparable & EntifyIdInterface> implements Job {
private static final ILog LOGGER = LogFactory.getLog(AbstractDispatchTask.class);
/**
 * Task name
 */
protected String taskName;
/**
 * Task timeout, in seconds
 */
protected Long timeoutUnitSec;
/**
 * Return all tasks
 * @return the full task list
 */
protected abstract List<E> listAllTasks();
/**
 * Execute the assigned sub-tasks
 * @param subTaskList sub-task list
 * @param triggerTimeUnitMs task trigger time in ms
 * @return task result
 */
protected abstract TaskResult processTask(List<E> subTaskList, long triggerTimeUnitMs);
@PostConstruct
void init() {
Task customTask = this.getClass().getAnnotation(Task.class);
if (customTask == null) {
LOGGER.error("method=init||className={}||msg=extend AbstractDispatchTask must use @Task annotation.", getClass().getName());
throw new AdminTaskCodeException("extend AbstractDispatchTask must use @Task annotation");
}
// Broadcast mode is required, otherwise tasks would be lost; if any other mode is configured, log the error and fail fast
if (!ConsensualEnum.BROADCAST.equals(customTask.consensual())) {
LOGGER.error("method=init||className={}||msg=extend AbstractDispatchTask and @Task annotation must use BROADCAST.", getClass().getName());
throw new AdminTaskCodeException("extend AbstractDispatchTask and @Task annotation must use BROADCAST");
}
this.taskName = customTask.name();
this.timeoutUnitSec = customTask.timeout();
}
@Override
public TaskResult execute(JobContext jobContext) {
try {
long triggerTimeUnitMs = System.currentTimeMillis();
// Fetch all tasks
List<E> allTaskList = this.listAllTasks();
if (ValidateUtils.isEmptyList(allTaskList)) {
LOGGER.debug("all-task is empty, finish process, taskName:{} jobContext:{}", taskName, jobContext);
return TaskResult.SUCCESS;
}
// Compute the tasks this machine should execute
List<E> subTaskList = this.selectTask(allTaskList, jobContext.getAllWorkerCodes(), jobContext.getCurrentWorkerCode());
if (ValidateUtils.isEmptyList(subTaskList)) {
LOGGER.debug("sub-task is empty, finish process, taskName:{} jobContext:{}", taskName, jobContext);
return TaskResult.SUCCESS;
}
// Process the selected tasks
return this.processTask(subTaskList, triggerTimeUnitMs);
} catch (Exception e) {
LOGGER.error("process task failed, taskName:{}", taskName, e);
return new TaskResult(TaskResult.FAIL_CODE, e.toString());
}
}
/**
 * Select the tasks the current machine should execute
 * @param allTaskList all tasks
 * @param allWorkCodes all registered worker codes
 * @param currentWorkerCode code of the current worker
 * @return tasks to execute on this worker
 */
private List<E> selectTask(List<E> allTaskList, List<String> allWorkCodes, String currentWorkerCode) {
if(ValidateUtils.isEmptyList(allTaskList)) {
return new ArrayList<>();
}
if (ValidateUtils.isEmptyList(allWorkCodes) || ValidateUtils.isBlank(currentWorkerCode)) {
LOGGER.warn("task running, but no workers are registered, so scheduled tasks can't execute, taskName:{}.", taskName);
return new ArrayList<>();
}
Collections.sort(allTaskList);
Collections.sort(allWorkCodes);
int idx = 0;
while (idx < allWorkCodes.size()) {
if (allWorkCodes.get(idx).equals(currentWorkerCode)) {
break;
}
idx += 1;
}
if (idx == allWorkCodes.size()) {
LOGGER.debug("task running, but current machine is not in the registered worker list, taskName:{}.", taskName);
return new ArrayList<>();
}
int count = allTaskList.size() / allWorkCodes.size();
if (allTaskList.size() % allWorkCodes.size() != 0) {
count += 1;
}
if (idx * count >= allTaskList.size()) {
return new ArrayList<>();
}
return allTaskList.subList(idx * count, Math.min(idx * count + count, allTaskList.size()));
}
}
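
The dispatch arithmetic above is easiest to see with concrete numbers: both lists are sorted, each worker takes a contiguous block of ceil(tasks/workers) entries, and trailing workers may get a short block or nothing. A self-contained sketch of just that slicing (data invented):

import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
// Standalone illustration of AbstractDispatchTask#selectTask's slicing.
public class SelectTaskDemo {
static List<Integer> slice(List<Integer> allTasks, int workerCount, int workerIdx) {
int count = allTasks.size() / workerCount;
if (allTasks.size() % workerCount != 0) {
count += 1; // count = ceil(tasks / workers)
}
if (workerIdx * count >= allTasks.size()) {
return Collections.emptyList(); // trailing workers may receive nothing
}
return allTasks.subList(workerIdx * count, Math.min(workerIdx * count + count, allTasks.size()));
}
public static void main(String[] args) {
List<Integer> tasks = IntStream.range(0, 10).boxed().collect(Collectors.toList());
for (int w = 0; w < 3; w++) {
System.out.println("worker " + w + " -> " + slice(tasks, 3, w));
}
// worker 0 -> [0, 1, 2, 3], worker 1 -> [4, 5, 6, 7], worker 2 -> [8, 9]
}
}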

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/client/CheckJmxClientTask.java Normal file

@@ -0,0 +1,60 @@
package com.xiaojukeji.know.streaming.km.task.client;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.job.core.job.Job;
import com.didiglobal.logi.job.core.job.JobContext;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import org.springframework.beans.factory.annotation.Autowired;
@Task(name = "CheckJmxClientTask",
description = "Check JMX clients",
cron = "0 0/1 * * * ? *",
autoRegister = true,
timeout = 2 * 60,
consensual = ConsensualEnum.BROADCAST)
public class CheckJmxClientTask implements Job {
private static final ILog log = LogFactory.getLog(CheckJmxClientTask.class);
@Autowired
private BrokerService brokerService;
@Autowired
private KafkaJMXClient kafkaJMXClient;
@Override
public TaskResult execute(JobContext jobContext) {
boolean status = true;
for (ClusterPhy clusterPhy: LoadedClusterPhyCache.listAll().values()) {
if (this.checkJmxClient(clusterPhy)) {
continue;
}
status = false;
}
return status ? TaskResult.SUCCESS : TaskResult.FAIL;
}
private boolean checkJmxClient(ClusterPhy clusterPhy) {
try {
kafkaJMXClient.checkAndRemoveIfIllegal(
clusterPhy.getId(),
brokerService.listAliveBrokersFromDB(clusterPhy.getId())
);
return true;
} catch (Exception e) {
log.error("method=checkJmxClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
}
return false;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/health/HealthCheckTask.java Normal file

@@ -0,0 +1,123 @@
package com.xiaojukeji.know.streaming.km.task.health;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.healthcheck.BaseClusterHealthConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.health.HealthCheckResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam;
import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.health.HealthCheckDimensionEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.health.checkresult.HealthCheckResultService;
import com.xiaojukeji.know.streaming.km.core.service.health.checker.AbstractHealthCheckService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import lombok.AllArgsConstructor;
import lombok.NoArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;
@NoArgsConstructor
@AllArgsConstructor
@Task(name = "HealthCheckTask", description = "Health check", cron = "0 0/1 * * * ? *",
autoRegister = true, consensual = ConsensualEnum.BROADCAST, timeout = 2 * 60)
public class HealthCheckTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(HealthCheckTask.class);
@Autowired
private HealthCheckResultService healthCheckResultService;
private final List<AbstractHealthCheckService> healthCheckServiceList = new ArrayList<>(
SpringTool.getBeansOfType(AbstractHealthCheckService.class).values()
);
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
// Fetch the configs: <config name, config info>
Map<String, BaseClusterHealthConfig> healthConfigMap = healthCheckResultService.getClusterHealthConfig(clusterPhy.getId());
// Check results
List<HealthCheckResult> resultList = new ArrayList<>();
// Iterate over the check services
for (AbstractHealthCheckService healthCheckService: healthCheckServiceList) {
List<ClusterPhyParam> paramList = healthCheckService.getResList(clusterPhy.getId());
if (ValidateUtils.isEmptyList(paramList)) {
// No resources of this dimension exist, so directly record passing results
resultList.addAll(this.getNoResResult(clusterPhy.getId(), healthCheckService, healthConfigMap));
continue;
}
// Iterate over the resources
for (ClusterPhyParam clusterPhyParam: paramList) {
resultList.addAll(this.checkAndGetResult(healthCheckService, clusterPhyParam, healthConfigMap));
}
}
for (HealthCheckResult checkResult: resultList) {
try {
healthCheckResultService.replace(checkResult);
} catch (Exception e) {
log.error("method=processSubTask||clusterPhyId={}||checkResult={}||errMsg=exception!", clusterPhy.getId(), checkResult, e);
}
}
// Delete check results from more than 10 minutes ago
try {
healthCheckResultService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 10 * 60 * 1000));
} catch (Exception e) {
log.error("method=processSubTask||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e);
}
return TaskResult.SUCCESS;
}
private List<HealthCheckResult> getNoResResult(Long clusterPhyId, AbstractHealthCheckService healthCheckService, Map<String, BaseClusterHealthConfig> healthConfigMap) {
List<HealthCheckResult> resultList = new ArrayList<>();
// Run the checks
for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) {
HealthCheckDimensionEnum dimensionEnum = healthCheckService.getHealthCheckDimensionEnum();
if (!clusterHealthConfig.getCheckNameEnum().getDimensionEnum().equals(dimensionEnum)) {
// Dimension mismatch
continue;
}
// Record a passing result
HealthCheckResult checkResult = new HealthCheckResult(
dimensionEnum.getDimension(),
clusterHealthConfig.getCheckNameEnum().getConfigName(),
clusterPhyId,
"-1"
);
checkResult.setPassed(Constant.YES);
resultList.add(checkResult);
}
return resultList;
}
private List<HealthCheckResult> checkAndGetResult(AbstractHealthCheckService healthCheckService,
ClusterPhyParam clusterPhyParam,
Map<String, BaseClusterHealthConfig> healthConfigMap) {
List<HealthCheckResult> resultList = new ArrayList<>();
// Run the checks
for (BaseClusterHealthConfig clusterHealthConfig: healthConfigMap.values()) {
HealthCheckResult healthCheckResult = healthCheckService.checkAndGetResult(clusterPhyParam, clusterHealthConfig);
if (healthCheckResult == null) {
continue;
}
// Record the result
resultList.add(healthCheckResult);
}
return resultList;
}
}
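
Persistence here follows an upsert-then-expire pattern: every run replaces the current results, then deletes rows whose update time predates the trigger by 10 minutes, so checks that stop reporting age out on their own. A minimal sketch of that pattern over an in-memory map — illustrative only, not project code:

import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
// Illustrative upsert-then-expire pattern, analogous to replace(...) followed by
// deleteByUpdateTimeBeforeInDB(clusterId, triggerTime - 10min). Not project code.
public class UpsertExpireDemo {
private final Map<String, Instant> lastUpdated = new HashMap<>();
void replace(String key, Instant now) {
lastUpdated.put(key, now); // upsert: refresh the row's update time
}
void expireOlderThan(Instant cutoff) {
// Rows not refreshed recently (e.g. checks that stopped reporting) disappear.
lastUpdated.entrySet().removeIf(e -> e.getValue().isBefore(cutoff));
}
public static void main(String[] args) {
UpsertExpireDemo store = new UpsertExpireDemo();
Instant now = Instant.now();
store.replace("cluster-1:TopicUnderReplicated", now);
store.expireOlderThan(now.minusSeconds(10 * 60));
System.out.println(store.lastUpdated.keySet()); // [cluster-1:TopicUnderReplicated]
}
}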

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/job/CommunityReassignJobTask.java Normal file

@@ -0,0 +1,40 @@
package com.xiaojukeji.know.streaming.km.task.job;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignJobService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
@Task(name = "CommunityReassignJobTask",
description = "Community replica reassignment scheduling task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 6 * 60)
public class CommunityReassignJobTask extends AbstractClusterPhyDispatchTask {
@Autowired
private ReassignJobService reassignJobService;
@Override
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
// Fetch the job currently in reassignment
Long jobId = reassignJobService.getOneRunningJobId(clusterPhy.getId());
if (jobId == null) {
// No running job
return TaskResult.SUCCESS;
}
// Verify and update the job's status
Result<Void> rv = reassignJobService.verifyAndUpdateStatue(jobId);
// Update the sync progress info
reassignJobService.getAndUpdateSubJobExtendData(jobId);
return rv.failed() ? TaskResult.FAIL : TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/job/KMJobTask.java Normal file

@@ -0,0 +1,27 @@
package com.xiaojukeji.know.streaming.km.task.job;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.core.service.job.JobService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
@Task(name = "kmJobTask",
description = "KM job module scheduling task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 6 * 60)
public class KMJobTask extends AbstractClusterPhyDispatchTask {
@Autowired
private JobService jobService;
@Override
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
jobService.scheduleJobByClusterId(clusterPhy.getId());
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncBrokerConfigDiffTask.java Normal file

@@ -0,0 +1,123 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.kafkaconfig.KafkaConfigDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerConfigPO;
import com.xiaojukeji.know.streaming.km.common.enums.config.ConfigDiffTypeEnum;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerConfigService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Sync broker config diff info to the DB
 * @author zengqiao
 * @date 22/02/25
 */
@Task(name = "SyncBrokerConfigDiffTask",
description = "Sync broker config diffs to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 6 * 60)
public class SyncBrokerConfigDiffTask extends AbstractClusterPhyDispatchTask {
protected static final ILog log = LogFactory.getLog(SyncBrokerConfigDiffTask.class);
@Autowired
private BrokerService brokerService;
@Autowired
private BrokerConfigService brokerConfigService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
// <configName, <BrokerId, ConfigValue>>
Map<String, Map<Integer, String>> allConfigMap = new HashMap<>();
List<Broker> brokerList = brokerService.listAliveBrokersFromDB(clusterPhy.getId());
Set<Integer> brokerIdSet = brokerList.stream().map(elem -> elem.getBrokerId()).collect(Collectors.toSet());
// Fetch the config of each broker
for (Integer brokerId: brokerIdSet) {
Result<List<KafkaConfigDetail>> configResult = brokerConfigService.getBrokerConfigDetailFromKafka(clusterPhy.getId(), brokerId);
if (configResult.failed()) {
log.error("method=processSubTask||clusterPhyId={}||brokerId={}||result={}||errMsg=get config failed!",
clusterPhy.getId(),
brokerId,
configResult
);
continue;
}
List<KafkaConfigDetail> configList = configResult.hasData() ? configResult.getData() : new ArrayList<>();
configList.forEach(elem -> {
allConfigMap.putIfAbsent(elem.getName(), new HashMap<>());
allConfigMap.get(elem.getName()).put(brokerId, elem.getValue());
});
}
// Compare config values entry by entry
List<BrokerConfigPO> poList = new ArrayList<>();
for (Map.Entry<String, Map<Integer, String>> configEntry: allConfigMap.entrySet()) {
if (brokerIdSet.size() <= 1) {
// Only one broker, so nothing to diff
continue;
}
if (configEntry.getValue().size() == 1) {
// Config reported by a single broker only
Integer brokerId = new ArrayList<>(configEntry.getValue().keySet()).get(0);
poList.add(new BrokerConfigPO(
clusterPhy.getId(),
brokerId,
configEntry.getKey(),
configEntry.getValue().getOrDefault(brokerId, ""),
ConfigDiffTypeEnum.ALONE_POSSESS.getCode(),
new Date(triggerTimeUnitMs))
);
}
// Set of distinct config values across brokers
Set<String> configValueSet = new HashSet<>(configEntry.getValue().values());
if (configValueSet.size() <= 1) {
// No differences
continue;
}
// This config differs across brokers
configEntry.getValue().entrySet().stream().forEach(
elem -> poList.add(new BrokerConfigPO(
clusterPhy.getId(),
elem.getKey(),
configEntry.getKey(),
elem.getValue(),
ConfigDiffTypeEnum.UN_EQUAL.getCode(),
new Date(triggerTimeUnitMs))
)
);
}
for (BrokerConfigPO po: poList) {
try {
brokerConfigService.replaceBrokerConfigDiff(po);
} catch (Exception e) {
log.error("method=processSubTask||clusterPhyId={}||data={}||errMsg=exception!", clusterPhy.getId(), po, e);
}
}
// Delete diffs from more than 10 minutes ago
brokerConfigService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 10 * 60 * 1000L));
return TaskResult.SUCCESS;
}
}
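
A toy walk-through makes the two diff categories concrete: with more than one live broker, a config reported by exactly one broker is recorded as ALONE_POSSESS, a config whose values differ across brokers as UN_EQUAL, and identical values produce no row. The data below is invented:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
// Toy walk-through of the classification above; all data is invented.
public class ConfigDiffDemo {
public static void main(String[] args) {
// <configName, <brokerId, configValue>>
Map<String, Map<Integer, String>> allConfigMap = new HashMap<>();
Map<Integer, String> retention = new HashMap<>();
retention.put(1, "604800000");
retention.put(2, "86400000");
allConfigMap.put("log.retention.ms", retention); // values differ -> UN_EQUAL
Map<Integer, String> flag = new HashMap<>();
flag.put(1, "true");
allConfigMap.put("custom.flag", flag); // only broker 1 reports it -> ALONE_POSSESS
Map<Integer, String> ioThreads = new HashMap<>();
ioThreads.put(1, "8");
ioThreads.put(2, "8");
allConfigMap.put("num.io.threads", ioThreads); // identical -> no diff row
for (Map.Entry<String, Map<Integer, String>> entry: allConfigMap.entrySet()) {
if (entry.getValue().size() == 1) {
System.out.println(entry.getKey() + " -> ALONE_POSSESS");
}
Set<String> valueSet = new HashSet<>(entry.getValue().values());
if (valueSet.size() > 1) {
System.out.println(entry.getKey() + " -> UN_EQUAL");
}
}
}
}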

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncBrokerTask.java Normal file

@@ -0,0 +1,39 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
@Task(name = "SyncBrokerTask",
description = "Sync broker info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncBrokerTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncBrokerTask.class);
@Autowired
private BrokerService brokerService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<List<Broker>> brokersResult = brokerService.listBrokersFromKafka(clusterPhy);
if (brokersResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, brokersResult.getMessage());
}
brokerService.updateAliveBrokers(clusterPhy.getId(), brokersResult.getData());
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncControllerTask.java Normal file

@@ -0,0 +1,43 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.kafkacontroller.KafkaControllerService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
@Task(name = "SyncControllerTask",
description = "Sync controller info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncControllerTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncControllerTask.class);
@Autowired
private KafkaControllerService kafkaControllerService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<KafkaController> controllerResult = kafkaControllerService.getControllerFromKafka(clusterPhy);
if (controllerResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, controllerResult.getMessage());
}
if (controllerResult.getData() == null) {
kafkaControllerService.setNoKafkaController(clusterPhy.getId(), System.currentTimeMillis() / 1000L * 1000L);
} else {
kafkaControllerService.insertAndIgnoreDuplicateException(controllerResult.getData());
}
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaAclTask.java Normal file

@@ -0,0 +1,56 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter;
import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.apache.kafka.common.acl.AclBinding;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
import java.util.stream.Collectors;
@Task(name = "SyncKafkaAclTask",
description = "Sync Kafka ACL info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncKafkaAclTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncKafkaAclTask.class);
@Autowired
private KafkaAclService kafkaAclService;
@Autowired
private OpKafkaAclService opKafkaAclService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getAclFromKafka(clusterPhy.getId());
if (aclBindingListResult.failed()) {
return TaskResult.FAIL;
}
if (!aclBindingListResult.hasData()) {
return TaskResult.SUCCESS;
}
// Update the DB data
List<KafkaAclPO> poList = aclBindingListResult.getData()
.stream()
.map(elem -> KafkaAclConverter.convert2KafkaAclPO(clusterPhy.getId(), elem, triggerTimeUnitMs))
.collect(Collectors.toList());
opKafkaAclService.batchUpdateAcls(clusterPhy.getId(), poList);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaGroupTask.java Normal file

@@ -0,0 +1,118 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO;
import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;
import java.util.stream.Collectors;
@Task(name = "SyncKafkaGroupTask",
description = "Sync Kafka group info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncKafkaGroupTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncKafkaGroupTask.class);
@Autowired
private GroupService groupService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
TaskResult tr = TaskResult.SUCCESS;
List<String> groupNameList = groupService.listGroupsFromKafka(clusterPhy.getId());
for (String groupName: groupNameList) {
if (!TaskResult.SUCCESS.equals(this.updateGroupMembersTask(clusterPhy, groupName, triggerTimeUnitMs))) {
tr = TaskResult.FAIL;
}
}
if (!TaskResult.SUCCESS.equals(tr)) {
return tr;
}
// Delete stale groups
groupService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 5 * 60 * 1000));
return tr;
}
private TaskResult updateGroupMembersTask(ClusterPhy clusterPhy, String groupName, long triggerTimeUnitMs) {
try {
List<GroupMemberPO> poList = this.getGroupMembers(clusterPhy.getId(), groupName, new Date(triggerTimeUnitMs));
for (GroupMemberPO po: poList) {
groupService.replaceDBData(po);
}
} catch (Exception e) {
log.error("method=updateGroupMembersTask||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhy.getId(), groupName, e);
return TaskResult.FAIL;
}
return TaskResult.SUCCESS;
}
private List<GroupMemberPO> getGroupMembers(Long clusterPhyId, String groupName, Date updateTime) throws NotExistException, AdminOperateException {
Map<String, GroupMemberPO> groupMap = new HashMap<>();
// Find which topics the group has consumed, from its committed offsets
Map<TopicPartition, Long> offsetMap = groupService.getGroupOffset(clusterPhyId, groupName);
for (TopicPartition topicPartition: offsetMap.keySet()) {
GroupMemberPO po = groupMap.get(topicPartition.topic());
if (po == null) {
po = new GroupMemberPO(clusterPhyId, topicPartition.topic(), groupName, updateTime);
}
groupMap.put(topicPartition.topic(), po);
}
// On top of that, enrich with the group's detailed description
ConsumerGroupDescription consumerGroupDescription = groupService.getGroupDescription(clusterPhyId, groupName);
if (consumerGroupDescription == null) {
return new ArrayList<>(groupMap.values());
}
groupMap.forEach((key, val) -> val.setState(GroupStateEnum.getByRawState(consumerGroupDescription.state()).getState()));
for (MemberDescription memberDescription : consumerGroupDescription.members()) {
Set<TopicPartition> partitionList = new HashSet<>();
if (!ValidateUtils.isNull(memberDescription.assignment().topicPartitions())) {
partitionList = memberDescription.assignment().topicPartitions();
}
Set<String> topicNameSet = partitionList.stream().map(elem -> elem.topic()).collect(Collectors.toSet());
for (String topicName: topicNameSet) {
groupMap.putIfAbsent(topicName, new GroupMemberPO(clusterPhyId, topicName, groupName, updateTime));
GroupMemberPO po = groupMap.get(topicName);
po.setMemberCount(po.getMemberCount() + 1);
po.setState(GroupStateEnum.getByRawState(consumerGroupDescription.state()).getState());
}
}
// Special case: the group exists but is not consuming any topic
if (groupMap.isEmpty()) {
GroupMemberPO po = new GroupMemberPO(clusterPhyId, "", groupName, updateTime);
po.setState(GroupStateEnum.getByRawState(consumerGroupDescription.state()).getState());
groupMap.put("", po);
}
return new ArrayList<>(groupMap.values());
}
}
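
The per-topic aggregation in getGroupMembers can be hard to follow: the committed offsets seed one row per consumed topic, then each member's assignment increments the member count of every topic it owns. A standalone sketch with plain collections (topic names and assignments are invented):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
// Standalone sketch of the aggregation in getGroupMembers; data is invented.
public class GroupAggregationDemo {
public static void main(String[] args) {
// Topics seen in the group's committed offsets seed one row per topic (memberCount 0).
Set<String> offsetTopics = new HashSet<>(Arrays.asList("orders", "payments"));
// Each member's assigned topics, as reported by ConsumerGroupDescription.members().
List<Set<String>> memberAssignments = Arrays.asList(
new HashSet<>(Arrays.asList("orders")),
new HashSet<>(Arrays.asList("orders", "payments")));
Map<String, Integer> memberCountByTopic = new HashMap<>();
for (String topic: offsetTopics) {
memberCountByTopic.put(topic, 0);
}
// Every member that has a topic in its assignment bumps that topic's member count.
for (Set<String> assigned: memberAssignments) {
for (String topic: assigned) {
memberCountByTopic.merge(topic, 1, Integer::sum);
}
}
System.out.println(memberCountByTopic); // e.g. {orders=2, payments=1}
}
}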

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncKafkaUserTask.java Normal file

@@ -0,0 +1,50 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkauser.KafkaUser;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.core.service.kafkauser.KafkaUserService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
import java.util.stream.Collectors;
@Task(name = "SyncKafkaUserTask",
description = "Sync KafkaUser info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncKafkaUserTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncKafkaUserTask.class);
@Autowired
private KafkaUserService kafkaUserService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
// Fetch the KafkaUser data
Result<List<KafkaUser>> kafkaUserResult = kafkaUserService.getKafkaUserFromKafka(clusterPhy.getId());
if (kafkaUserResult.failed()) {
return TaskResult.FAIL;
}
if (!kafkaUserResult.hasData()) {
return TaskResult.SUCCESS;
}
// Update the data in the DB
kafkaUserService.batchReplaceKafkaUserInDB(
clusterPhy.getId(),
kafkaUserResult.getData().stream().map(elem -> elem.getName()).collect(Collectors.toList())
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncPartitionTask.java Normal file

@@ -0,0 +1,62 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.po.partition.PartitionPO;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Task(name = "SyncPartitionTask",
description = "Sync partition info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncPartitionTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncPartitionTask.class);
@Autowired
private PartitionService partitionService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<Map<String, List<Partition>>> partitionsResult = partitionService.listPartitionsFromKafka(clusterPhy);
if (partitionsResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, partitionsResult.getMessage());
}
// Fetch the partitions stored in the DB
Map<String, List<PartitionPO>> dbPartitionMap = new HashMap<>();
partitionService.listPartitionPOByCluster(clusterPhy.getId()).forEach(
elem -> {
dbPartitionMap.putIfAbsent(elem.getTopicName(), new ArrayList<>());
dbPartitionMap.get(elem.getTopicName()).add(elem);
}
);
for (Map.Entry<String, List<Partition>> entry: partitionsResult.getData().entrySet()) {
try {
partitionService.updatePartitions(clusterPhy.getId(), entry.getKey(), entry.getValue(), dbPartitionMap.getOrDefault(entry.getKey(), new ArrayList<>()));
} catch (Exception e) {
log.error("class=SyncPartitionTask||method=processSubTask||clusterPhyId={}||topicName={}||errMsg=exception", clusterPhy.getId(), entry.getKey(), e);
}
}
// Delete partitions of topics that no longer exist
partitionService.deletePartitionsIfNotIn(clusterPhy.getId(), partitionsResult.getData().keySet());
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncTopicConfigTask.java Normal file

@@ -0,0 +1,84 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.TopicConfig;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Sync topic configs
 * @author zengqiao
 * @date 22/02/25
 */
@Task(name = "SyncTopicConfigTask",
description = "Sync topic retention config to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncTopicConfigTask extends AbstractClusterPhyDispatchTask {
protected static final ILog log = LogFactory.getLog(SyncTopicConfigTask.class);
@Autowired
private TopicService topicService;
@Autowired
private TopicConfigService kafkaConfigService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
boolean success = true;
List<TopicConfig> topicConfigList = new ArrayList<>();
for (Topic topic: topicService.listTopicsFromDB(clusterPhy.getId())) {
Result<TopicConfig> configResult = this.getTopicConfig(clusterPhy.getId(), topic.getTopicName());
if (configResult.failed()) {
success = false;
continue;
}
topicConfigList.add(configResult.getData());
}
topicService.batchReplaceConfig(clusterPhy.getId(), topicConfigList);
return success ? TaskResult.SUCCESS : TaskResult.FAIL;
}
private Result<TopicConfig> getTopicConfig(Long clusterPhyId, String topicName) {
Result<Map<String, String>> configResult = kafkaConfigService.getTopicConfigFromKafka(clusterPhyId, topicName);
if (configResult.failed()) {
return Result.buildFromIgnoreData(configResult);
}
Map<String, String> configMap = configResult.getData();
if (configMap == null) {
configMap = new HashMap<>();
}
TopicConfig config = new TopicConfig();
config.setClusterPhyId(clusterPhyId);
config.setTopicName(topicName);
Long retentionMs = ConvertUtil.string2Long(configMap.get(org.apache.kafka.common.config.TopicConfig.RETENTION_MS_CONFIG));
config.setRetentionMs(retentionMs == null ? -1L : retentionMs);
return Result.buildSuc(config);
}
}
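
The retention fallback above reduces to: parse retention.ms if present, otherwise store -1. A tiny standalone equivalent — ConvertUtil's real implementation is not shown in this commit, so the parse helper here is an assumed stand-in:

// Standalone equivalent of the retention.ms fallback; string2Long is an assumed stand-in.
public class RetentionFallbackDemo {
static Long string2Long(String s) {
try {
return s == null ? null : Long.valueOf(s);
} catch (NumberFormatException e) {
return null;
}
}
public static void main(String[] args) {
Long missing = string2Long(null); // retention.ms absent from the config map
System.out.println(missing == null ? -1L : missing); // -1
Long present = string2Long("604800000"); // 7 days in ms
System.out.println(present == null ? -1L : present); // 604800000
}
}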

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metadata/SyncTopicTask.java Normal file

@@ -0,0 +1,39 @@
package com.xiaojukeji.know.streaming.km.task.metadata;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
@Task(name = "SyncTopicTask",
description = "Sync topic info to the DB",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class SyncTopicTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(SyncTopicTask.class);
@Autowired
private TopicService topicService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
Result<List<Topic>> topicsResult = topicService.listTopicsFromKafka(clusterPhy);
if (topicsResult.failed()) {
return new TaskResult(TaskResult.FAIL_CODE, topicsResult.getMessage());
}
topicService.batchReplaceMetadata(clusterPhy.getId(), topicsResult.getData());
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metrics/BrokerMetricCollectorTask.java Normal file

@@ -0,0 +1,42 @@
package com.xiaojukeji.know.streaming.km.task.metrics;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.metric.BrokerMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import com.xiaojukeji.know.streaming.km.task.service.TaskThreadPoolService;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author didi
*/
@Task(name = "BrokerMetricCollectorTask",
description = "Broker metric collection task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class BrokerMetricCollectorTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(BrokerMetricCollectorTask.class);
@Autowired
private BrokerMetricCollector brokerMetricCollector;
@Autowired
private TaskThreadPoolService taskThreadPoolService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
taskThreadPoolService.submitHeavenTask(
String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
100000,
() -> brokerMetricCollector.collectMetrics(clusterPhy)
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metrics/ClusterMetricCollectorTask.java Normal file

@@ -0,0 +1,42 @@
package com.xiaojukeji.know.streaming.km.task.metrics;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.metric.ClusterMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import com.xiaojukeji.know.streaming.km.task.service.TaskThreadPoolService;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author didi
*/
@Task(name = "ClusterMetricCollectorTask",
description = "Cluster metric collection task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class ClusterMetricCollectorTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(ClusterMetricCollectorTask.class);
@Autowired
private ClusterMetricCollector clusterMetricCollector;
@Autowired
private TaskThreadPoolService taskThreadPoolService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
taskThreadPoolService.submitHeavenTask(
String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
100000,
() -> clusterMetricCollector.collectMetrics(clusterPhy)
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metrics/GroupMetricCollectorTask.java Normal file

@@ -0,0 +1,42 @@
package com.xiaojukeji.know.streaming.km.task.metrics;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.metric.GroupMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import com.xiaojukeji.know.streaming.km.task.service.TaskThreadPoolService;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author didi
*/
@Task(name = "GroupMetricCollectorTask",
description = "Group metric collection task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class GroupMetricCollectorTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(GroupMetricCollectorTask.class);
@Autowired
private GroupMetricCollector groupMetricCollector;
@Autowired
private TaskThreadPoolService taskThreadPoolService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
taskThreadPoolService.submitHeavenTask(
String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
100000,
() -> groupMetricCollector.collectMetrics(clusterPhy)
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metrics/PartitionMetricCollectorTask.java Normal file

@@ -0,0 +1,39 @@
package com.xiaojukeji.know.streaming.km.task.metrics;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.xiaojukeji.know.streaming.km.collector.metric.PartitionMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import com.xiaojukeji.know.streaming.km.task.service.TaskThreadPoolService;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author didi
*/
@Task(name = "PartitionMetricCollectorTask",
description = "Partition metric collection task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class PartitionMetricCollectorTask extends AbstractClusterPhyDispatchTask {
@Autowired
private PartitionMetricCollector partitionMetricCollector;
@Autowired
private TaskThreadPoolService taskThreadPoolService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
taskThreadPoolService.submitHeavenTask(
String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
100000,
() -> partitionMetricCollector.collectMetrics(clusterPhy)
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metrics/ReplicaMetricCollectorTask.java Normal file

@@ -0,0 +1,41 @@
package com.xiaojukeji.know.streaming.km.task.metrics;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.xiaojukeji.know.streaming.km.collector.metric.ReplicaMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import com.xiaojukeji.know.streaming.km.task.service.TaskThreadPoolService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author didi
*/
@Slf4j
@Task(name = "ReplicaMetricCollectorTask",
description = "Replica metric collection task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class ReplicaMetricCollectorTask extends AbstractClusterPhyDispatchTask {
@Autowired
private ReplicaMetricCollector replicaMetricCollector;
@Autowired
private TaskThreadPoolService taskThreadPoolService;
@Override
protected TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
taskThreadPoolService.submitHeavenTask(
String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
100000,
() -> replicaMetricCollector.collectMetrics(clusterPhy)
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/metrics/TopicMetricCollectorTask.java Normal file

@@ -0,0 +1,42 @@
package com.xiaojukeji.know.streaming.km.task.metrics;
import com.didiglobal.logi.job.annotation.Task;
import com.didiglobal.logi.job.common.TaskResult;
import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.collector.metric.TopicMetricCollector;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.task.AbstractClusterPhyDispatchTask;
import com.xiaojukeji.know.streaming.km.task.service.TaskThreadPoolService;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author didi
*/
@Task(name = "TopicMetricCollectorTask",
description = "Topic metric collection task",
cron = "0 0/1 * * * ? *",
autoRegister = true,
consensual = ConsensualEnum.BROADCAST,
timeout = 2 * 60)
public class TopicMetricCollectorTask extends AbstractClusterPhyDispatchTask {
private static final ILog log = LogFactory.getLog(TopicMetricCollectorTask.class);
@Autowired
private TopicMetricCollector topicMetricCollector;
@Autowired
private TaskThreadPoolService taskThreadPoolService;
@Override
public TaskResult processSubTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) throws Exception {
taskThreadPoolService.submitHeavenTask(
String.format("TaskName=%s clusterPhyId=%d", this.taskName, clusterPhy.getId()),
100000,
() -> topicMetricCollector.collectMetrics(clusterPhy)
);
return TaskResult.SUCCESS;
}
}

km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/TaskThreadPoolService.java Normal file

@@ -0,0 +1,38 @@
package com.xiaojukeji.know.streaming.km.task.service;
import com.xiaojukeji.know.streaming.km.common.utils.FutureNoWaitUtil;
import lombok.NoArgsConstructor;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
/**
 * Heavy tasks should not run on LogIJob's own scheduler threads,
 * so the task module maintains its own thread pool.
 * TaskThreadPoolService is responsible for executing those tasks.
 */
@Service
@NoArgsConstructor
public class TaskThreadPoolService {
/**
 * Heavy tasks, e.g. metric collection
 */
private FutureNoWaitUtil<Object> heavenTaskThreadPool;
@Value(value = "${thread-pool.task.heaven.thread-num:12}")
private Integer heavenTaskThreadNum;
@Value(value = "${thread-pool.task.heaven.queue-size:1000}")
private Integer heavenTaskQueueSize;
@PostConstruct
private void init() {
heavenTaskThreadPool = FutureNoWaitUtil.init("heavenTaskThreadPool", heavenTaskThreadNum, heavenTaskThreadNum, heavenTaskQueueSize);
}
public void submitHeavenTask(String taskName, Integer timeoutUnitMs, Runnable runnable) {
heavenTaskThreadPool.runnableTask(taskName, timeoutUnitMs, runnable);
}
}
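
FutureNoWaitUtil's implementation is not part of this commit; assuming it wraps a bounded ThreadPoolExecutor for fire-and-forget execution, a rough stand-in could look like this — pool shape and rejection policy are assumptions, not the library's actual behavior:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
// Rough stand-in for what FutureNoWaitUtil presumably provides; all details are assumptions.
public class BoundedFireAndForgetPool {
private final ThreadPoolExecutor executor;
public BoundedFireAndForgetPool(int threads, int queueSize) {
this.executor = new ThreadPoolExecutor(
threads, threads,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(queueSize),
new ThreadPoolExecutor.DiscardOldestPolicy()); // shed load when saturated (assumption)
}
public void submit(String taskName, Runnable runnable) {
// Fire-and-forget: the caller (e.g. a metric collector task) never blocks on completion.
executor.execute(() -> {
try {
runnable.run();
} catch (Exception e) {
// Log in real code so one failing task can't poison the pool.
System.err.println("task failed: " + taskName);
}
});
}
}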