Add rebalance / testing / license capabilities

This commit is contained in:
zengqiao
2023-02-23 11:56:46 +08:00
parent c27786a257
commit c56d8cfb0f
137 changed files with 10772 additions and 3 deletions

View File

@@ -176,7 +176,10 @@ public class MultiClusterPhyManagerImpl implements MultiClusterPhyManager {
// Fetch all metrics
List<ClusterMetrics> metricsList = new ArrayList<>();
for (ClusterPhyDashboardVO vo: voList) {
-   metricsList.add(clusterMetricService.getLatestMetricsFromCache(vo.getId()));
+   ClusterMetrics clusterMetrics = clusterMetricService.getLatestMetricsFromCache(vo.getId());
+   clusterMetrics.getMetrics().putIfAbsent(ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_STATE, (float) HealthStateEnum.UNKNOWN.getDimension());
+   metricsList.add(clusterMetrics);
}
// Range search
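
Note on the change above: Map.putIfAbsent only writes the UNKNOWN health state when the metrics map has no entry for that key, so a genuine health value already in the cache is never overwritten. A minimal, self-contained sketch of that behavior (the constant names here are hypothetical stand-ins for ClusterMetricVersionItems.CLUSTER_METRIC_HEALTH_STATE and HealthStateEnum.UNKNOWN.getDimension()):

import java.util.HashMap;
import java.util.Map;

public class PutIfAbsentDemo {
    // Hypothetical stand-ins for the real constants used in the change above.
    static final String HEALTH_STATE = "HealthState";
    static final float UNKNOWN = -1.0f;

    public static void main(String[] args) {
        Map<String, Float> metrics = new HashMap<>();

        // Metric missing: the UNKNOWN default is filled in.
        metrics.putIfAbsent(HEALTH_STATE, UNKNOWN);
        System.out.println(metrics.get(HEALTH_STATE)); // -1.0

        // A real value is already present: putIfAbsent is a no-op.
        metrics.put(HEALTH_STATE, 0.0f);
        metrics.putIfAbsent(HEALTH_STATE, UNKNOWN);
        System.out.println(metrics.get(HEALTH_STATE)); // 0.0
    }
}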

View File

@@ -22,6 +22,12 @@
</properties>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-rebalance</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>

View File

@@ -0,0 +1,26 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceIntervalDTO {
@NotBlank(message = "clusterBalanceIntervalDTO.type不允许为空")
@ApiModelProperty("均衡维度:cpu,disk,bytesIn,bytesOut")
private String type;
@NotNull(message = "clusterBalanceIntervalDTO.intervalPercent不允许为空")
@ApiModelProperty("平衡区间百分比")
private Double intervalPercent;
@NotNull(message = "clusterBalanceIntervalDTO.priority不允许为空")
@ApiModelProperty("优先级")
private Integer priority;
}
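
The constraints on this DTO are plain Bean Validation annotations, so they only take effect when a Validator runs (Spring triggers this for @Validated controller parameters). A hedged, standalone sketch of how the messages surface, assuming a Bean Validation provider such as Hibernate Validator is on the classpath; the nested class is a simplified local copy for illustration, not the class from this commit:

import javax.validation.ConstraintViolation;
import javax.validation.Validation;
import javax.validation.Validator;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
import java.util.Set;

public class IntervalValidationDemo {
    // Simplified local copy of ClusterBalanceIntervalDTO, for illustration only.
    static class Interval {
        @NotBlank(message = "clusterBalanceIntervalDTO.type must not be blank")
        String type;
        @NotNull(message = "clusterBalanceIntervalDTO.intervalPercent must not be null")
        Double intervalPercent;
        @NotNull(message = "clusterBalanceIntervalDTO.priority must not be null")
        Integer priority;
    }

    public static void main(String[] args) {
        Validator validator = Validation.buildDefaultValidatorFactory().getValidator();
        Interval dto = new Interval();
        dto.type = "disk"; // satisfies @NotBlank; the other fields stay null
        Set<ConstraintViolation<Interval>> violations = validator.validate(dto);
        violations.forEach(v -> System.out.println(v.getMessage()));
        // Prints the two @NotNull messages for intervalPercent and priority.
    }
}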

View File

@@ -0,0 +1,20 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.util.Map;
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceOverviewDTO extends PaginationBaseDTO {
@ApiModelProperty("host")
private String host;
@ApiModelProperty("key:disk,bytesOut,bytesIn value:均衡状态 0已均衡2未均衡")
private Map<String, Integer> stateParam;
}

View File

@@ -0,0 +1,43 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.util.List;
/**
* @author zengqiao
* @date 22/02/24
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalancePreviewDTO extends BaseDTO {
@ApiModelProperty("集群id")
private Long clusterId;
@ApiModelProperty("均衡节点")
private List<Integer> brokers;
@ApiModelProperty("topic黑名单")
private List<String> topicBlackList;
@ApiModelProperty("均衡区间详情")
private List<ClusterBalanceIntervalDTO> clusterBalanceIntervalList;
@ApiModelProperty("指标计算周期,单位分钟")
private Integer metricCalculationPeriod;
@ApiModelProperty("任务并行数")
private Integer parallelNum;
@ApiModelProperty("执行策略, 1优先最大副本2优先最小副本")
private Integer executionStrategy;
@ApiModelProperty("限流值")
private Long throttleUnitB;
}

View File

@@ -0,0 +1,66 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
import java.util.List;
/**
* @author zengqiao
* @date 22/02/24
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceStrategyDTO extends BaseDTO {
@ApiModelProperty("是否是周期性任务")
private boolean scheduleJob;
@NotBlank(message = "scheduleCron不允许为空")
@ApiModelProperty("如果是周期任务那么任务的周期cron表达式")
private String scheduleCron;
@NotNull(message = "status不允许为空")
@ApiModelProperty("周期任务状态0:不开启1开启")
private Integer status;
@NotNull(message = "clusterId不允许为空")
@ApiModelProperty("集群id")
private Long clusterId;
@ApiModelProperty("均衡节点")
private List<Integer> brokers;
@ApiModelProperty("topic黑名单")
private List<String> topicBlackList;
@NotNull(message = "clusterBalanceIntervalDTO不允许为空")
@ApiModelProperty("均衡区间详情")
private List<ClusterBalanceIntervalDTO> clusterBalanceIntervalList;
@NotNull(message = "metricCalculationPeriod不允许为空")
@ApiModelProperty("指标计算周期,单位秒")
private Integer metricCalculationPeriod;
@NotNull(message = "parallelNum不允许为空")
@ApiModelProperty("任务并行数0代表不限")
private Integer parallelNum;
@NotNull(message = "executionStrategy不允许为空")
@ApiModelProperty("执行策略, 1优先最大副本2优先最小副本")
private Integer executionStrategy;
@Min(value = 1, message = "throttleUnitB不允许小于1")
@ApiModelProperty("限流值")
private Long throttleUnitB;
@ApiModelProperty("备注说明")
private String description;
}

View File

@@ -0,0 +1,27 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
@AllArgsConstructor
@EnterpriseLoadReBalance
public class ClusterBalanceInterval {
/**
 * Balance dimension: cpu, disk, bytesIn, bytesOut
 */
private String type;
/**
 * Balance interval percentage
 */
private Double intervalPercent;
/**
 * Priority
 */
private Integer priority;
}

View File

@@ -0,0 +1,41 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Map;
@Data
@NoArgsConstructor
@AllArgsConstructor
@EnterpriseLoadReBalance
public class ClusterBalanceItemState {
/**
 * Whether cluster balancing is configured: true = configured, false = not configured
 */
private Boolean configureBalance;
/**
 * Whether balancing is enabled: true = enabled, false = disabled
 */
private Boolean enable;
/**
 * Whether each sub-item is balanced. key: disk, bytesIn, bytesOut, cpu; value: true = balanced, false = unbalanced
 * @see com.xiaojukeji.know.streaming.km.rebalance.model.Resource
 */
private Map<String, Boolean> itemState;
public Integer getResItemState(Resource res) {
if (itemState == null || !itemState.containsKey(res.resource())) {
return Constant.INVALID_CODE;
}
return itemState.get(res.resource()) ? Constant.YES : Constant.NO;
}
}
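
getResItemState deliberately returns Constant.INVALID_CODE when the map has no entry for the resource, letting callers distinguish "not evaluated" from balanced/unbalanced. A minimal sketch of the three outcomes; the INVALID_CODE/YES/NO values below are assumptions, the real ones are defined in Constant:

import java.util.HashMap;
import java.util.Map;

public class ItemStateDemo {
    // Assumed values; the real ones live in Constant.
    static final int INVALID_CODE = -1, YES = 1, NO = 0;

    // Mirrors the null-safe lookup in getResItemState above.
    static int resItemState(Map<String, Boolean> itemState, String res) {
        if (itemState == null || !itemState.containsKey(res)) {
            return INVALID_CODE; // resource was never evaluated
        }
        return itemState.get(res) ? YES : NO;
    }

    public static void main(String[] args) {
        Map<String, Boolean> state = new HashMap<>();
        state.put("disk", true);
        state.put("bytesIn", false);
        System.out.println(resItemState(state, "disk"));    // 1: balanced
        System.out.println(resItemState(state, "bytesIn")); // 0: unbalanced
        System.out.println(resItemState(state, "cpu"));     // -1: not evaluated
    }
}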

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
*/
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.BaseEntity;
import lombok.Data;
/**
 * Cluster balance job config entity
 *
 * @author fengqiongfeng
 * @date 2022-05-23
 */
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceJobConfig extends BaseEntity {
/**
 * Serialization version UID
 */
private static final long serialVersionUID = 1L;
/**
 * Cluster ID
 */
private Long clusterId;
/**
 * Brokers to balance
 */
private String brokers;
/**
 * Topic blacklist
 */
private String topicBlackList;
/**
 * 1 = balance immediately, 2 = periodic balancing
 */
private Integer type;
/**
 * Job schedule cron
 */
private String taskCron;
/**
 * Balance interval details
 */
private String balanceIntervalJson;
/**
 * Metric calculation period, in minutes
 */
private Integer metricCalculationPeriod;
/**
 * Reassignment script
 */
private String reassignmentJson;
/**
 * Task parallelism
 */
private Integer parallelNum;
/**
 * Execution strategy: 1 = largest replicas first, 2 = smallest replicas first
 */
private Integer executionStrategy;
/**
 * Throttle value
 */
private Long throttleUnitByte;
/**
 * Operator
 */
private String creator;
/**
 * Job state: 0 = disabled, 1 = enabled
 */
private Integer status;
}

View File

@@ -0,0 +1,64 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Data;
import java.util.Date;
/**
* @author zengqiao
* @date 22/05/06
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceReassign {
/**
 * Job ID
 */
private Long jobId;
/**
 * Cluster ID
 */
private Long clusterId;
/**
 * Topic name
 */
private String topicName;
/**
 * Partition ID
 */
private Integer partitionId;
/**
 * Source broker ID list
 */
private String originalBrokerIds;
/**
 * Target broker ID list
 */
private String reassignBrokerIds;
/**
 * Job start time
 */
private Date startTime;
/**
 * Job finish time
 */
private Date finishedTime;
/**
 * Extended data
 */
private String extendData;
/**
 * Job state
 */
private Integer status;
}

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByTopic;
import lombok.Data;
import java.util.Date;
import java.util.List;
/**
* @author zengqiao
* @date 22/05/06
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceReassignDetail {
/**
 * Throttle value
 */
private Long throttleUnitB;
/**
 * Start time
 */
private Date startTime;
/**
 * Finish time
 */
private Date finishedTime;
/**
 * Detailed information
 */
private List<ClusterBalanceDetailDataGroupByTopic> reassignTopicDetailsList;
}

View File

@@ -0,0 +1,47 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Data;
/**
* @author zengqiao
* @date 22/05/06
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceReassignExtendData {
/**
 * Original retention time
 */
private Long originalRetentionTimeUnitMs;
/**
 * Retention time during reassignment
 */
private Long reassignRetentionTimeUnitMs;
/**
 * LogSize still to reassign
 */
private Long needReassignLogSizeUnitB;
/**
 * LogSize already reassigned
 */
private Long finishedReassignLogSizeUnitB;
/**
 * Estimated remaining time
 */
private Long remainTimeUnitMs;
/**
 * Current replica count
 */
private Integer originReplicaNum;
/**
 * New replica count
 */
private Integer reassignReplicaNum;
}

View File

@@ -0,0 +1,43 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.content;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.content.BaseJobCreateContent;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceIntervalDTO;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import javax.validation.constraints.Min;
import java.util.List;
@Data
@EnterpriseLoadReBalance
public class JobClusterBalanceContent extends BaseJobCreateContent {
@Min(value = 1, message = "clusterId不允许为null或者小于0")
@ApiModelProperty(value = "集群ID, 默认为逻辑集群ID", example = "6")
private Long clusterId;
@Min(value = 1, message = "throttle不允许为null或者小于0")
@ApiModelProperty(value = "限流值", example = "102400000")
private Long throttleUnitB;
@ApiModelProperty("topic黑名单")
private List<String> topicBlackList;
@ApiModelProperty("均衡区间详情")
private List<ClusterBalanceIntervalDTO> clusterBalanceIntervalList;
@ApiModelProperty("指标计算周期,单位分钟")
private Integer metricCalculationPeriod;
@ApiModelProperty("任务并行数")
private Integer parallelNum;
@ApiModelProperty("执行策略, 1优先最大副本2优先最小副本")
private Integer executionStrategy;
@ApiModelProperty("备注说明")
private String description;
@ApiModelProperty("是否是周期性任务")
private boolean scheduleJob;
}

View File

@@ -0,0 +1,79 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Data;
import java.util.List;
/**
* @author zengqiao
* @date 22/05/06
*/
@Data
@EnterpriseLoadReBalance
public abstract class AbstractClusterBalanceDetailData {
/**
 * Physical cluster ID
 */
private Long clusterPhyId;
/**
 * Topic name
 */
private String topicName;
/**
 * Source broker list
 */
private List<Integer> originalBrokerIdList;
/**
 * Target broker list
 */
private List<Integer> reassignBrokerIdList;
/**
 * LogSize still to reassign
 */
private Long needReassignLogSizeUnitB;
/**
 * LogSize already reassigned
 */
private Long finishedReassignLogSizeUnitB;
/**
 * Estimated remaining time
 */
private Long remainTimeUnitMs;
/**
 * Current replica count
 */
private Integer presentReplicaNum;
/**
 * Old replica count
 */
private Integer oldReplicaNum;
/**
 * New replica count
 */
private Integer newReplicaNum;
/**
 * Original retention time
 */
private Long originalRetentionTimeUnitMs;
/**
 * Retention time during reassignment
 */
private Long reassignRetentionTimeUnitMs;
/**
 * State
 */
private Integer status;
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Data;
/**
* @author zengqiao
* @date 22/05/06
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceDetailDataGroupByPartition extends AbstractClusterBalanceDetailData {
/**
 * Partition ID
 */
private Integer partitionId;
}

View File

@@ -0,0 +1,22 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Data;
import java.util.List;
/**
* @author zengqiao
* @date 22/05/06
*/
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceDetailDataGroupByTopic extends AbstractClusterBalanceDetailData {
/**
 * Partition ID list
 */
private List<Integer> partitionIdList;
private List<ClusterBalanceDetailDataGroupByPartition> reassignPartitionDetailsList;
}

View File

@@ -0,0 +1,76 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
/**
 * Cluster balance plan detail
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance detail information")
public class ClusterBalancePlanDetail implements Serializable {
@ApiModelProperty(value = "Balance state: 1 = balanced, 2 = unbalanced")
private Integer status;
@ApiModelProperty(value = "brokerId")
private Integer brokerId;
@ApiModelProperty(value = "broker host")
private String host;
@ApiModelProperty(value = "CPU before balancing")
private Double cpuBefore;
@ApiModelProperty(value = "Disk before balancing")
private Double diskBefore;
@ApiModelProperty(value = "BytesIn before balancing")
private Double byteInBefore;
@ApiModelProperty(value = "BytesOut before balancing")
private Double byteOutBefore;
@ApiModelProperty(value = "CPU after balancing")
private Double cpuAfter;
@ApiModelProperty(value = "CPU balance state: 1 = balanced, 2 = unbalanced")
private Integer cpuStatus;
@ApiModelProperty(value = "Disk after balancing")
private Double diskAfter;
@ApiModelProperty(value = "Disk balance state: 1 = balanced, 2 = unbalanced")
private Integer diskStatus;
@ApiModelProperty(value = "BytesIn after balancing")
private Double byteInAfter;
@ApiModelProperty(value = "BytesIn balance state: 1 = balanced, 2 = unbalanced")
private Integer byteInStatus;
@ApiModelProperty(value = "BytesOut after balancing")
private Double byteOutAfter;
@ApiModelProperty(value = "BytesOut balance state: 1 = balanced, 2 = unbalanced")
private Integer byteOutStatus;
@ApiModelProperty(value = "Moved-in size")
private Double inSize;
@ApiModelProperty(value = "Moved-in replica count")
private Double inReplica;
@ApiModelProperty(value = "Moved-out size")
private Double outSize;
@ApiModelProperty(value = "Moved-out replica count")
private Double outReplica;
}

View File

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
*/
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po;
import com.baomidou.mybatisplus.annotation.TableName;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Cluster balance job config entity
 *
 * @author fengqiongfeng
 * @date 2022-05-23
 */
@Data
@EnterpriseLoadReBalance
@NoArgsConstructor
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "cluster_balance_job_config")
public class ClusterBalanceJobConfigPO extends BasePO {
/**
 * Serialization version UID
 */
private static final long serialVersionUID = 1L;
/**
 * Cluster ID
 */
private Long clusterId;
/**
 * Topic blacklist
 */
private String topicBlackList;
/**
 * Job schedule cron
 */
private String taskCron;
/**
 * Balance interval details
 */
private String balanceIntervalJson;
/**
 * Metric calculation period, in minutes
 */
private Integer metricCalculationPeriod;
/**
 * Reassignment script
 */
private String reassignmentJson;
/**
 * Task parallelism
 */
private Integer parallelNum;
/**
 * Execution strategy: 1 = largest replicas first, 2 = smallest replicas first
 */
private Integer executionStrategy;
/**
 * Throttle value
 */
private Long throttleUnitB;
/**
 * Operator
 */
private String creator;
/**
 * Job state: 0 = disabled, 1 = enabled
 */
private Integer status;
}

View File

@@ -0,0 +1,125 @@
/*
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
*/
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po;
import com.baomidou.mybatisplus.annotation.TableName;
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * Cluster balance job entity
 *
 * @author fengqiongfeng
 * @date 2022-05-23
 */
@Data
@NoArgsConstructor
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "cluster_balance_job")
public class ClusterBalanceJobPO extends BasePO {
/**
 * Serialization version UID
 */
private static final long serialVersionUID = 1L;
/**
 * Cluster ID
 */
private Long clusterId;
/**
 * Brokers to balance
 */
private String brokers;
/**
 * Topic blacklist
 */
private String topicBlackList;
/**
 * 1 = balance immediately, 2 = periodic balancing
 */
private Integer type;
/**
 * Balance interval details
 */
private String balanceIntervalJson;
/**
 * Metric calculation period, in minutes
 */
private Integer metricCalculationPeriod;
/**
 * Reassignment script
 */
private String reassignmentJson;
/**
 * Task parallelism
 */
private Integer parallelNum;
/**
 * Execution strategy: 1 = largest replicas first, 2 = smallest replicas first
 */
private Integer executionStrategy;
/**
 * Throttle value
 */
private Long throttleUnitB;
/**
 * Total reassignment size
 */
private Double totalReassignSize;
/**
 * Total reassigned replica count
 */
private Integer totalReassignReplicaNum;
/**
 * Moved-in topics
 */
private String moveInTopicList;
/**
 * Broker balance details
 */
private String brokerBalanceDetail;
/**
 * Job state: 1 = running, 2 = preparing, 3 = succeeded, 4 = failed, 5 = cancelled
 */
private Integer status;
/**
 * Operator
 */
private String creator;
/**
 * Job start time
 */
private Date startTime;
/**
 * Job finish time
 */
private Date finishedTime;
/**
 * Remarks
 */
private String description;
}

View File

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
*/
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po;
import com.baomidou.mybatisplus.annotation.TableName;
import com.xiaojukeji.know.streaming.km.common.bean.po.BasePO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * Cluster balance reassignment detail entity
 *
 * @author fengqiongfeng
 * @date 2022-05-23
 */
@Data
@NoArgsConstructor
@TableName(Constant.MYSQL_TABLE_NAME_PREFIX + "cluster_balance_reassign")
public class ClusterBalanceReassignPO extends BasePO {
/**
 * Serialization version UID
 */
private static final long serialVersionUID = 1L;
/**
 * Job ID
 */
private Long jobId;
/**
 * Cluster ID
 */
private Long clusterId;
/**
 * Topic name
 */
private String topicName;
/**
 * Partition ID
 */
private Integer partitionId;
/**
 * Source broker ID list
 */
private String originalBrokerIds;
/**
 * Target broker ID list
 */
private String reassignBrokerIds;
/**
 * Job start time
 */
private Date startTime;
/**
 * Job finish time
 */
private Date finishedTime;
/**
 * Extended data
 */
private String extendData;
/**
 * Job state
 */
private Integer status;
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
/**
 * Cluster balance history information
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance history information")
public class ClusterBalanceHistorySubVO implements Serializable {
@ApiModelProperty(value = "Number of successfully balanced brokers")
private Long successNu;
@ApiModelProperty(value = "Number of brokers not successfully balanced")
private Long failedNu;
}

View File

@@ -0,0 +1,34 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
import java.util.Date;
import java.util.Map;
/**
 * Cluster balance history information
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance history information")
public class ClusterBalanceHistoryVO implements Serializable {
@ApiModelProperty(value = "Balance execution start time")
private Date begin;
@ApiModelProperty(value = "Balance execution end time")
private Date end;
@ApiModelProperty(value = "Balance job ID")
private Long jobId;
@ApiModelProperty(value = "Per-sub-item balance history", example = "cpu, disk")
private Map<String, ClusterBalanceHistorySubVO> sub;
}

View File

@@ -0,0 +1,20 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceIntervalVO {
@ApiModelProperty("均衡维度:cpu,disk,bytesIn,bytesOut")
private String type;
@ApiModelProperty("平衡区间百分比")
private Double intervalPercent;
@ApiModelProperty("优先级")
private Integer priority;
}

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2015, WINIT and/or its affiliates. All rights reserved. Use, Copy is subject to authorized license.
*/
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceInterval;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.util.List;
/**
 * Cluster balance job config VO
 *
 * @author fengqiongfeng
 * @date 2022-05-23
 */
@Data
@EnterpriseLoadReBalance
public class ClusterBalanceJobConfigVO {
/**
 * Serialization version UID
 */
private static final long serialVersionUID = 1L;
@ApiModelProperty("Cluster ID")
private Long clusterId;
@ApiModelProperty("Topic blacklist")
private List<String> topicBlackList;
@ApiModelProperty("Schedule cron")
private String scheduleCron;
@ApiModelProperty("Balance interval details")
private List<ClusterBalanceInterval> clusterBalanceIntervalList;
@ApiModelProperty("Metric calculation period, in minutes")
private Integer metricCalculationPeriod;
@ApiModelProperty("Task parallelism")
private Integer parallelNum;
@ApiModelProperty("Execution strategy: 1 = largest replicas first, 2 = smallest replicas first")
private Integer executionStrategy;
@ApiModelProperty("Throttle value")
private Long throttleUnitB;
@ApiModelProperty("Job state: 0 = disabled, 1 = enabled")
private Integer status;
}

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
 * Cluster balance overview sub-item
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance overview information")
public class ClusterBalanceOverviewSubVO implements Serializable {
@ApiModelProperty(value = "Average value", example = "e.g. average CPU of 43.4")
private Double avg;
@ApiModelProperty(value = "Specification", example = "1000")
private Double spec;
@ApiModelProperty(value = "Balance state", example = "0 = balanced, -1 = below the balance range, 1 = above the balance range")
private Integer status;
}

View File

@@ -0,0 +1,37 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
import java.util.Map;
/**
 * Cluster balance overview
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance overview information")
public class ClusterBalanceOverviewVO implements Serializable {
@ApiModelProperty(value = "brokerId", example = "123")
private Integer brokerId;
@ApiModelProperty(value = "broker host")
private String host;
@ApiModelProperty(value = "rack of the broker")
private String rack;
@ApiModelProperty(value = "leader")
private Integer leader;
@ApiModelProperty(value = "replicas")
private Integer replicas;
@ApiModelProperty(value = "Detailed per-sub-item statistics", example = "cpu, disk")
private Map<String, ClusterBalanceOverviewSubVO> sub;
}

View File

@@ -0,0 +1,64 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
/**
 * Cluster balance plan detail
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance plan detail information")
public class ClusterBalancePlanDetailVO implements Serializable {
@ApiModelProperty(value = "Balance state: 0 = balanced, 2 = unbalanced")
private Integer status;
@ApiModelProperty(value = "brokerId")
private Integer brokerId;
@ApiModelProperty(value = "broker host")
private String host;
@ApiModelProperty(value = "CPU before balancing")
private Double cpuBefore;
@ApiModelProperty(value = "Disk before balancing")
private Double diskBefore;
@ApiModelProperty(value = "BytesIn before balancing")
private Double byteInBefore;
@ApiModelProperty(value = "BytesOut before balancing")
private Double byteOutBefore;
@ApiModelProperty(value = "CPU after balancing")
private Double cpuAfter;
@ApiModelProperty(value = "Disk after balancing")
private Double diskAfter;
@ApiModelProperty(value = "BytesIn after balancing")
private Double byteInAfter;
@ApiModelProperty(value = "BytesOut after balancing")
private Double byteOutAfter;
@ApiModelProperty(value = "Moved-in size")
private Double inSize;
@ApiModelProperty(value = "Moved-in replica count")
private Double inReplica;
@ApiModelProperty(value = "Moved-out size")
private Double outSize;
@ApiModelProperty(value = "Moved-out replica count")
private Double outReplica;
}

View File

@@ -0,0 +1,49 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
import java.util.List;
/**
 * Cluster balance plan
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance information")
public class ClusterBalancePlanVO implements Serializable {
@ApiModelProperty(value = "Balance plan type: 1 = immediate, 2 = periodic")
private Integer type;
@ApiModelProperty(value = "Broker range for the balance execution")
private List<String> brokers;
@ApiModelProperty(value = "Topic blacklist for the balance execution")
private List<String> blackTopics;
@ApiModelProperty(value = "Topics moved in by the balance execution")
private List<String> topics;
@ApiModelProperty(value = "Total disk size moved, in bytes")
private Double moveSize;
@ApiModelProperty(value = "Total replicas moved")
private Integer replicas;
@ApiModelProperty(value = "Balance threshold")
private String threshold;
@ApiModelProperty(value = "reassignment json")
private String reassignmentJson;
@ApiModelProperty(value = "Balance interval information")
private List<ClusterBalanceIntervalVO> clusterBalanceIntervalList;
@ApiModelProperty(value = "Balance plan details")
private List<ClusterBalancePlanDetailVO> detail;
}

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@EnterpriseLoadReBalance
@AllArgsConstructor
@NoArgsConstructor
@ApiModel(description = "集群均衡状态子项的详细统计信息")
public class ClusterBalanceStateSubVO {
@ApiModelProperty(value = "平均值", example = "cpu的平均值43.4")
private Double avg;
@ApiModelProperty(value = "周期均衡时的均衡区间", example = "cpu的均衡值")
private Double interval;
@ApiModelProperty(value = "处于周期均衡时的均衡区间的最小值以下的broker个数", example = "4")
private Long smallNu;
@ApiModelProperty(value = "处于周期均衡时的均衡区间的broker个数", example = "4")
private Long betweenNu;
@ApiModelProperty(value = "处于周期均衡时的均衡区间的最大值以上的broker个数", example = "4")
private Long bigNu;
}

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
import java.util.Date;
import java.util.Map;
/**
 * Cluster balance state
 * @author zengqiao
 * @date 22/02/23
 */
@Data
@EnterpriseLoadReBalance
@ApiModel(description = "Cluster balance state information")
public class ClusterBalanceStateVO implements Serializable {
@ApiModelProperty(value = "Balance state", example = "0 = balanced, 2 = unbalanced")
private Integer status;
@ApiModelProperty(value = "Whether balancing is enabled", example = "true = enabled, false = disabled")
private Boolean enable;
@ApiModelProperty(value = "Next balance start time")
private Date next;
@ApiModelProperty(value = "Detailed per-sub-item statistics", example = "cpu, disk")
private Map<String, ClusterBalanceStateSubVO> sub;
}

View File

@@ -0,0 +1,476 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceIntervalDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalancePreviewDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceStrategyDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceInterval;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignExtendData;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalancePlanDetail;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.content.JobClusterBalanceContent;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobConfigPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.*;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums.ClusterBalanceStateEnum;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums.ClusterBalanceTypeEnum;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.*;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import org.apache.kafka.clients.CommonClientConfigs;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.constant.ESIndexConstant.PARTITION_INDEX;
@EnterpriseLoadReBalance
public class ClusterBalanceConverter {
private ClusterBalanceConverter() {
}
public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobConfigPO configPO, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
BalanceParameter balanceParameter = new BalanceParameter();
List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = ConvertUtil.str2ObjArrayByJson(configPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);
List<String> goals = new ArrayList<>();
for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.DISK.goal());
}else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
// TODO: CPU goal is not yet implemented in the underlying engine, so it is not added for now
}else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_IN.goal());
}else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_OUT.goal());
}
}
balanceParameter.setGoals(goals);
balanceParameter.setCluster(clusterPhy.getId().toString());
balanceParameter.setExcludedTopics(configPO.getTopicBlackList());
balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
balanceParameter.setEsRestURL(esUrl);
balanceParameter.setBalanceBrokers(CommonUtils.intSet2String(brokerMap.keySet()));
balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
balanceParameter.setBeforeSeconds(configPO.getMetricCalculationPeriod());
balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));
Properties kafkaConfig = new Properties();
kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
balanceParameter.setKafkaConfig(kafkaConfig);
return balanceParameter;
}
public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobPO clusterBalanceJobPO, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
BalanceParameter balanceParameter = new BalanceParameter();
List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);
List<String> goals = new ArrayList<>();
for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.DISK.goal());
}else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
// TODO: CPU goal is not yet implemented in the underlying engine, so it is not added for now
}else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_IN.goal());
}else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_OUT.goal());
}
}
balanceParameter.setGoals(goals);
balanceParameter.setCluster(clusterPhy.getId().toString());
balanceParameter.setExcludedTopics(clusterBalanceJobPO.getTopicBlackList());
balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
balanceParameter.setEsRestURL(esUrl);
balanceParameter.setBalanceBrokers(clusterBalanceJobPO.getBrokers());
balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
balanceParameter.setBeforeSeconds(clusterBalanceJobPO.getMetricCalculationPeriod());
balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));
Properties kafkaConfig = new Properties();
kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
balanceParameter.setKafkaConfig(kafkaConfig);
return balanceParameter;
}
public static BalanceParameter convert2BalanceParameter(JobClusterBalanceContent dto, List<Broker> brokers, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
BalanceParameter balanceParameter = new BalanceParameter();
List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = dto.getClusterBalanceIntervalList().stream()
.sorted(Comparator.comparing(ClusterBalanceIntervalDTO::getPriority)).collect(Collectors.toList());
List<String> goals = new ArrayList<>();
for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.DISK.goal());
}else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
// TODO: CPU goal is not yet implemented in the underlying engine, so it is not added for now
}else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_IN.goal());
}else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_OUT.goal());
}
}
Map<Integer, Broker> brokerMap = brokers.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
balanceParameter.setGoals(goals);
balanceParameter.setCluster(clusterPhy.getId().toString());
balanceParameter.setExcludedTopics(CommonUtils.strList2String(dto.getTopicBlackList()));
balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
balanceParameter.setEsRestURL(esUrl);
balanceParameter.setBalanceBrokers(CommonUtils.intSet2String(brokerMap.keySet()));
balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
balanceParameter.setBeforeSeconds(dto.getMetricCalculationPeriod());
balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));
Properties kafkaConfig = new Properties();
kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
balanceParameter.setKafkaConfig(kafkaConfig);
return balanceParameter;
}
public static BalanceParameter convert2BalanceParameter(ClusterBalancePreviewDTO dto, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
BalanceParameter balanceParameter = new BalanceParameter();
List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = dto.getClusterBalanceIntervalList().stream()
.sorted(Comparator.comparing(ClusterBalanceIntervalDTO::getPriority)).collect(Collectors.toList());
List<String> goals = new ArrayList<>();
for(ClusterBalanceIntervalDTO clusterBalanceIntervalDTO : clusterBalanceIntervalDTOS){
if (Resource.DISK.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setDiskThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.DISK.goal());
}else if (Resource.CPU.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setCpuThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
// TODO: CPU goal is not yet implemented in the underlying engine, so it is not added for now
}else if (Resource.NW_IN.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkInThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_IN.goal());
}else if (Resource.NW_OUT.resource().equals(clusterBalanceIntervalDTO.getType())){
balanceParameter.setNetworkOutThreshold(clusterBalanceIntervalDTO.getIntervalPercent()/100);
goals.add(BalanceGoal.NW_OUT.goal());
}
}
balanceParameter.setGoals(goals);
balanceParameter.setCluster(clusterPhy.getId().toString());
balanceParameter.setExcludedTopics(CommonUtils.strList2String(dto.getTopicBlackList()));
balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
balanceParameter.setEsRestURL(esUrl);
balanceParameter.setBalanceBrokers(CommonUtils.intList2String(dto.getBrokers()));
balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
balanceParameter.setBeforeSeconds(dto.getMetricCalculationPeriod());
balanceParameter.setIgnoredTopics(CommonUtils.strList2String(topicNames));
Properties kafkaConfig = new Properties();
kafkaConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
kafkaConfig.putAll(ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class));
balanceParameter.setKafkaConfig(kafkaConfig);
return balanceParameter;
}
public static ClusterBalanceJobPO convert2ClusterBalanceJobPO(Long jobId, JobClusterBalanceContent jobDTO, OptimizerResult optimizerResult, List<Broker> brokers, String operator, String json) {
if (ValidateUtils.anyNull(jobDTO, optimizerResult, optimizerResult.resultJsonOverview(),
optimizerResult.resultJsonDetailed(), optimizerResult.resultDetailed(), optimizerResult.resultJsonTask())){
return null;
}
ClusterBalanceJobPO clusterBalanceJobPO = new ClusterBalanceJobPO();
clusterBalanceJobPO.setId(jobId);
clusterBalanceJobPO.setType(jobDTO.isScheduleJob()?
ClusterBalanceTypeEnum.CYCLE.getType():ClusterBalanceTypeEnum.IMMEDIATELY.getType());
clusterBalanceJobPO.setStatus(JobStatusEnum.WAITING.getStatus());
clusterBalanceJobPO.setCreator(operator);
clusterBalanceJobPO.setParallelNum(jobDTO.getParallelNum());
clusterBalanceJobPO.setThrottleUnitB(jobDTO.getThrottleUnitB());
clusterBalanceJobPO.setDescription(jobDTO.getDescription());
clusterBalanceJobPO.setBrokers(CommonUtils.intList2String(brokers.stream().map(Broker::getBrokerId).collect(Collectors.toList())));
clusterBalanceJobPO.setClusterId(jobDTO.getClusterId());
clusterBalanceJobPO.setTopicBlackList(CommonUtils.strList2String(jobDTO.getTopicBlackList()));
clusterBalanceJobPO.setMoveInTopicList(optimizerResult.resultOverview().getMoveTopics());
clusterBalanceJobPO.setExecutionStrategy(jobDTO.getExecutionStrategy());
clusterBalanceJobPO.setBalanceIntervalJson(ConvertUtil.obj2Json(jobDTO.getClusterBalanceIntervalList()));
clusterBalanceJobPO.setBrokerBalanceDetail(ConvertUtil.obj2Json(convert2ClusterBalancePlanDetail(optimizerResult.resultDetailed())));
clusterBalanceJobPO.setMetricCalculationPeriod(jobDTO.getMetricCalculationPeriod());
clusterBalanceJobPO.setReassignmentJson(json);
clusterBalanceJobPO.setTotalReassignSize(optimizerResult.resultOverview().getTotalMoveSize());
clusterBalanceJobPO.setTotalReassignReplicaNum(optimizerResult.resultOverview().getMoveReplicas());
clusterBalanceJobPO.setDescription(optimizerResult.resultJsonBalanceActionHistory());
return clusterBalanceJobPO;
}
public static ClusterBalanceReassignPO convert2ClusterBalanceReassignPO(BalanceTask balanceTask, Topic topic, Long jobId, Long clusterId) {
ClusterBalanceReassignPO reassignPO = new ClusterBalanceReassignPO();
reassignPO.setClusterId(clusterId);
reassignPO.setJobId(jobId);
reassignPO.setPartitionId(balanceTask.getPartition());
reassignPO.setOriginalBrokerIds(CommonUtils.intList2String(topic.getPartitionMap().get(balanceTask.getPartition())));
reassignPO.setReassignBrokerIds(CommonUtils.intList2String(balanceTask.getReplicas()));
reassignPO.setTopicName(balanceTask.getTopic());
ClusterBalanceReassignExtendData extendData = new ClusterBalanceReassignExtendData();
extendData.setOriginalRetentionTimeUnitMs(topic.getRetentionMs());
extendData.setReassignRetentionTimeUnitMs(topic.getRetentionMs());
extendData.setOriginReplicaNum(topic.getReplicaNum());
extendData.setReassignReplicaNum(balanceTask.getReplicas().size());
reassignPO.setExtendData(ConvertUtil.obj2Json(extendData));
reassignPO.setStatus(JobStatusEnum.WAITING.getStatus());
return reassignPO;
}
public static List<ClusterBalanceReassignPO> convert2ListClusterBalanceReassignPO(List<BalanceTask> balanceTasks, Map<String, Topic> topicMap, Long jobId, Long clusterId) {
List<ClusterBalanceReassignPO> reassignPOs = new ArrayList<>();
// Generate the reassignment details
Map<String, List<BalanceTask>> balanceTaskMap = balanceTasks.stream().collect(Collectors.groupingBy(BalanceTask::getTopic));
for (Map.Entry<String, List<BalanceTask>> entry : balanceTaskMap.entrySet()){
Topic topic = topicMap.get(entry.getKey());
if (topic == null || topic.getPartitionMap() == null){
continue;
}
for (BalanceTask balanceTask : entry.getValue()){
reassignPOs.add(ClusterBalanceConverter.convert2ClusterBalanceReassignPO(balanceTask, topic, jobId, clusterId));
}
}
return reassignPOs;
}
public static ClusterBalanceJobConfigPO convert2ClusterBalanceJobConfigPO(ClusterBalanceStrategyDTO dto, String operator) {
ClusterBalanceJobConfigPO jobConfigPO = new ClusterBalanceJobConfigPO();
jobConfigPO.setCreator(operator);
jobConfigPO.setParallelNum(dto.getParallelNum());
jobConfigPO.setThrottleUnitB(dto.getThrottleUnitB());
jobConfigPO.setClusterId(dto.getClusterId());
jobConfigPO.setExecutionStrategy(dto.getExecutionStrategy());
jobConfigPO.setBalanceIntervalJson(ConvertUtil.obj2Json(dto.getClusterBalanceIntervalList()));
jobConfigPO.setTaskCron(dto.getScheduleCron());
jobConfigPO.setMetricCalculationPeriod(dto.getMetricCalculationPeriod());
jobConfigPO.setStatus(dto.getStatus());
return jobConfigPO;
}
public static JobClusterBalanceContent convert2JobClusterBalanceContent(ClusterBalanceJobConfigPO configPO) {
JobClusterBalanceContent content = new JobClusterBalanceContent();
content.setType(JobTypeEnum.CLUSTER_BALANCE.getType());
content.setParallelNum(configPO.getParallelNum());
content.setThrottleUnitB(configPO.getThrottleUnitB());
content.setClusterId(configPO.getClusterId());
content.setExecutionStrategy(configPO.getExecutionStrategy());
content.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(configPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class));
content.setMetricCalculationPeriod(configPO.getMetricCalculationPeriod());
content.setTopicBlackList(CommonUtils.string2StrList(configPO.getTopicBlackList()));
content.setScheduleJob(Boolean.TRUE);
return content;
}
public static List<ClusterBalancePlanDetail> convert2ClusterBalancePlanDetail(Map<Integer, BalanceDetailed> detailedMap) {
List<ClusterBalancePlanDetail> details = new ArrayList<>();
for(Map.Entry<Integer, BalanceDetailed> entry : detailedMap.entrySet()){
BalanceDetailed balanceDetailed = entry.getValue();
if (balanceDetailed == null){
continue ;
}
ClusterBalancePlanDetail planDetail = new ClusterBalancePlanDetail();
planDetail.setStatus(ClusterBalanceStateEnum.BALANCE.getState().equals(balanceDetailed.getBalanceState()) ? ClusterBalanceStateEnum.BALANCE.getState() : ClusterBalanceStateEnum.UNBALANCED.getState());
planDetail.setHost(balanceDetailed.getHost());
planDetail.setBrokerId(entry.getKey());
planDetail.setCpuBefore(balanceDetailed.getCurrentCPUUtilization()*Constant.ONE_HUNDRED);
planDetail.setCpuAfter(balanceDetailed.getLastCPUUtilization()*Constant.ONE_HUNDRED);
planDetail.setDiskBefore(balanceDetailed.getCurrentDiskUtilization()*Constant.ONE_HUNDRED);
planDetail.setDiskAfter(balanceDetailed.getLastDiskUtilization()*Constant.ONE_HUNDRED);
planDetail.setByteInBefore(balanceDetailed.getCurrentNetworkInUtilization()*Constant.ONE_HUNDRED);
planDetail.setByteInAfter(balanceDetailed.getLastNetworkInUtilization()*Constant.ONE_HUNDRED);
planDetail.setByteOutBefore(balanceDetailed.getCurrentNetworkOutUtilization()*Constant.ONE_HUNDRED);
planDetail.setByteOutAfter(balanceDetailed.getLastNetworkOutUtilization()*Constant.ONE_HUNDRED);
planDetail.setInReplica(balanceDetailed.getMoveInReplicas());
planDetail.setOutReplica(balanceDetailed.getMoveOutReplicas());
planDetail.setInSize(balanceDetailed.getMoveInDiskSize());
planDetail.setOutSize(balanceDetailed.getMoveOutDiskSize());
details.add(planDetail);
}
return details;
}
// Update the cluster balance state after the balance job completes
public static List<ClusterBalancePlanDetail> convert2ClusterBalancePlanDetail(List<ClusterBalancePlanDetail> details, Map<Integer, BrokerBalanceState> stateMap) {
details.forEach(planDetail ->{
BrokerBalanceState state = stateMap.get(planDetail.getBrokerId());
if (state == null){
return;
}
planDetail.setCpuStatus(state.getCpuBalanceState());
planDetail.setDiskStatus(state.getDiskBalanceState());
planDetail.setByteInStatus(state.getBytesInBalanceState());
planDetail.setByteOutStatus(state.getBytesOutBalanceState());
if ((state.getCpuBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getCpuBalanceState()))
&& (state.getDiskBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getDiskBalanceState()))
&& (state.getBytesInBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getBytesInBalanceState()))
&& (state.getBytesOutBalanceState() == null || ClusterBalanceStateEnum.BALANCE.getState().equals(state.getBytesOutBalanceState()))) {
planDetail.setStatus(ClusterBalanceStateEnum.BALANCE.getState());
}else {
planDetail.setStatus(ClusterBalanceStateEnum.UNBALANCED.getState());
}
});
return details;
}
public static List<ClusterBalancePlanDetailVO> convert2ClusterBalancePlanDetailVO(List<Integer> balanceBrokerIds, Map<Integer, BalanceDetailed> detailedMap) {
List<ClusterBalancePlanDetailVO> detailVOS = new ArrayList<>();
for(Map.Entry<Integer, BalanceDetailed> entry : detailedMap.entrySet()){
BalanceDetailed value = entry.getValue();
if (value == null || !balanceBrokerIds.contains(entry.getKey())){
continue ;
}
ClusterBalancePlanDetailVO planDetailVO = new ClusterBalancePlanDetailVO();
planDetailVO.setStatus(ClusterBalanceStateEnum.BALANCE.getState().equals(value.getBalanceState()) ? ClusterBalanceStateEnum.BALANCE.getState() : ClusterBalanceStateEnum.UNBALANCED.getState());
planDetailVO.setHost(value.getHost());
planDetailVO.setBrokerId(entry.getKey());
planDetailVO.setCpuBefore(value.getCurrentCPUUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setCpuAfter(value.getLastCPUUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setDiskBefore(value.getCurrentDiskUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setDiskAfter(value.getLastDiskUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setByteInBefore(value.getCurrentNetworkInUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setByteInAfter(value.getLastNetworkInUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setByteOutBefore(value.getCurrentNetworkOutUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setByteOutAfter(value.getLastNetworkOutUtilization()*Constant.ONE_HUNDRED);
planDetailVO.setInReplica(value.getMoveInReplicas());
planDetailVO.setOutReplica(value.getMoveOutReplicas());
planDetailVO.setInSize(value.getMoveInDiskSize());
planDetailVO.setOutSize(value.getMoveOutDiskSize());
detailVOS.add(planDetailVO);
}
return detailVOS;
}
public static ClusterBalancePlanVO convert2ClusterBalancePlanVO(ClusterBalancePreviewDTO jobDTO, OptimizerResult optimizerResult, List<Broker> allBrokers) {
if (ValidateUtils.anyNull(jobDTO, optimizerResult, optimizerResult.resultJsonOverview(),
optimizerResult.resultJsonDetailed(), optimizerResult.resultDetailed(), optimizerResult.resultJsonTask())){
return null;
}
ClusterBalancePlanVO planVO = new ClusterBalancePlanVO();
planVO.setTopics(CommonUtils.string2StrList(optimizerResult.resultOverview().getMoveTopics()));
planVO.setType(ClusterBalanceTypeEnum.IMMEDIATELY.getType());
planVO.setReplicas(optimizerResult.resultOverview().getMoveReplicas());
planVO.setBlackTopics(jobDTO.getTopicBlackList());
planVO.setMoveSize(optimizerResult.resultOverview().getTotalMoveSize());
planVO.setThreshold(ConvertUtil.obj2Json(jobDTO.getClusterBalanceIntervalList()));
planVO.setBrokers(convert2HostList(allBrokers, optimizerResult.resultOverview().getNodeRange()));
planVO.setDetail(convert2ClusterBalancePlanDetailVO(jobDTO.getBrokers(), optimizerResult.resultDetailed()));
planVO.setClusterBalanceIntervalList(ConvertUtil.list2List(jobDTO.getClusterBalanceIntervalList(), ClusterBalanceIntervalVO.class));
planVO.setReassignmentJson(optimizerResult.resultJsonTask());
return planVO;
}
public static ClusterBalancePreviewDTO convert2ClusterBalancePreviewDTO(ClusterBalanceJobPO clusterBalanceJobPO) {
ClusterBalancePreviewDTO planVO = new ClusterBalancePreviewDTO();
planVO.setBrokers(CommonUtils.string2IntList(clusterBalanceJobPO.getBrokers()));
planVO.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class));
planVO.setClusterId(clusterBalanceJobPO.getClusterId());
planVO.setExecutionStrategy(clusterBalanceJobPO.getExecutionStrategy());
planVO.setParallelNum(clusterBalanceJobPO.getParallelNum());
planVO.setThrottleUnitB(clusterBalanceJobPO.getThrottleUnitB());
planVO.setMetricCalculationPeriod(clusterBalanceJobPO.getMetricCalculationPeriod());
planVO.setTopicBlackList(CommonUtils.string2StrList(clusterBalanceJobPO.getTopicBlackList()));
return planVO;
}
public static Map<String, ClusterBalanceOverviewSubVO> convert2MapClusterBalanceOverviewSubVO(BrokerSpec brokerSpec, BrokerBalanceState state) {
Map<String, ClusterBalanceOverviewSubVO> subVOMap = new HashMap<>();
if (brokerSpec == null){
brokerSpec = new BrokerSpec();
}
if (state == null){
state = new BrokerBalanceState();
}
Double cpuSpec = brokerSpec.getCpu() != null ? brokerSpec.getCpu() * Constant.ONE_HUNDRED : null; // convert to the base unit
subVOMap.put(Resource.DISK.resource(),
new ClusterBalanceOverviewSubVO(
state.getDiskAvgResource(), brokerSpec.getDisk(),
state.getDiskBalanceState() == null || state.getDiskBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getDiskBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
subVOMap.put(Resource.CPU.resource(),
new ClusterBalanceOverviewSubVO(state.getCpuAvgResource(), cpuSpec,
state.getCpuBalanceState() == null || state.getCpuBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getCpuBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
subVOMap.put(Resource.NW_IN.resource(),
new ClusterBalanceOverviewSubVO(
state.getBytesInAvgResource(), brokerSpec.getFlow(),
state.getBytesInBalanceState() == null || state.getBytesInBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getBytesInBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
subVOMap.put(Resource.NW_OUT.resource(),
new ClusterBalanceOverviewSubVO(
state.getBytesOutAvgResource(), brokerSpec.getFlow(),
state.getBytesOutBalanceState() == null || state.getBytesOutBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState())?state.getBytesOutBalanceState():ClusterBalanceStateEnum.UNBALANCED.getState()));
return subVOMap;
}
public static ClusterBalanceJobConfigVO convert2ClusterBalanceJobConfigVO(ClusterBalanceJobConfigPO clusterBalanceJobConfigPO){
ClusterBalanceJobConfigVO configVO = new ClusterBalanceJobConfigVO();
configVO.setScheduleCron(clusterBalanceJobConfigPO.getTaskCron());
configVO.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(clusterBalanceJobConfigPO.getBalanceIntervalJson(), ClusterBalanceInterval.class));
configVO.setClusterId(clusterBalanceJobConfigPO.getClusterId());
configVO.setExecutionStrategy(clusterBalanceJobConfigPO.getExecutionStrategy());
configVO.setParallelNum(clusterBalanceJobConfigPO.getParallelNum());
configVO.setMetricCalculationPeriod(clusterBalanceJobConfigPO.getMetricCalculationPeriod());
configVO.setThrottleUnitB(clusterBalanceJobConfigPO.getThrottleUnitB());
configVO.setTopicBlackList(CommonUtils.string2StrList(clusterBalanceJobConfigPO.getTopicBlackList()));
configVO.setStatus(clusterBalanceJobConfigPO.getStatus());
return configVO;
}
public static List<String> convert2HostList(List<Broker> allBrokers, String brokerIdStr){
if (allBrokers.isEmpty() || ValidateUtils.isBlank(brokerIdStr)){
return new ArrayList<>();
}
List<Integer> brokerIds = CommonUtils.string2IntList(brokerIdStr);
return allBrokers.stream().filter(broker -> brokerIds.contains(broker.getBrokerId()))
.map(Broker::getHost).collect(Collectors.toList());
}
private static List<HostEnv> convert2ListHostEnv(Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap) {
List<HostEnv> hostEnvs = new ArrayList<>();
for (Map.Entry<Integer, Broker> entry : brokerMap.entrySet()) {
HostEnv hostEnv = new HostEnv();
hostEnv.setId(entry.getKey());
hostEnv.setHost(entry.getValue().getHost());
hostEnv.setRackId(entry.getValue().getRack());
BrokerSpec brokerSpec = brokerSpecMap.get(entry.getKey());
if (brokerSpec == null){
continue;
}
hostEnv.setCpu(brokerSpec.getCpu().intValue() * Constant.ONE_HUNDRED);
hostEnv.setDisk(brokerSpec.getDisk() * Constant.B_TO_GB);
hostEnv.setNetwork(brokerSpec.getFlow() * Constant.B_TO_MB);
hostEnvs.add(hostEnv);
}
return hostEnvs;
}
}

View File

@@ -0,0 +1,218 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignDetail;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignExtendData;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByPartition;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.Job;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.JobStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.detail.JobDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.detail.SubJobReplicaMoveDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.strategy.ReplaceReassignSub;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobClusterBalanceReplicaMoveVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobPartitionDetailVO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobVO;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import java.util.*;
import java.util.stream.Collectors;
@EnterpriseLoadReBalance
public class ClusterBalanceReassignConverter {
private ClusterBalanceReassignConverter() {
}
public static JobDetail convert2JobDetail(Job job, ClusterBalanceReassignDetail reassignDetail) {
JobDetail jobDetail = new JobDetail();
jobDetail.setId(job.getId());
jobDetail.setJobType(job.getJobType());
jobDetail.setJobName(job.getJobName());
jobDetail.setJobStatus(job.getJobStatus());
jobDetail.setPlanTime(job.getPlanTime());
jobDetail.setStartTime(reassignDetail.getStartTime());
jobDetail.setEndTime(reassignDetail.getFinishedTime());
jobDetail.setFlowLimit(reassignDetail.getThrottleUnitB().doubleValue());
JobStatus jobStatus = new JobStatus(reassignDetail.getReassignTopicDetailsList().stream().map(elem -> elem.getStatus()).collect(Collectors.toList()));
jobDetail.setTotal(jobStatus.getTotal());
jobDetail.setSuccess(jobStatus.getSuccess());
jobDetail.setFail(jobStatus.getFailed());
jobDetail.setDoing(jobStatus.getDoing());
List<SubJobVO> subJobDetailList = new ArrayList<>();
subJobDetailList.addAll(
ConvertUtil.list2List(convert2SubJobReplicaMoveDetailList(reassignDetail.getReassignTopicDetailsList()), SubJobClusterBalanceReplicaMoveVO.class)
);
jobDetail.setSubJobs(subJobDetailList);
return jobDetail;
}
public static ClusterBalanceReassignDetail convert2ClusterBalanceReassignDetail(ClusterBalanceJobPO jobPO, List<ClusterBalanceReassignPO> reassignPOS) {
// Aggregate by topic
Map<String, List<ClusterBalanceReassignPO>> topicJobPOMap = new HashMap<>();
reassignPOS.forEach(elem -> {
topicJobPOMap.putIfAbsent(elem.getTopicName(), new ArrayList<>());
topicJobPOMap.get(elem.getTopicName()).add(elem);
});
List<ClusterBalanceDetailDataGroupByTopic> reassignTopicDetailsList = new ArrayList<>();
for (Map.Entry<String, List<ClusterBalanceReassignPO>> entry: topicJobPOMap.entrySet()) {
reassignTopicDetailsList.add(convert2ClusterBalanceDetailDataGroupByTopic(entry.getValue()));
}
ClusterBalanceReassignDetail jobDetail = new ClusterBalanceReassignDetail();
jobDetail.setThrottleUnitB(jobPO.getThrottleUnitB());
jobDetail.setReassignTopicDetailsList(reassignTopicDetailsList);
jobDetail.setStartTime(jobPO.getStartTime());
if (JobStatusEnum.isFinished(jobPO.getStatus())) {
jobDetail.setFinishedTime(jobPO.getFinishedTime());
}
return jobDetail;
}
private static ClusterBalanceDetailDataGroupByTopic convert2ClusterBalanceDetailDataGroupByTopic(List<ClusterBalanceReassignPO> reassigns) {
Set<Integer> originalBrokerIdSet = new HashSet<>();
Set<Integer> reassignBrokerIdSet = new HashSet<>();
// Per-partition details
List<ClusterBalanceDetailDataGroupByPartition> partitionDetailList = new ArrayList<>();
for (ClusterBalanceReassignPO reassignPO : reassigns) {
ClusterBalanceDetailDataGroupByPartition detail = new ClusterBalanceDetailDataGroupByPartition();
detail.setPartitionId(reassignPO.getPartitionId());
detail.setClusterPhyId(reassignPO.getClusterId());
detail.setTopicName(reassignPO.getTopicName());
detail.setOriginalBrokerIdList(CommonUtils.string2IntList(reassignPO.getOriginalBrokerIds()));
detail.setReassignBrokerIdList(CommonUtils.string2IntList(reassignPO.getReassignBrokerIds()));
detail.setStatus(reassignPO.getStatus());
ClusterBalanceReassignExtendData extendData = ConvertUtil.str2ObjByJson(reassignPO.getExtendData(), ClusterBalanceReassignExtendData.class);
if (extendData != null) {
detail.setNeedReassignLogSizeUnitB(extendData.getNeedReassignLogSizeUnitB());
detail.setFinishedReassignLogSizeUnitB(extendData.getFinishedReassignLogSizeUnitB());
detail.setRemainTimeUnitMs(extendData.getRemainTimeUnitMs());
detail.setPresentReplicaNum(extendData.getOriginReplicaNum());
detail.setNewReplicaNum(extendData.getReassignReplicaNum());
detail.setOriginalRetentionTimeUnitMs(extendData.getOriginalRetentionTimeUnitMs());
detail.setReassignRetentionTimeUnitMs(extendData.getReassignRetentionTimeUnitMs());
}
originalBrokerIdSet.addAll(detail.getOriginalBrokerIdList());
reassignBrokerIdSet.addAll(detail.getReassignBrokerIdList());
partitionDetailList.add(detail);
}
// Topic-level details
ClusterBalanceDetailDataGroupByTopic topicDetail = new ClusterBalanceDetailDataGroupByTopic();
topicDetail.setPartitionIdList(partitionDetailList.stream().map(elem -> elem.getPartitionId()).collect(Collectors.toList()));
topicDetail.setReassignPartitionDetailsList(partitionDetailList);
topicDetail.setClusterPhyId(reassigns.get(0).getClusterId());
topicDetail.setTopicName(reassigns.get(0).getTopicName());
topicDetail.setOriginalBrokerIdList(new ArrayList<>(originalBrokerIdSet));
topicDetail.setReassignBrokerIdList(new ArrayList<>(reassignBrokerIdSet));
List<Long> needSizeList = partitionDetailList
.stream()
.filter(elem -> elem.getNeedReassignLogSizeUnitB() != null)
.map(item -> item.getNeedReassignLogSizeUnitB()).collect(Collectors.toList());
topicDetail.setNeedReassignLogSizeUnitB(needSizeList.isEmpty()? null: needSizeList.stream().reduce(Long::sum).get());
List<Long> finishedSizeList = partitionDetailList
.stream()
.filter(elem -> elem.getFinishedReassignLogSizeUnitB() != null)
.map(item -> item.getFinishedReassignLogSizeUnitB()).collect(Collectors.toList());
topicDetail.setFinishedReassignLogSizeUnitB(finishedSizeList.isEmpty()? null: finishedSizeList.stream().reduce(Long::sum).get());
List<Long> remainList = partitionDetailList
.stream()
.filter(elem -> elem.getRemainTimeUnitMs() != null)
.map(item -> item.getRemainTimeUnitMs()).collect(Collectors.toList());
topicDetail.setRemainTimeUnitMs(remainList.isEmpty()? null: remainList.stream().reduce(Long::max).get());
topicDetail.setPresentReplicaNum(partitionDetailList.get(0).getPresentReplicaNum());
topicDetail.setNewReplicaNum(partitionDetailList.get(0).getNewReplicaNum());
topicDetail.setOriginalRetentionTimeUnitMs(partitionDetailList.get(0).getOriginalRetentionTimeUnitMs());
topicDetail.setReassignRetentionTimeUnitMs(partitionDetailList.get(0).getReassignRetentionTimeUnitMs());
topicDetail.setStatus(
new JobStatus(
partitionDetailList.stream().map(elem -> elem.getStatus()).collect(Collectors.toList())
).getStatus()
);
return topicDetail;
}
public static List<SubJobPartitionDetailVO> convert2SubJobPartitionDetailVOList(ClusterBalanceDetailDataGroupByTopic detailDataGroupByTopic) {
List<SubJobPartitionDetailVO> voList = new ArrayList<>();
for (ClusterBalanceDetailDataGroupByPartition groupByPartition: detailDataGroupByTopic.getReassignPartitionDetailsList()) {
SubJobPartitionDetailVO vo = new SubJobPartitionDetailVO();
vo.setPartitionId(groupByPartition.getPartitionId());
vo.setSourceBrokerIds(groupByPartition.getOriginalBrokerIdList());
vo.setDesBrokerIds(groupByPartition.getReassignBrokerIdList());
vo.setTotalSize(groupByPartition.getNeedReassignLogSizeUnitB() != null ? groupByPartition.getNeedReassignLogSizeUnitB().doubleValue(): null);
vo.setMovedSize(groupByPartition.getFinishedReassignLogSizeUnitB() != null ? groupByPartition.getFinishedReassignLogSizeUnitB().doubleValue(): null);
vo.setStatus(groupByPartition.getStatus());
vo.setRemainTime(groupByPartition.getRemainTimeUnitMs());
voList.add(vo);
}
return voList;
}
private static List<SubJobReplicaMoveDetail> convert2SubJobReplicaMoveDetailList(List<ClusterBalanceDetailDataGroupByTopic> reassignTopicDetailsList) {
List<SubJobReplicaMoveDetail> detailList = new ArrayList<>();
for (ClusterBalanceDetailDataGroupByTopic detailDataGroupByTopic: reassignTopicDetailsList) {
SubJobReplicaMoveDetail detail = new SubJobReplicaMoveDetail();
detail.setTopicName(detailDataGroupByTopic.getTopicName());
detail.setPartitions(detailDataGroupByTopic.getPartitionIdList());
detail.setCurrentTimeSpent(detailDataGroupByTopic.getOriginalRetentionTimeUnitMs());
detail.setMoveTimeSpent(detailDataGroupByTopic.getReassignRetentionTimeUnitMs());
detail.setSourceBrokers(detailDataGroupByTopic.getOriginalBrokerIdList());
detail.setDesBrokers(detailDataGroupByTopic.getReassignBrokerIdList());
detail.setStatus(detailDataGroupByTopic.getStatus());
if (detailDataGroupByTopic.getNeedReassignLogSizeUnitB() != null) {
detail.setTotalSize(detailDataGroupByTopic.getNeedReassignLogSizeUnitB().doubleValue());
}
if (detailDataGroupByTopic.getFinishedReassignLogSizeUnitB() != null) {
detail.setMovedSize(detailDataGroupByTopic.getFinishedReassignLogSizeUnitB().doubleValue());
}
JobStatus jobStatus = new JobStatus(detailDataGroupByTopic.getReassignPartitionDetailsList().stream().map(elem -> elem.getStatus()).collect(Collectors.toList()));
detail.setTotal(jobStatus.getTotal());
detail.setSuccess(jobStatus.getSuccess());
detail.setFail(jobStatus.getFailed());
detail.setDoing(jobStatus.getDoing());
detail.setRemainTime(detailDataGroupByTopic.getRemainTimeUnitMs());
detailList.add(detail);
}
return detailList;
}
public static List<ReplaceReassignSub> convert2ReplaceReassignSubList(List<ClusterBalanceReassignPO> reassignPOList) {
List<ReplaceReassignSub> voList = new ArrayList<>();
for (ClusterBalanceReassignPO reassignPO: reassignPOList) {
voList.add(convert2ReplaceReassignSub(reassignPO));
}
return voList;
}
public static ReplaceReassignSub convert2ReplaceReassignSub(ClusterBalanceReassignPO reassignPO) {
ReplaceReassignSub reassignSub = new ReplaceReassignSub();
reassignSub.setClusterPhyId(reassignPO.getClusterId());
reassignSub.setOriginalBrokerIdList(CommonUtils.string2IntList(reassignPO.getOriginalBrokerIds()));
reassignSub.setReassignBrokerIdList(CommonUtils.string2IntList(reassignPO.getReassignBrokerIds()));
reassignSub.setPartitionId(reassignPO.getPartitionId());
reassignSub.setTopicName(reassignPO.getTopicName());
return reassignSub;
}
}

View File

@@ -0,0 +1,31 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Getter;
/**
* Cluster balance state
* @author zengqiao
* @date 22/03/08
*/
@Getter
@EnterpriseLoadReBalance
public enum ClusterBalanceStateEnum {
BELOW_BALANCE(-1, "低于均衡范围"),
BALANCE(0, "均衡范围内"),
ABOVE_BALANCE(1, "高于均衡范围"),
UNBALANCED(2, "不均衡"),
;
private final Integer state;
private final String message;
ClusterBalanceStateEnum(int state, String message) {
this.state = state;
this.message = message;
}
}
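// A minimal sketch (hypothetical helper, not part of this class) of how callers such as
// ClusterBalanceConverter.convert2MapClusterBalanceOverviewSubVO collapse these states for the
// overview: any non-null state other than BALANCE is rendered as UNBALANCED.
//
// static Integer toOverviewState(Integer rawState) {
//     if (rawState == null || rawState.equals(ClusterBalanceStateEnum.BALANCE.getState())) {
//         return rawState;
//     }
//     return ClusterBalanceStateEnum.UNBALANCED.getState();
// }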

View File

@@ -0,0 +1,28 @@
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import lombok.Getter;
/**
* Cluster balance job type
* @author zengqiao
* @date 22/03/08
*/
@Getter
@EnterpriseLoadReBalance
public enum ClusterBalanceTypeEnum {
IMMEDIATELY(1, "立即"),
CYCLE(2, "周期"),
;
private final int type;
private final String message;
ClusterBalanceTypeEnum(int type, String message) {
this.type = type;
this.message = message;
}
}

View File

@@ -0,0 +1,9 @@
/**
* Load-rebalance feature module.
* km-extends/km-rebalance is the low-level base module that generates a migration plan from metrics.
* This package builds the product-facing load-rebalance features on top of km-extends/km-rebalance, per product requirements.
*/
@EnterpriseLoadReBalance
package com.xiaojukeji.know.streaming.km.common.enterprise.rebalance;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
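// A minimal usage sketch of the underlying km-extends/km-rebalance API as consumed by this
// package (population of BalanceParameter via ClusterBalanceConverter.convert2BalanceParameter
// is elided; see ClusterBalanceJobHandler.submit for the full flow):
//
// BalanceParameter balanceParameter = ...; // brokers, specs, balance intervals, es address
// OptimizerResult optimizerResult = new ExecutionRebalance().optimizations(balanceParameter);
// String planJson = optimizerResult.resultJsonTask(); // migration plan in Kafka reassignment format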

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.know.streaming.km.common.enums.operaterecord;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import java.util.List;
@@ -37,6 +38,9 @@ public enum ModuleEnum {
JOB_KAFKA_REPLICA_REASSIGN(110, "Job-KafkaReplica迁移"),
@EnterpriseLoadReBalance
JOB_CLUSTER_BALANCE(111, "Job-ClusterBalance"),
;
ModuleEnum(int code, String desc) {

View File

@@ -0,0 +1,355 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.job;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignDetail;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalanceDetailDataGroupByTopic;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.Job;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.JobStatus;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.content.JobClusterBalanceContent;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.detail.JobDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.job.detail.JobModifyDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import com.xiaojukeji.know.streaming.km.common.bean.po.job.JobPO;
import com.xiaojukeji.know.streaming.km.common.bean.vo.job.sub.SubJobPartitionDetailVO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.JobConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter.ClusterBalanceConverter;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter.ClusterBalanceReassignConverter;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobActionEnum;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceJobService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceReassignService;
import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
import com.xiaojukeji.know.streaming.km.core.service.job.JobHandler;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.job.JobDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.service.KafkaZKDAO;
import com.xiaojukeji.know.streaming.km.rebalance.executor.ExecutionRebalance;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceParameter;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.OptimizerResult;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
@EnterpriseLoadReBalance
@Component(JobConstant.CLUSTER_BALANCE)
public class ClusterBalanceJobHandler implements JobHandler {
private static final ILog logger = LogFactory.getLog(ClusterBalanceJobHandler.class);
@Value("${es.client.address:}")
private String esAddress;
@Autowired
private ClusterBalanceJobService clusterBalanceJobService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private ClusterBalanceReassignService clusterBalanceReassignService;
@Autowired
private KafkaZKDAO kafkaZKDAO;
@Autowired
private JobDAO jobDAO;
@Autowired
private BrokerService brokerService;
@Autowired
private BrokerSpecService brokerSpecService;
@Autowired
private TopicService topicService;
@Autowired
private ConfigUtils configUtils;
@Override
public JobTypeEnum type() {
return JobTypeEnum.CLUSTER_BALANCE;
}
@Override
@Transactional
public Result<Void> submit(Job job, String operator) {
// Parse the job detail from the job data
JobClusterBalanceContent dto = ConvertUtil.str2ObjByJson(job.getJobData(), JobClusterBalanceContent.class);
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(dto.getClusterId());
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
// Fetch broker spec info
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhy.getId());
// Fetch all brokers of the cluster
List<Broker> brokers = brokerService.listAllBrokersFromDB(clusterPhy.getId());
for(Broker broker:brokers){
if (brokerSpecMap.get(broker.getBrokerId()) == null){
return Result.buildFromRSAndMsg(ResultStatus.BROKER_SPEC_NOT_EXIST,String.format("Broker规格信息不存在:brokerId:%s", broker.getBrokerId()));
}
}
// Generate the balance plan
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(dto.getClusterId(), configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, topicNames);
try {
ExecutionRebalance executionRebalance = new ExecutionRebalance();
OptimizerResult optimizerResult = executionRebalance.optimizations(balanceParameter);
Result<Void> cRs = checkOptimizerResult(optimizerResult, job.getId());
if (cRs.failed()){
return cRs;
}
Map<String, Topic> topicMap = topicService.listTopicsFromDB(clusterPhy.getId()).stream().collect(Collectors.toMap(Topic::getTopicName, Function.identity()));
List<ClusterBalanceReassignPO> reassignPOS = ClusterBalanceConverter.convert2ListClusterBalanceReassignPO(
optimizerResult.resultTask(), topicMap, job.getId(), clusterPhy.getId());
String generateReassignmentJson = optimizerResult.resultJsonTask();
if (dto.getParallelNum() > 0){
// Generate the reassignment JSON according to the execution strategy
Result<String> jResult = clusterBalanceJobService.generateReassignmentJson(job.getClusterId(),dto.getParallelNum(), dto.getExecutionStrategy(), Constant.NUM_ONE, reassignPOS);
if (jResult.failed()){
return Result.buildFromIgnoreData(jResult);
}
generateReassignmentJson = jResult.getData();
}
// Create the balance job
ClusterBalanceJobPO clusterBalanceJobPO = ClusterBalanceConverter.convert2ClusterBalanceJobPO(job.getId(), dto, optimizerResult, brokers, operator, generateReassignmentJson);
Result<Void> result = clusterBalanceJobService.createClusterBalanceJob(clusterBalanceJobPO, operator);
if (result.failed()){
logger.error("method=clusterBalanceJobHandler.submit||job={}||errMsg={}!",
job, result.getMessage());
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return result;
}
// Create the reassignment details
Result<Void> cbrResult = clusterBalanceReassignService.addBatchBalanceReassign(reassignPOS);
if (cbrResult.failed()){
logger.error("method=clusterBalanceJobHandler.submit||job={}||errMsg={}!",
job, cbrResult.getMessage());
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return cbrResult;
}
// Update the job's target object
job.setTarget(optimizerResult.resultOverview().getMoveTopics());
int count = jobDAO.updateById(ConvertUtil.obj2Obj(job, JobPO.class));
if (count < 1){
logger.error("method=clusterBalanceJobHandler.submit||job={}||errMsg=update job target failed!", job);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
}catch (Exception e){
logger.error("method=clusterBalanceJobHandler.submit||job={}||errMsg=exception", job, e);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFailure(e.getMessage());
}
return Result.buildSuc();
}
@Override
@Transactional
public Result<Void> delete(Job job, String operator) {
// Delete the balance job
Result<Void> balanceJobResult = clusterBalanceJobService.deleteByJobId(job.getId(), operator);
if (balanceJobResult.failed()){
logger.error("method=clusterBalanceJobHandler.delete||job={}||operator:{}||errMsg={}", job, operator, balanceJobResult);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return balanceJobResult;
}
return Result.buildSuc();
}
@Override
@Transactional
public Result<Void> modify(Job job, String operator) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(job.getClusterId());
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
Result<ClusterBalanceJobPO> balanceJobPOResult = clusterBalanceJobService.getClusterBalanceJobById(job.getId());
if (!balanceJobPOResult.hasData()){
return Result.buildFrom(ResultStatus.NOT_EXIST);
}
List<Broker> brokers = brokerService.listAllBrokersFromDB(clusterPhy.getId());
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhy.getId());
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(job.getClusterId(), configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
JobClusterBalanceContent dto = ConvertUtil.str2ObjByJson(job.getJobData(), JobClusterBalanceContent.class);
BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, topicNames);
ExecutionRebalance executionRebalance = new ExecutionRebalance();
try {
OptimizerResult optimizerResult = executionRebalance.optimizations(balanceParameter);
Result<Void> cRs = checkOptimizerResult(optimizerResult, job.getId());
if (cRs.failed()){
return cRs;
}
Map<String, Topic> topicMap = kafkaZKDAO.getAllTopicMetadata(clusterPhy.getId(), false).stream().collect(Collectors.toMap(Topic::getTopicName, Function.identity()));
List<ClusterBalanceReassignPO> reassignPOS = ClusterBalanceConverter.convert2ListClusterBalanceReassignPO(optimizerResult.resultTask(),
topicMap, job.getId(), clusterPhy.getId());
String generateReassignmentJson = optimizerResult.resultJsonTask();
if (dto.getParallelNum() > 0){
// Generate the reassignment JSON according to the execution strategy
Result<String> jResult = clusterBalanceJobService.generateReassignmentJson(job.getClusterId(),dto.getParallelNum(), dto.getExecutionStrategy(), Constant.NUM_ONE, reassignPOS);
if (jResult.failed()){
return Result.buildFromIgnoreData(jResult);
}
generateReassignmentJson = jResult.getData();
}
// Create the balance job
ClusterBalanceJobPO clusterBalanceJobPO = ClusterBalanceConverter.convert2ClusterBalanceJobPO(job.getId(), dto, optimizerResult, brokers, operator, generateReassignmentJson);
Result<Void> result = clusterBalanceJobService.modifyClusterBalanceJob(clusterBalanceJobPO, operator);
if (result.failed()){
return result;
}
// Delete the old reassignment details and create the new ones
Result<Void> delReassignResult = clusterBalanceReassignService.delete(job.getId(), operator);
if (delReassignResult.failed()){
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return delReassignResult;
}
Result<Void> cbrResult = clusterBalanceReassignService.addBatchBalanceReassign(reassignPOS);
if (cbrResult.failed()){
logger.error("method=clusterBalanceJobHandler.submit||job={}||errMsg={}!",
job, cbrResult.getMessage());
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return cbrResult;
}
}catch (Exception e){
logger.error("method=clusterBalanceJobHandler.modify||job={}||operator:{}||errMsg=exception", job, operator, e);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
return Result.buildSuc();
}
@Override
public Result<Void> updateLimit(Job job, Long limit, String operator) {
return clusterBalanceReassignService.modifyThrottle(job.getId(), limit, operator);
}
@Override
public Result<Void> process(Job job, JobActionEnum action, String operator) {
if (JobActionEnum.START.equals(action)) {
return clusterBalanceReassignService.execute(job.getId());
}
if (JobActionEnum.CANCEL.equals(action)) {
return clusterBalanceReassignService.cancel(job.getId());
}
// Other actions are not supported while the reassignment is in progress
return Result.buildFromRSAndMsg(ResultStatus.OPERATION_FORBIDDEN, String.format("不支持[%s]操作", action.getValue()));
}
@Override
public Result<JobStatus> status(Job job) {
// Status of each partition, grouped by topic
Map<String, List<ClusterBalanceReassignPO>> topicJobsMap = new HashMap<>();
// Fetch the sub-jobs and aggregate them by topic
List<ClusterBalanceReassignPO> allSubJobPOList = clusterBalanceReassignService.getBalanceReassignsByJobId(job.getId());
allSubJobPOList.forEach(elem -> {
topicJobsMap.putIfAbsent(elem.getTopicName(), new ArrayList<>());
topicJobsMap.get(elem.getTopicName()).add(elem);
});
// Compute the status of each topic
List<Integer> topicStatusList = new ArrayList<>();
for (List<ClusterBalanceReassignPO> topicJobPOList: topicJobsMap.values()) {
topicStatusList.add(new JobStatus(
topicJobPOList.stream().map(elem -> elem.getStatus()).collect(Collectors.toList())
).getStatus());
}
// Aggregate the topic results
return Result.buildSuc(new JobStatus(topicStatusList));
}
@Override
public Result<JobDetail> getTaskDetail(Job job) {
Result<ClusterBalanceReassignDetail> detailResult = clusterBalanceReassignService.getJobDetailsGroupByTopic(job.getId());
if (detailResult.failed()) {
return Result.buildFromIgnoreData(detailResult);
}
return Result.buildSuc(ClusterBalanceReassignConverter.convert2JobDetail(job, detailResult.getData()));
}
@Override
public Result<JobModifyDetail> getTaskModifyDetail(Job job) {
// Parse the job detail from the job data
JobClusterBalanceContent dto = ConvertUtil.str2ObjByJson(job.getJobData(), JobClusterBalanceContent.class);
if (dto == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "jobData格式错误");
}
JobModifyDetail detail = ConvertUtil.obj2Obj(job, JobModifyDetail.class);
detail.setJobData(ConvertUtil.obj2Json(dto));
return Result.buildSuc(detail);
}
@Override
public Result<List<SubJobPartitionDetailVO>> getSubJobPartitionDetail(Job job, String topic) {
Result<ClusterBalanceReassignDetail> detailResult = clusterBalanceReassignService.getJobDetailsGroupByTopic(job.getId());
if (detailResult.failed()) {
return Result.buildFromIgnoreData(detailResult);
}
List<ClusterBalanceDetailDataGroupByTopic> detailDataGroupByTopicList = detailResult.getData().getReassignTopicDetailsList()
.stream()
.filter(elem -> elem.getTopicName().equals(topic))
.collect(Collectors.toList());
if (detailDataGroupByTopicList.isEmpty()) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(job.getClusterId(), topic));
}
return Result.buildSuc(ClusterBalanceReassignConverter.convert2SubJobPartitionDetailVOList(detailDataGroupByTopicList.get(0)));
}
private Result<Void> checkOptimizerResult(OptimizerResult optimizerResult, Long jobId){
if (optimizerResult == null){
return Result.buildFrom(ResultStatus.KAFKA_OPERATE_FAILED);
}
if (optimizerResult.resultOverview().getMoveReplicas() == 0){
logger.info("method=checkOptimizerResult||jobId:{}||msg=the cluster has reached equilibrium", jobId);
return Result.buildFailure("该集群已达到均衡要求,不需要再执行均衡任务。");
}
return Result.buildSuc();
}
}

View File

@@ -0,0 +1,9 @@
/**
* Load-rebalance feature module.
* km-extends/km-rebalance is the low-level base module that generates a migration plan from metrics.
* This package builds the product-facing load-rebalance features on top of km-extends/km-rebalance, per product requirements.
*/
@EnterpriseLoadReBalance
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;

View File

@@ -0,0 +1,23 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobConfigPO;
@EnterpriseLoadReBalance
public interface ClusterBalanceJobConfigService {
/**
* Insert or update the balance config of a cluster
* @param clusterBalanceJobConfigPO balance config
* @return operation result
*/
Result<Void> replaceClusterBalanceJobConfigByClusterId(ClusterBalanceJobConfigPO clusterBalanceJobConfigPO);
/**
* Get the balance config of a cluster
* @param clusterId cluster ID
* @return balance config
*/
Result<ClusterBalanceJobConfigPO> getByClusterId(Long clusterId);
}

View File

@@ -0,0 +1,93 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.ClusterBalanceHistoryVO;
import java.util.List;
import java.util.Map;
@EnterpriseLoadReBalance
public interface ClusterBalanceJobService {
/**
* Delete a balance job and its reassignment details
* @param jobId job ID
* @param operator operator
* @return operation result
*/
Result<Void> deleteByJobId(Long jobId, String operator);
/**
* Create a balance job
* @param clusterBalanceJobPO balance job
* @param operator operator
* @return operation result
*/
Result<Void> createClusterBalanceJob(ClusterBalanceJobPO clusterBalanceJobPO, String operator);
/**
* Modify a balance job
* @param clusterBalanceJobPO balance job
* @param operator operator
* @return operation result
*/
Result<Void> modifyClusterBalanceJob(ClusterBalanceJobPO clusterBalanceJobPO, String operator);
/**
* Get a balance job by ID
* @param id job ID
* @return balance job
*/
Result<ClusterBalanceJobPO> getClusterBalanceJobById(Long id);
/**
* Get the most recent balance job of a cluster
* @param clusterPhyId physical cluster ID
* @return the latest balance job, or null if none exists
*/
ClusterBalanceJobPO getLastOneByClusterId(Long clusterPhyId);
/**
* Get the configured balance intervals of a cluster (type -> interval percent)
* @param clusterPhyId physical cluster ID
* @return balance intervals
*/
Map<String, Double> getBalanceInterval(Long clusterPhyId);
/**
* Page the finished balance jobs of a cluster
* @param clusterPhyId physical cluster ID
* @return paged balance history
*/
PaginationResult<ClusterBalanceHistoryVO> page(Long clusterPhyId, PaginationBaseDTO dto);
/**
* Get the ID of one running balance job by job status (also checks running sub-jobs)
*/
Long getOneRunningJob(Long clusterPhyId);
/**
* Verify the balance job and update its status
*/
Result<Void> verifyClusterBalanceAndUpdateStatue(Long jobId);
/**
* Generate the reassignment JSON for a job
* @param clusterId cluster ID
* @param parallelNum parallelism
* @param executionStrategy execution strategy
* @param jsonVersion version field of the generated JSON
* @param reassignPOList reassignment task details
* @return reassignment JSON
*/
Result<String> generateReassignmentJson(Long clusterId, Integer parallelNum, Integer executionStrategy, Integer jsonVersion, List<ClusterBalanceReassignPO> reassignPOList);
/**
* Re-generate the reassignment according to the execution strategy
* @param clusterPhyId physical cluster ID
* @param jobId job ID
* @return operation result
*/
Result<Void> generateReassignmentForStrategy(Long clusterPhyId, Long jobId);
}
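// For reference, generateReassignmentJson wraps the generated ReassignTask list into the standard
// Kafka partition-reassignment document (field values below are illustrative, assuming ReassignTask
// serializes to Kafka's topic/partition/replicas fields):
//
// {"version":1,"partitions":[{"topic":"topicA","partition":0,"replicas":[1,2,3]}]}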

View File

@@ -0,0 +1,72 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import java.util.List;
@EnterpriseLoadReBalance
public interface ClusterBalanceReassignService {
/**
* Add a reassignment task
* @param clusterBalanceReassignPO reassignment task
* @return operation result
*/
Result<Void> addBalanceReassign(ClusterBalanceReassignPO clusterBalanceReassignPO);
/**
* Batch-add reassignment tasks
* @param reassignPOList reassignment tasks
* @return operation result
*/
Result<Void> addBatchBalanceReassign(List<ClusterBalanceReassignPO> reassignPOList);
/**
* Delete the reassignment tasks of a job
*/
Result<Void> delete(Long jobId, String operator);
/**
* Execute the reassignment tasks of a job
*/
Result<Void> execute(Long jobId);
/**
* Cancel the reassignment tasks of a job
*/
Result<Void> cancel(Long jobId);
/**
* Verify the reassignment tasks and update their status
*/
Result<Boolean> verifyAndUpdateStatue(ClusterBalanceJobPO clusterBalanceJobPO);
/**
* Modify the throttle
*/
Result<Void> modifyThrottle(Long jobId, Long throttleUnitB, String operator);
/**
* Update the extend-data fields of the sub-jobs
*/
Result<Void> getAndUpdateSubJobExtendData(Long jobId);
/**
* Get the reassignment tasks of a job
*/
List<ClusterBalanceReassignPO> getBalanceReassignsByJobId(Long jobId);
/**
* Get the job details aggregated by topic
*/
Result<ClusterBalanceReassignDetail> getJobDetailsGroupByTopic(Long jobId);
/**
* Preferred-replica leader election
*/
Result<Void> preferredReplicaElection(Long jobId);
}
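// Typical lifecycle, as driven by ClusterBalanceJobHandler and ClusterBalanceJobServiceImpl:
// addBatchBalanceReassign(...) on submit -> execute(jobId) on start -> verifyAndUpdateStatue(jobPO)
// on each periodic check -> preferredReplicaElection(jobId) after verification.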

View File

@@ -0,0 +1,82 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalancePreviewDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceStrategyDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceItemState;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.*;
@EnterpriseLoadReBalance
public interface ClusterBalanceService {
/**
* Get the overall balance state of a cluster
* @param clusterPhyId physical cluster ID
* @return balance state
*/
Result<ClusterBalanceStateVO> state(Long clusterPhyId);
/**
* Get the balance job config of a cluster
* @param clusterPhyId physical cluster ID
* @return balance job config
*/
Result<ClusterBalanceJobConfigVO> config(Long clusterPhyId);
/**
* Page the per-broker balance overview of a cluster
* @param clusterPhyId physical cluster ID
* @param dto query conditions
* @return paged overview
*/
PaginationResult<ClusterBalanceOverviewVO> overview(Long clusterPhyId, ClusterBalanceOverviewDTO dto);
/**
* Get the per-dimension balance state of a cluster
* @param clusterPhyId physical cluster ID
* @return item state
*/
Result<ClusterBalanceItemState> getItemState(Long clusterPhyId);
/**
* Page the balance history of a cluster
* @param clusterPhyId physical cluster ID
* @param dto pagination conditions
* @return paged history
*/
PaginationResult<ClusterBalanceHistoryVO> history(Long clusterPhyId, PaginationBaseDTO dto);
/**
* Get the balance plan of a job
* @param clusterPhyId physical cluster ID
* @param jobId job ID
* @return balance plan
*/
Result<ClusterBalancePlanVO> plan(Long clusterPhyId, Long jobId);
/**
* Preview a balance plan without creating a job
* @param clusterBalancePreviewDTO preview conditions
* @return balance plan
*/
Result<ClusterBalancePlanVO> preview(Long clusterPhyId, ClusterBalancePreviewDTO clusterBalancePreviewDTO);
/**
* Get the plan of a scheduled balance job
* @param jobId job ID
* @return balance plan
*/
Result<ClusterBalancePlanVO> schedule(Long clusterPhyId, Long jobId);
/**
* Save the periodic balance strategy of a cluster
* @param clusterPhyId physical cluster ID
* @param dto strategy
* @return operation result
*/
Result<Void> strategy(Long clusterPhyId, ClusterBalanceStrategyDTO dto, String operator);
/**
* Create a scheduled balance job at the given trigger time
* @param clusterPhyId physical cluster ID
* @return operation result
*/
Result<Void> createScheduleJob(Long clusterPhyId, long triggerTimeUnitMs);
}

View File

@@ -0,0 +1,79 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobConfigPO;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceJobConfigService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceJobConfigDao;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Map;
@Service
@EnterpriseLoadReBalance
public class ClusterBalanceJobConfigServiceImpl implements ClusterBalanceJobConfigService {
private static final ILog logger = LogFactory.getLog(ClusterBalanceJobConfigServiceImpl.class);
@Autowired
private ClusterBalanceJobConfigDao clusterBalanceJobConfigDao;
@Autowired
private BrokerSpecService brokerSpecService;
@Autowired
private BrokerService brokerService;
@Override
public Result<Void> replaceClusterBalanceJobConfigByClusterId(ClusterBalanceJobConfigPO clusterBalanceJobConfigPO) {
List<Broker> brokers = brokerService.listAllBrokersFromDB(clusterBalanceJobConfigPO.getClusterId());
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterBalanceJobConfigPO.getClusterId());
for(Broker broker: brokers){
if (brokerSpecMap.get(broker.getBrokerId())==null){
return Result.buildFrom(ResultStatus.CLUSTER_SPEC_INCOMPLETE);
}
}
try {
LambdaQueryWrapper<ClusterBalanceJobConfigPO> queryWrapper = new LambdaQueryWrapper<>();
queryWrapper.eq(ClusterBalanceJobConfigPO::getClusterId, clusterBalanceJobConfigPO.getClusterId());
ClusterBalanceJobConfigPO oldConfig = clusterBalanceJobConfigDao.selectOne(queryWrapper);
int count;
if (oldConfig == null){
count = clusterBalanceJobConfigDao.insert(clusterBalanceJobConfigPO);
}else{
clusterBalanceJobConfigPO.setId(oldConfig.getId());
count = clusterBalanceJobConfigDao.updateById(clusterBalanceJobConfigPO);
}
if (count < 1){
logger.error("replace cluster balance job config detail failed! clusterBalanceJobConfigPO:{}", clusterBalanceJobConfigPO);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
}catch (Exception e){
logger.error("replace cluster balance job config failed! clusterBalanceJobConfigPO:{}", clusterBalanceJobConfigPO, e);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
return Result.buildSuc();
}
@Override
public Result<ClusterBalanceJobConfigPO> getByClusterId(Long clusterId) {
ClusterBalanceJobConfigPO queryParam = new ClusterBalanceJobConfigPO();
queryParam.setClusterId(clusterId);
return Result.buildSuc(clusterBalanceJobConfigDao.selectOne(new QueryWrapper<>(queryParam)));
}
}

View File

@@ -0,0 +1,458 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.impl;
import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.google.common.collect.Lists;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.reassign.ExecuteReassignParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.strategy.ReassignExecutionStrategy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.strategy.ReassignTask;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceInterval;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.detail.ClusterBalancePlanDetail;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobConfigPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.ClusterBalanceHistorySubVO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.ClusterBalanceHistoryVO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter.ClusterBalanceConverter;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter.ClusterBalanceReassignConverter;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceJobConfigService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceJobService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceReassignService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionService;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignStrategyService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceJobDao;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceReassignDao;
import com.xiaojukeji.know.streaming.km.rebalance.executor.ExecutionRebalance;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BrokerBalanceState;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@Service
@EnterpriseLoadReBalance
public class ClusterBalanceJobServiceImpl implements ClusterBalanceJobService {
private static final ILog logger = LogFactory.getLog(ClusterBalanceJobServiceImpl.class);
@Value("${es.client.address:}")
private String esAddress;
@Autowired
private ClusterBalanceJobDao clusterBalanceJobDao;
@Autowired
private ClusterBalanceReassignDao clusterBalanceReassignDao;
@Autowired
private ClusterBalanceReassignService clusterBalanceReassignService;
@Autowired
private ClusterBalanceJobConfigService clusterBalanceJobConfigService;
@Autowired
private BrokerSpecService brokerSpecService;
@Autowired
private BrokerService brokerService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private TopicService topicService;
@Autowired
private ConfigUtils configUtils;
@Autowired
private ReassignService reassignService;
@Autowired
private ReassignStrategyService reassignStrategyService;
@Autowired
private OpPartitionService opPartitionService;
@Override
public Result<Void> deleteByJobId(Long jobId, String operator) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "jobId不允许为空");
}
try {
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, String.format("jobId:[%d] not exist", jobId));
}
if (JobStatusEnum.canNotDeleteJob(jobPO.getStatus())) {
// Illegal status, operation forbidden
return this.buildActionForbidden(jobId, jobPO.getStatus());
}
clusterBalanceJobDao.deleteById(jobId);
LambdaQueryWrapper<ClusterBalanceReassignPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ClusterBalanceReassignPO::getJobId, jobId);
clusterBalanceReassignDao.delete(lambdaQueryWrapper);
return Result.buildSuc();
} catch (Exception e) {
logger.error("method=delete||jobId={}||errMsg=exception", jobId, e);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
@Override
public Result<Void> createClusterBalanceJob(ClusterBalanceJobPO clusterBalanceJobPO, String operator) {
if (ValidateUtils.isNull(clusterBalanceJobPO)){
return Result.buildFrom(ResultStatus.NOT_EXIST);
}
try {
clusterBalanceJobDao.addClusterBalanceJob(clusterBalanceJobPO);
}catch (Exception e){
logger.error("method=createClusterBalanceJob||clusterBalanceJobPO:{}||errMsg=exception", clusterBalanceJobPO, e);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
return Result.buildSuc();
}
@Override
public Result<Void> modifyClusterBalanceJob(ClusterBalanceJobPO clusterBalanceJobPO, String operator) {
ClusterBalanceJobPO oldJobPo = clusterBalanceJobDao.selectById(clusterBalanceJobPO.getId());
if (oldJobPo == null){
return Result.buildFrom(ResultStatus.NOT_EXIST);
}
try {
int count = clusterBalanceJobDao.updateById(clusterBalanceJobPO);
if (count < 1){
logger.error("method=modifyClusterBalanceJob||clusterBalanceJobPO:{}||errMsg=modify clusterBalanceJob failed", clusterBalanceJobPO);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
}catch (Exception e){
logger.error("method=modifyClusterBalanceJob||clusterBalanceJobPO:{}||errMsg=exception", clusterBalanceJobPO, e);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
return Result.buildSuc();
}
@Override
public Result<ClusterBalanceJobPO> getClusterBalanceJobById(Long id) {
return Result.buildSuc(clusterBalanceJobDao.selectById(id));
}
@Override
public ClusterBalanceJobPO getLastOneByClusterId(Long clusterPhyId) {
ClusterBalanceJobPO clusterBalanceJobPO = new ClusterBalanceJobPO();
clusterBalanceJobPO.setClusterId(clusterPhyId);
QueryWrapper<ClusterBalanceJobPO> queryWrapper = new QueryWrapper<>();
queryWrapper.setEntity(clusterBalanceJobPO);
queryWrapper.orderByDesc("id");
List<ClusterBalanceJobPO> clusterBalanceJobPOS = clusterBalanceJobDao.selectList(queryWrapper);
if (clusterBalanceJobPOS.isEmpty()){
return null;
}
return clusterBalanceJobPOS.get(0);
}
@Override
public Map<String, Double> getBalanceInterval(Long clusterPhyId) {
Result<ClusterBalanceJobConfigPO> configPOResult = clusterBalanceJobConfigService.getByClusterId(clusterPhyId);
if (!configPOResult.hasData()){
return new HashMap<>();
}
List<ClusterBalanceInterval> clusterBalanceIntervals = ConvertUtil.str2ObjArrayByJson(configPOResult.getData().getBalanceIntervalJson(), ClusterBalanceInterval.class);
return clusterBalanceIntervals.stream().collect(Collectors.toMap(ClusterBalanceInterval::getType,ClusterBalanceInterval::getIntervalPercent));
}
@Override
public PaginationResult<ClusterBalanceHistoryVO> page(Long clusterPhyId, PaginationBaseDTO dto) {
List<ClusterBalanceHistoryVO> historyVOS = new ArrayList<>();
LambdaQueryWrapper<ClusterBalanceJobPO> queryWrapper = new LambdaQueryWrapper<>();
queryWrapper.eq(ClusterBalanceJobPO::getClusterId, clusterPhyId);
List<Integer> status = Lists.newArrayList(JobStatusEnum.SUCCESS.getStatus(), JobStatusEnum.CANCELED.getStatus(), JobStatusEnum.FAILED.getStatus());
queryWrapper.in(ClusterBalanceJobPO::getStatus, status);
queryWrapper.orderByDesc(ClusterBalanceJobPO::getStartTime);
IPage<ClusterBalanceJobPO> page = clusterBalanceJobDao.selectPage(new Page<>(dto.getPageNo(), dto.getPageSize()), queryWrapper);
page.setTotal(clusterBalanceJobDao.selectCount(queryWrapper));
for (ClusterBalanceJobPO clusterBalanceJobPO : page.getRecords()){
ClusterBalanceHistoryVO clusterBalanceHistoryVO = new ClusterBalanceHistoryVO();
clusterBalanceHistoryVO.setBegin(clusterBalanceJobPO.getStartTime());
clusterBalanceHistoryVO.setEnd(clusterBalanceJobPO.getFinishedTime());
clusterBalanceHistoryVO.setJobId(clusterBalanceJobPO.getId());
List<ClusterBalancePlanDetail> detailVOS = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBrokerBalanceDetail(), ClusterBalancePlanDetail.class);
Map<String, ClusterBalanceHistorySubVO> subMap = new HashMap<>();
ClusterBalanceHistorySubVO diskSubVO = new ClusterBalanceHistorySubVO();
diskSubVO.setSuccessNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getDiskStatus() != null && clusterBalancePlanDetail.getDiskStatus() == 0).count());
diskSubVO.setFailedNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getDiskStatus() != null && clusterBalancePlanDetail.getDiskStatus() != 0).count());
subMap.put(Resource.DISK.resource(), diskSubVO);
ClusterBalanceHistorySubVO cpuSubVO = new ClusterBalanceHistorySubVO();
cpuSubVO.setSuccessNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getCpuStatus() != null && clusterBalancePlanDetail.getCpuStatus() == 0).count());
cpuSubVO.setFailedNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getCpuStatus() != null && clusterBalancePlanDetail.getCpuStatus() != 0).count());
subMap.put(Resource.CPU.resource(), cpuSubVO);
ClusterBalanceHistorySubVO bytesInSubVO = new ClusterBalanceHistorySubVO();
bytesInSubVO.setSuccessNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getByteInStatus() != null && clusterBalancePlanDetail.getByteInStatus() == 0).count());
bytesInSubVO.setFailedNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getByteInStatus() != null && clusterBalancePlanDetail.getByteInStatus() != 0).count());
subMap.put(Resource.NW_IN.resource(), bytesInSubVO);
ClusterBalanceHistorySubVO bytesOutSubVO = new ClusterBalanceHistorySubVO();
bytesOutSubVO.setSuccessNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getByteOutStatus() != null && clusterBalancePlanDetail.getByteOutStatus() == 0).count());
bytesOutSubVO.setFailedNu(detailVOS.stream().filter(clusterBalancePlanDetail -> clusterBalancePlanDetail.getByteOutStatus() != null && clusterBalancePlanDetail.getByteOutStatus() != 0).count());
subMap.put(Resource.NW_OUT.resource(), bytesOutSubVO);
clusterBalanceHistoryVO.setSub(subMap);
historyVOS.add(clusterBalanceHistoryVO);
}
return PaginationResult.buildSuc(historyVOS, page);
}
@Override
public Long getOneRunningJob(Long clusterPhyId) {
LambdaQueryWrapper<ClusterBalanceJobPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ClusterBalanceJobPO::getClusterId, clusterPhyId);
lambdaQueryWrapper.eq(ClusterBalanceJobPO::getStatus, JobStatusEnum.RUNNING.getStatus());
List<ClusterBalanceJobPO> poList = clusterBalanceJobDao.selectList(lambdaQueryWrapper);
if (!ValidateUtils.isEmptyList(poList)) {
// Return the first one by default
return poList.get(0).getId();
}
// Also check for running sub-jobs, in case the parent job and sub-job statuses are inconsistent
LambdaQueryWrapper<ClusterBalanceReassignPO> subLambdaQueryWrapper = new LambdaQueryWrapper<>();
subLambdaQueryWrapper.eq(ClusterBalanceReassignPO::getClusterId, clusterPhyId);
subLambdaQueryWrapper.eq(ClusterBalanceReassignPO::getStatus, JobStatusEnum.RUNNING.getStatus());
List<ClusterBalanceReassignPO> subPOList = clusterBalanceReassignDao.selectList(subLambdaQueryWrapper);
if (ValidateUtils.isEmptyList(subPOList)) {
return null;
}
return subPOList.get(0).getJobId();
}
@Override
@Transactional
public Result<Void> verifyClusterBalanceAndUpdateStatue(Long jobId) {
ClusterBalanceJobPO clusterBalanceJobPO = clusterBalanceJobDao.selectById(jobId);
if (clusterBalanceJobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, String.format("jobId:[%d] not exist", jobId));
}
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterBalanceJobPO.getClusterId());
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
Result<Void> rv = reassignService.changReassignmentThrottles(
new ExecuteReassignParam(clusterBalanceJobPO.getClusterId(), clusterBalanceJobPO.getReassignmentJson(), clusterBalanceJobPO.getThrottleUnitB())
);
if (rv.failed()) {
logger.error("method=verifyClusterBalanceAndUpdateStatue||jobId={}||result={}||msg=change throttle failed", jobId, rv);
return rv;
}
// Fetch broker spec info
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterBalanceJobPO.getClusterId());
// Fetch broker info
Map<Integer, Broker> brokerMap = brokerService.listAllBrokersFromDB(clusterBalanceJobPO.getClusterId()).stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
// Update the balance job state
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhy.getId(), configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
Map<Integer, BrokerBalanceState> brokerBalanceStateMap = ExecutionRebalance
.getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(clusterBalanceJobPO, brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
List<ClusterBalancePlanDetail> oldDetails = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBrokerBalanceDetail(), ClusterBalancePlanDetail.class);
List<ClusterBalancePlanDetail> newDetails = ClusterBalanceConverter.convert2ClusterBalancePlanDetail(oldDetails, brokerBalanceStateMap);
clusterBalanceJobPO.setBrokerBalanceDetail(ConvertUtil.obj2Json(newDetails));
Result<Void> modifyResult = this.modifyClusterBalanceJob(clusterBalanceJobPO, Constant.SYSTEM);
if (modifyResult.failed()){
logger.error("method=verifyClusterBalanceAndUpdateStatue||jobId:{}||errMsg={}", jobId, modifyResult);
return modifyResult;
}
// Update the reassignment task state
Result<Boolean> result = clusterBalanceReassignService.verifyAndUpdateStatue(clusterBalanceJobPO);
if (!result.hasData()){
return Result.buildFromIgnoreData(result);
}
rv = clusterBalanceReassignService.preferredReplicaElection(jobId);
if (rv.failed()){
logger.error("method=verifyClusterBalanceAndUpdateStatue||jobId={}||result={}||msg=preferred replica election failed", jobId, rv);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return rv;
}
return Result.buildSuc();
}
@Override
public Result<String> generateReassignmentJson(Long clusterId,
Integer parallelNum,
Integer executionStrategy,
Integer jsonVersion,
List<ClusterBalanceReassignPO> reassignPOList){
Result<List<ReassignTask>> result = reassignStrategyService.generateReassignmentTask(
new ReassignExecutionStrategy(
clusterId,
parallelNum,
executionStrategy,
ClusterBalanceReassignConverter.convert2ReplaceReassignSubList(reassignPOList)
)
);
if (result.failed() || result.getData().isEmpty()){
return Result.buildFromIgnoreData(result);
}
Map<String, Object> reassign = new HashMap<>();
reassign.put(KafkaConstant.PARTITIONS, result.getData());
reassign.put(KafkaConstant.VERSION, jsonVersion);
String generateReassignmentJson = ConvertUtil.obj2Json(reassign);
// Validate the generated reassignment JSON
Result<Void> rv = reassignService.parseExecuteAssignmentArgs(clusterId, generateReassignmentJson);
if (rv.failed()) {
return Result.buildFromIgnoreData(rv);
}
return Result.buildSuc(generateReassignmentJson);
}
@Override
@Transactional
public Result<Void> generateReassignmentForStrategy(Long clusterPhyId, Long jobId) {
ClusterBalanceJobPO job = clusterBalanceJobDao.selectById(jobId);
if (job == null){
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST,
MsgConstant.getReassignJobBizStr(jobId, clusterPhyId));
}
if (!JobStatusEnum.isRunning(job.getStatus()) || job.getParallelNum() < 1){
return Result.buildSuc();
}
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
List<ClusterBalanceReassignPO> reassignPOS = clusterBalanceReassignService.getBalanceReassignsByJobId(jobId);
// Kafka versions below 2.4 cannot add replica reassignments dynamically, so wait until the current batch finishes before fetching the next batch of reassignment tasks according to the parallelism
double version = Double.parseDouble(clusterPhy.getKafkaVersion().substring(0, 3));
if (version < 2.4 && reassignPOS.stream()
.anyMatch(reassignPO -> reassignPO.getStatus() == JobStatusEnum.RUNNING.getStatus())){
return Result.buildSuc();
}
// Filter out finished sub-tasks, keeping only running and waiting ones
reassignPOS = reassignPOS.stream()
.filter(reassignPO -> reassignPO.getStatus() == JobStatusEnum.RUNNING.getStatus()
|| reassignPO.getStatus() == JobStatusEnum.WAITING.getStatus())
.collect(Collectors.toList());
if (reassignPOS.isEmpty()){
return Result.buildSuc();
}
// Bump the version based on the current reassignment JSON (balanceIntervalJson holds the interval list and carries no version field)
Map<String, Object> reassign = JSON.parseObject(job.getReassignmentJson());
Result<String> r = this.generateReassignmentJson(job.getClusterId(),
job.getParallelNum(),
job.getExecutionStrategy(),
reassign.get(KafkaConstant.VERSION) == null ? 1 : (Integer) reassign.get(KafkaConstant.VERSION) + 1,
reassignPOS);
if (!r.hasData()){
return Result.buildFromIgnoreData(r);
}
try {
// Update the job's reassignment JSON
job.setReassignmentJson(r.getData());
job.setUpdateTime(new Date());
clusterBalanceJobDao.updateById(job);
// Update the sub-task statuses
modifyReassignStatus(r.getData(), reassignPOS);
}catch (Exception e){
logger.error("method=generateReassignmentForStrategy||jobId={}||errMsg=exception", jobId, e);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
return reassignService.executePartitionReassignments(
new ExecuteReassignParam(job.getClusterId(), r.getData(), job.getThrottleUnitB()));
}
private Result<Void> buildActionForbidden(Long jobId, Integer jobStatus) {
return Result.buildFromRSAndMsg(
ResultStatus.OPERATION_FORBIDDEN,
String.format("jobId:[%d] 当前 status:[%s], 不允许被执行", jobId, JobStatusEnum.valueOfStatus(jobStatus))
);
}
private void modifyReassignStatus(String reassignmentJson, List<ClusterBalanceReassignPO> reassignPOS){
Map<String, Object> reassign = JSON.parseObject(reassignmentJson);
// Deserialize explicitly; casting the parsed JSONArray to List<ReassignTask> would leave JSONObject elements behind
List<ReassignTask> reassignTasks = JSON.parseArray(JSON.toJSONString(reassign.get(KafkaConstant.PARTITIONS)), ReassignTask.class);
if (reassignTasks == null || reassignTasks.isEmpty()){
return;
}
// Update sub-task statuses
reassignTasks.forEach(reassignTask -> {
for (ClusterBalanceReassignPO reassignPO: reassignPOS) {
if (reassignPO.getStatus().equals(JobStatusEnum.WAITING.getStatus())
&& reassignTask.getTopic().equals(reassignPO.getTopicName())
&& reassignTask.getPartition() == reassignPO.getPartitionId()) {
ClusterBalanceReassignPO newReassignPO = new ClusterBalanceReassignPO();
newReassignPO.setId(reassignPO.getId());
newReassignPO.setStatus(JobStatusEnum.RUNNING.getStatus());
newReassignPO.setUpdateTime(new Date());
clusterBalanceReassignDao.updateById(newReassignPO);
break;
}
}
});
}
}
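For orientation, the document assembled by generateReassignmentJson above is Kafka's standard partition-reassignment JSON: a version field plus a partitions array of {topic, partition, replicas} entries. A minimal standalone sketch of that shape; the topic name, partition and broker ids are illustrative placeholders, not values from this commit:

import java.util.*;

// Builds the same map shape that generateReassignmentJson serializes; all values are placeholders.
public class ReassignmentJsonSketch {
    public static void main(String[] args) {
        Map<String, Object> partition = new LinkedHashMap<>();
        partition.put("topic", "demo-topic");               // placeholder topic
        partition.put("partition", 0);                      // placeholder partition id
        partition.put("replicas", Arrays.asList(1, 2, 3));  // target broker ids
        Map<String, Object> reassign = new LinkedHashMap<>();
        reassign.put("version", 1);                         // bumped for each regenerated batch
        reassign.put("partitions", Collections.singletonList(partition));
        // A JSON serializer such as fastjson's JSON.toJSONString(reassign) yields:
        // {"version":1,"partitions":[{"topic":"demo-topic","partition":0,"replicas":[1,2,3]}]}
        System.out.println(reassign);
    }
}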

View File

@@ -0,0 +1,514 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.impl;
import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.ClusterBalanceReassignDetail;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ReplicationMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.param.reassign.ExecuteReassignParam;
import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.ReassignResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.job.ReassignSubJobExtendData;
import com.xiaojukeji.know.streaming.km.common.bean.entity.reassign.strategy.ReassignTask;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceReassignPO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter.ClusterBalanceReassignConverter;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceReassignService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
import com.xiaojukeji.know.streaming.km.core.service.replica.ReplicaMetricService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.ReplicaMetricVersionItems;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceJobDao;
import com.xiaojukeji.know.streaming.km.persistence.mysql.enterprise.rebalance.ClusterBalanceReassignDao;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import java.util.*;
@Service
@EnterpriseLoadReBalance
public class ClusterBalanceReassignServiceImpl implements ClusterBalanceReassignService {
private static final ILog logger = LogFactory.getLog(ClusterBalanceReassignServiceImpl.class);
@Autowired
private ClusterBalanceReassignDao clusterBalanceReassignDao;
@Autowired
private ClusterBalanceJobDao clusterBalanceJobDao;
@Autowired
private ReassignService reassignService;
@Autowired
private ReplicaMetricService replicationMetricService;
@Autowired
private PartitionService partitionService;
@Autowired
private OpLogWrapService opLogWrapService;
@Autowired
private OpPartitionService opPartitionService;
@Override
public Result<Void> addBalanceReassign(ClusterBalanceReassignPO clusterBalanceReassignPO) {
if (clusterBalanceReassignPO == null) {
return Result.buildFrom(ResultStatus.NOT_EXIST);
}
try {
int count = clusterBalanceReassignDao.insert(clusterBalanceReassignPO);
if (count < 1) {
logger.error("create cluster balance reassign detail failed! clusterBalanceReassignPO:{}", clusterBalanceReassignPO);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
} catch (Exception e) {
logger.error("create cluster balance reassign detail failed! clusterBalanceReassignPO:{}", clusterBalanceReassignPO, e);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
return Result.buildSuc();
}
@Override
public Result<Void> addBatchBalanceReassign(List<ClusterBalanceReassignPO> reassignPOList) {
try {
int count = clusterBalanceReassignDao.addBatch(reassignPOList);
if (count < 1) {
logger.error("method=addBatchBalanceReassign||reassignPOList:{}||msg=create cluster balance reassign detail failed! ", reassignPOList);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
} catch (Exception e) {
logger.error("method=addBatchBalanceReassign||reassignPOList:{}||msg=create cluster balance reassign detail failed! ", reassignPOList, e);
return Result.buildFrom(ResultStatus.MYSQL_OPERATE_FAILED);
}
return Result.buildSuc();
}
@Override
public Result<Void> delete(Long jobId, String operator) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, MsgConstant.getJobIdCanNotNull());
}
try {
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
LambdaQueryWrapper<ClusterBalanceReassignPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ClusterBalanceReassignPO::getJobId, jobId);
clusterBalanceReassignDao.delete(lambdaQueryWrapper);
return Result.buildSuc();
} catch (Exception e) {
logger.error("method=delete||jobId={}||errMsg=exception", jobId, e);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
@Override
public Result<Void> execute(Long jobId) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, MsgConstant.getJobIdCanNotNull());
}
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
if (!JobStatusEnum.canExecuteJob(jobPO.getStatus())) {
// Illegal status; the operation is forbidden
return this.buildActionForbidden(jobId, jobPO.getStatus());
}
// Update statuses in DB
this.setJobInRunning(jobPO);
// Execute the reassignment
Result<Void> rv = reassignService.executePartitionReassignments(new ExecuteReassignParam(jobPO.getClusterId(), jobPO.getReassignmentJson(), jobPO.getThrottleUnitB()));
if (rv.failed()) {
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return rv;
}
return Result.buildSuc();
}
@Override
public Result<Void> cancel(Long jobId) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, MsgConstant.getJobIdCanNotNull());
}
try {
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
if (!JobStatusEnum.canCancelJob(jobPO.getStatus())) {
// Illegal status; the operation is forbidden
return this.buildActionForbidden(jobId, jobPO.getStatus());
}
this.setJobCanceled(jobPO);
return Result.buildSuc();
} catch (Exception e) {
logger.error("method=cancel||jobId={}||errMsg=exception", jobId, e);
TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
@Override
@Transactional
public Result<Boolean> verifyAndUpdateStatue(ClusterBalanceJobPO jobPO) {
if (jobPO == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, "job not exist");
}
// Verify the reassignment result
Result<ReassignResult> reassignResult = reassignService.verifyPartitionReassignments(
new ExecuteReassignParam(jobPO.getClusterId(), jobPO.getReassignmentJson(), jobPO.getThrottleUnitB())
);
if (reassignResult.failed()) {
return Result.buildFromIgnoreData(reassignResult);
}
// Update the job status
return this.checkAndSetSuccessIfFinished(jobPO, reassignResult.getData());
}
@Override
public Result<Void> modifyThrottle(Long jobId, Long throttleUnitB, String operator) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, MsgConstant.getJobIdCanNotNull());
}
try {
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
if (JobStatusEnum.isFinished(jobPO.getStatus())) {
// Illegal status; the operation is forbidden
return this.buildActionForbidden(jobId, jobPO.getStatus());
}
// Update the throttle value
jobPO.setThrottleUnitB(throttleUnitB);
clusterBalanceJobDao.updateById(jobPO);
// Record the operation log
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.EDIT.getDesc(),
ModuleEnum.JOB_CLUSTER_BALANCE.getDesc(),
MsgConstant.getReassignJobBizStr(
jobId,
jobPO.getClusterId()
),
String.format("新的限流值:[%d]", throttleUnitB)
));
return Result.buildSuc();
} catch (Exception e) {
logger.error("method=modifyThrottle||jobId={}||throttleUnitB={}||errMsg=exception", jobId, throttleUnitB, e);
return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage());
}
}
@Override
public Result<Void> getAndUpdateSubJobExtendData(Long jobId) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, MsgConstant.getJobIdCanNotNull());
}
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
List<ClusterBalanceReassignPO> reassigns = this.getBalanceReassignsByJobId(jobId);
for (ClusterBalanceReassignPO reassignPO: reassigns) {
Result<ReassignSubJobExtendData> extendDataResult = this.getReassignSubJobExtendData(reassignPO);
if (extendDataResult.failed()) {
continue;
}
reassignPO.setExtendData(ConvertUtil.obj2Json(extendDataResult.getData()));
clusterBalanceReassignDao.updateById(reassignPO);
}
return Result.buildSuc();
}
@Override
public List<ClusterBalanceReassignPO> getBalanceReassignsByJobId(Long jobId) {
if (jobId == null) {
return new ArrayList<>();
}
LambdaQueryWrapper<ClusterBalanceReassignPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
lambdaQueryWrapper.eq(ClusterBalanceReassignPO::getJobId, jobId);
return clusterBalanceReassignDao.selectList(lambdaQueryWrapper);
}
@Override
public Result<ClusterBalanceReassignDetail> getJobDetailsGroupByTopic(Long jobId) {
if (jobId == null) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, MsgConstant.getJobIdCanNotNull());
}
// Fetch the job
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
// Fetch the sub-tasks
List<ClusterBalanceReassignPO> subJobPOList = this.getBalanceReassignsByJobId(jobId);
// Assemble and return the data
return Result.buildSuc(ClusterBalanceReassignConverter.convert2ClusterBalanceReassignDetail(jobPO, subJobPOList));
}
@Override
public Result<Void> preferredReplicaElection(Long jobId) {
// Fetch the job
ClusterBalanceJobPO jobPO = clusterBalanceJobDao.selectById(jobId);
if (jobPO == null) {
// Job does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getJobNotExist(jobId));
}
if (!JobStatusEnum.isFinished(jobPO.getStatus())){
return Result.buildSuc();
}
// Fetch the sub-tasks
List<ClusterBalanceReassignPO> subJobPOList = this.getBalanceReassignsByJobId(jobId);
List<TopicPartition> topicPartitions = new ArrayList<>();
subJobPOList.forEach(reassignPO -> {
Integer targetLeader = CommonUtils.string2IntList(reassignPO.getReassignBrokerIds()).get(0);
Integer originalLeader = CommonUtils.string2IntList(reassignPO.getOriginalBrokerIds()).get(0);
// Partitions whose leader was moved are added to the preferred-replica-election list
if (!originalLeader.equals(targetLeader)){
topicPartitions.add(new TopicPartition(reassignPO.getTopicName(), reassignPO.getPartitionId()));
}
});
if (!topicPartitions.isEmpty()){
return opPartitionService.preferredReplicaElection(jobPO.getClusterId(), topicPartitions);
}
return Result.buildSuc();
}
private Result<Void> buildActionForbidden(Long jobId, Integer jobStatus) {
return Result.buildFromRSAndMsg(
ResultStatus.OPERATION_FORBIDDEN,
String.format("jobId:[%d] 当前 status:[%s], 不允许被执行", jobId, JobStatusEnum.valueOfStatus(jobStatus))
);
}
private Result<Void> setJobInRunning(ClusterBalanceJobPO jobPO) {
Map<String, Object> reassign = JSON.parseObject(jobPO.getReassignmentJson());
if (reassign.isEmpty()){
return Result.buildSuc();
}
List<ReassignTask> reassignTasks = JSON.parseArray(JSON.toJSONString(reassign.get(KafkaConstant.PARTITIONS)), ReassignTask.class);
if (reassignTasks == null || reassignTasks.isEmpty()){
return Result.buildSuc();
}
long now = System.currentTimeMillis();
// Update sub-task statuses
List<ClusterBalanceReassignPO> reassignPOS = this.getBalanceReassignsByJobId(jobPO.getId());
reassignTasks.forEach(reassignTask -> {
for (ClusterBalanceReassignPO reassignPO: reassignPOS) {
if (reassignTask.getTopic().equals(reassignPO.getTopicName())
&& reassignTask.getPartition() == reassignPO.getPartitionId()) {
ClusterBalanceReassignPO newReassignPO = new ClusterBalanceReassignPO();
newReassignPO.setId(reassignPO.getId());
newReassignPO.setStatus(JobStatusEnum.RUNNING.getStatus());
newReassignPO.setStartTime(new Date(now));
newReassignPO.setUpdateTime(new Date(now));
clusterBalanceReassignDao.updateById(newReassignPO);
break;
}
}
});
// Update the parent job status
ClusterBalanceJobPO newJobPO = new ClusterBalanceJobPO();
newJobPO.setId(jobPO.getId());
newJobPO.setStatus(JobStatusEnum.RUNNING.getStatus());
newJobPO.setStartTime(new Date(now));
newJobPO.setUpdateTime(new Date(now));
clusterBalanceJobDao.updateById(newJobPO);
return Result.buildSuc();
}
private Result<Void> setJobCanceled(ClusterBalanceJobPO jobPO) {
// Update sub-task statuses
List<ClusterBalanceReassignPO> reassignPOS = this.getBalanceReassignsByJobId(jobPO.getId());
for (ClusterBalanceReassignPO reassignPO: reassignPOS) {
ClusterBalanceReassignPO newReassignPO = new ClusterBalanceReassignPO();
newReassignPO.setId(reassignPO.getId());
newReassignPO.setStatus(JobStatusEnum.CANCELED.getStatus());
clusterBalanceReassignDao.updateById(newReassignPO);
}
// Update the parent job status
ClusterBalanceJobPO newJobPO = new ClusterBalanceJobPO();
newJobPO.setId(jobPO.getId());
newJobPO.setStatus(JobStatusEnum.CANCELED.getStatus());
clusterBalanceJobDao.updateById(newJobPO);
return Result.buildSuc();
}
private Result<Boolean> checkAndSetSuccessIfFinished(ClusterBalanceJobPO jobPO, ReassignResult reassignmentResult) {
long now = System.currentTimeMillis();
List<ClusterBalanceReassignPO> reassignPOS = this.getBalanceReassignsByJobId(jobPO.getId());
boolean existNotFinished = false;
for (ClusterBalanceReassignPO balanceReassignPO: reassignPOS) {
if (!reassignmentResult.checkPartitionFinished(balanceReassignPO.getTopicName(), balanceReassignPO.getPartitionId())) {
existNotFinished = true;
continue;
}
// Update the sub-task status
ClusterBalanceReassignPO newReassignPO = new ClusterBalanceReassignPO();
newReassignPO.setId(balanceReassignPO.getId());
newReassignPO.setStatus(JobStatusEnum.SUCCESS.getStatus());
newReassignPO.setFinishedTime(new Date(now));
clusterBalanceReassignDao.updateById(newReassignPO);
}
// Update the job status once all sub-tasks have finished
if (!existNotFinished && !reassignmentResult.isPartsOngoing()) {
ClusterBalanceJobPO newBalanceJobPO = new ClusterBalanceJobPO();
newBalanceJobPO.setId(jobPO.getId());
newBalanceJobPO.setStatus(JobStatusEnum.SUCCESS.getStatus());
newBalanceJobPO.setFinishedTime(new Date(now));
clusterBalanceJobDao.updateById(newBalanceJobPO);
}
return Result.buildSuc(reassignmentResult.isPartsOngoing());
}
private Result<ReassignSubJobExtendData> getReassignSubJobExtendData(ClusterBalanceReassignPO subJobPO) {
Partition partition = partitionService.getPartitionByTopicAndPartitionId(
subJobPO.getClusterId(),
subJobPO.getTopicName(),
subJobPO.getPartitionId()
);
if (partition == null) {
// Partition does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getPartitionNotExist(subJobPO.getClusterId(),
subJobPO.getTopicName(),
subJobPO.getPartitionId())
);
}
// Get the leader replica's log size
Float leaderLogSize = this.getReplicaLogSize(subJobPO.getClusterId(),
partition.getLeaderBrokerId(),
subJobPO.getTopicName(),
subJobPO.getPartitionId()
);
// Compute the newly added replicas
Set<Integer> newReplicas = new HashSet<>(CommonUtils.string2IntList(subJobPO.getReassignBrokerIds()));
newReplicas.removeAll(CommonUtils.string2IntList(subJobPO.getOriginalBrokerIds()));
Long finishedLogSizeUnitB = 0L;
for (Integer brokerId: newReplicas) {
Float replicaLogSize = this.getReplicaLogSize(subJobPO.getClusterId(),
brokerId,
subJobPO.getTopicName(),
subJobPO.getPartitionId()
);
if (replicaLogSize == null) {
continue;
}
finishedLogSizeUnitB += replicaLogSize.longValue();
}
ReassignSubJobExtendData extendData = ConvertUtil.str2ObjByJson(subJobPO.getExtendData(), ReassignSubJobExtendData.class);
extendData.setFinishedReassignLogSizeUnitB(finishedLogSizeUnitB);
if (leaderLogSize != null) {
extendData.setNeedReassignLogSizeUnitB(leaderLogSize.longValue() * newReplicas.size());
}
// If the sub-task has finished but the partition metrics are not yet updated, treat the finished log size as equal to the log size to reassign
if (JobStatusEnum.isFinished(subJobPO.getStatus()) && finishedLogSizeUnitB.equals(0L)){
extendData.setFinishedReassignLogSizeUnitB(extendData.getNeedReassignLogSizeUnitB());
}
if (extendData.getNeedReassignLogSizeUnitB().equals(0L) || JobStatusEnum.isFinished(subJobPO.getStatus())) {
extendData.setRemainTimeUnitMs(0L);
} else if (extendData.getFinishedReassignLogSizeUnitB().equals(0L)) {
// Unknown
extendData.setRemainTimeUnitMs(null);
} else {
Long usedTime = System.currentTimeMillis() - subJobPO.getStartTime().getTime();
// (logSize to reassign / logSize already reassigned) = (total time / elapsed time)
extendData.setRemainTimeUnitMs(extendData.getNeedReassignLogSizeUnitB() * usedTime / extendData.getFinishedReassignLogSizeUnitB());
}
return Result.buildSuc(extendData);
}
private Float getReplicaLogSize(Long clusterPhyId, Integer brokerId, String topicName, Integer partitionId) {
Result<ReplicationMetrics> replicaMetricsResult = replicationMetricService.collectReplicaMetricsFromKafka(
clusterPhyId,
topicName,
partitionId,
brokerId,
ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_SIZE
);
if (!replicaMetricsResult.hasData()) {
return null;
}
return replicaMetricsResult.getData().getMetric(ReplicaMetricVersionItems.REPLICATION_METRIC_LOG_SIZE);
}
}
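The remaining-time estimate in getReassignSubJobExtendData rests on the proportion (logSize to reassign / logSize already reassigned) = (total time / elapsed time); note the code stores the resulting estimated total duration in the remaining-time field, which errs on the conservative side. A standalone sketch of the arithmetic with made-up numbers:

// Worked example of the proportion used above; all numbers are made up.
public class RemainTimeSketch {
    public static void main(String[] args) {
        long needLogSizeUnitB     = 10L * 1024 * 1024 * 1024; // 10 GiB to move in total
        long finishedLogSizeUnitB =  4L * 1024 * 1024 * 1024; // 4 GiB moved so far
        long usedTimeUnitMs       =  8L * 60 * 1000;          // 8 minutes elapsed
        // need / finished = totalTime / usedTime  =>  totalTime = need * usedTime / finished
        long totalTimeUnitMs = needLogSizeUnitB * usedTimeUnitMs / finishedLogSizeUnitB;
        System.out.println("estimated total ms: " + totalTimeUnitMs);  // 10/4 * 8 min = 20 min = 1,200,000 ms
        System.out.println("strictly remaining ms: " + (totalTimeUnitMs - usedTimeUnitMs)); // 12 min
    }
}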

View File

@@ -0,0 +1,481 @@
package com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceIntervalDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceOverviewDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalancePreviewDTO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.dto.ClusterBalanceStrategyDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.job.JobDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceItemState;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.job.content.JobClusterBalanceContent;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BrokerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobConfigPO;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.po.ClusterBalanceJobPO;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.vo.*;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.converter.ClusterBalanceConverter;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.enums.ClusterBalanceStateEnum;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobHandleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobStatusEnum;
import com.xiaojukeji.know.streaming.km.common.enums.job.JobTypeEnum;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceJobConfigService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceJobService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceService;
import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
import com.xiaojukeji.know.streaming.km.core.service.job.JobService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.core.service.version.metrics.BrokerMetricVersionItems;
import com.xiaojukeji.know.streaming.km.rebalance.executor.ExecutionRebalance;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceParameter;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BrokerBalanceState;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.OptimizerResult;
import org.apache.logging.log4j.core.util.CronExpression;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.text.ParseException;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@Service
@EnterpriseLoadReBalance
public class ClusterBalanceServiceImpl implements ClusterBalanceService {
private static final ILog logger = LogFactory.getLog(ClusterBalanceServiceImpl.class);
@Value("${es.client.address}")
private String esAddress;
@Autowired
private JobService jobService;
@Autowired
private ClusterBalanceJobService clusterBalanceJobService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private BrokerService brokerService;
@Autowired
private BrokerSpecService brokerSpecService;
@Autowired
private ClusterBalanceJobConfigService clusterBalanceJobConfigService;
@Autowired
private BrokerMetricService brokerMetricService;
@Autowired
private TopicService topicService;
@Autowired
private ConfigUtils configUtils;
@Override
public Result<ClusterBalanceStateVO> state(Long clusterPhyId) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
Result<ClusterBalanceJobConfigPO> configPOResult = clusterBalanceJobConfigService.getByClusterId(clusterPhyId);
if(!configPOResult.hasData()){
return Result.buildFromIgnoreData(configPOResult);
}
Map<Integer, Broker> brokerMap = brokerService.listAllBrokersFromDB(clusterPhy.getId()).stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhy.getId());
ClusterBalanceStateVO clusterBalanceStateVO = new ClusterBalanceStateVO();
try {
CronExpression cronExpression = new CronExpression(configPOResult.getData().getTaskCron());
// Compute the next time the cron schedule fires
clusterBalanceStateVO.setNext(cronExpression.getTimeAfter(new Date()));
} catch (ParseException e) {
logger.error("method=state||clusterId:{}||errMsg=exception", clusterPhyId, e);
}
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
clusterBalanceStateVO.setEnable(configPOResult.getData().getStatus() == 1);
Map<Resource, Double> resourceDoubleMap;
Map<Integer, BrokerBalanceState> brokerBalanceStateMap;
try {
resourceDoubleMap = ExecutionRebalance.getClusterAvgResourcesState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
brokerBalanceStateMap = ExecutionRebalance
.getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
}catch (Exception e){
logger.error("method=state||clusterPhyId={}||errMsg=exception", clusterPhyId, e);
return Result.buildFailure(e.getMessage());
}
// Cluster-level balance state
ArrayList<BrokerBalanceState> balanceStates = new ArrayList<>(brokerBalanceStateMap.values());
clusterBalanceStateVO.setStatus(ClusterBalanceStateEnum.BALANCE.getState());
balanceStates.forEach(brokerBalanceState ->{
if ((brokerBalanceState.getDiskBalanceState() != null && !brokerBalanceState.getDiskBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState()))
|| (brokerBalanceState.getCpuBalanceState() != null && !brokerBalanceState.getCpuBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState()))
|| (brokerBalanceState.getBytesOutBalanceState() != null && !brokerBalanceState.getBytesOutBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState()))
|| (brokerBalanceState.getBytesInBalanceState() != null && !brokerBalanceState.getBytesInBalanceState().equals(ClusterBalanceStateEnum.BALANCE.getState()))){
clusterBalanceStateVO.setStatus(ClusterBalanceStateEnum.UNBALANCED.getState());
}
});
clusterBalanceStateVO.setSub(getStateSubVOMap(resourceDoubleMap, balanceStates, clusterPhyId));
return Result.buildSuc(clusterBalanceStateVO);
}
@Override
public Result<ClusterBalanceJobConfigVO> config(Long clusterPhyId) {
Result<ClusterBalanceJobConfigPO> configPOResult = clusterBalanceJobConfigService.getByClusterId(clusterPhyId);
if (!configPOResult.hasData()){
return Result.buildFromIgnoreData(configPOResult);
}
return Result.buildSuc(ClusterBalanceConverter.convert2ClusterBalanceJobConfigVO(configPOResult.getData()));
}
@Override
public PaginationResult<ClusterBalanceOverviewVO> overview(Long clusterPhyId, ClusterBalanceOverviewDTO dto) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return PaginationResult.buildFailure(ResultStatus.CLUSTER_NOT_EXIST, dto);
}
Result<ClusterBalanceJobConfigPO> configPOResult = clusterBalanceJobConfigService.getByClusterId(clusterPhyId);
if(configPOResult.failed()){
return PaginationResult.buildFailure(configPOResult, dto);
}
// Fetch broker spec info
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhyId);
List<ClusterBalanceOverviewVO> clusterBalanceOverviewVOS = new ArrayList<>();
List<Broker> brokerList = brokerService.listAllBrokersFromDB(clusterPhyId);
Map<Integer, Broker> brokerMap = brokerList.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
Map<Integer, BrokerBalanceState> brokerBalanceStateMap = new HashMap<>();
if (configPOResult.hasData()) {
try {
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
brokerBalanceStateMap = ExecutionRebalance
.getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
} catch (Exception e) {
logger.error("method=overview||clusterBalanceOverviewDTO={}||errMsg=exception", dto, e);
return PaginationResult.buildFailure(e.getMessage(), dto);
}
}
// Fetch the latest metrics of alive brokers
Result<List<BrokerMetrics>> metricsResult = brokerMetricService.getLatestMetricsFromES(
clusterPhyId,
brokerList.stream().filter(Broker::alive).map(Broker::getBrokerId).collect(Collectors.toList())
);
if (metricsResult.failed()){
return PaginationResult.buildFailure(metricsResult, dto);
}
Map<Integer, BrokerMetrics> brokerMetricsMap = new HashMap<>();
if (metricsResult.hasData()){
brokerMetricsMap = metricsResult.getData().stream().collect(Collectors.toMap(BrokerMetrics::getBrokerId, Function.identity()));
}
for(Map.Entry<Integer, Broker> entry : brokerMap.entrySet()){
Broker broker = entry.getValue();
if (broker == null){
continue;
}
ClusterBalanceOverviewVO clusterBalanceOverviewVO = new ClusterBalanceOverviewVO();
clusterBalanceOverviewVO.setBrokerId(entry.getKey());
clusterBalanceOverviewVO.setHost(broker.getHost());
clusterBalanceOverviewVO.setRack(broker.getRack());
BrokerMetrics brokerMetrics = brokerMetricsMap.get(entry.getKey());
if (brokerMetrics != null){
clusterBalanceOverviewVO.setLeader(brokerMetrics.getMetric(BrokerMetricVersionItems.BROKER_METRIC_LEADERS) != null
? brokerMetrics.getMetric(BrokerMetricVersionItems.BROKER_METRIC_LEADERS).intValue() : null);
clusterBalanceOverviewVO.setReplicas(brokerMetrics.getMetric(BrokerMetricVersionItems.BROKER_METRIC_PARTITIONS) != null
? brokerMetrics.getMetric(BrokerMetricVersionItems.BROKER_METRIC_PARTITIONS).intValue() : null);
}
clusterBalanceOverviewVO.setSub(ClusterBalanceConverter.convert2MapClusterBalanceOverviewSubVO(brokerSpecMap.get(entry.getKey()), brokerBalanceStateMap.get(entry.getKey())));
clusterBalanceOverviewVOS.add(clusterBalanceOverviewVO);
}
// Filter by balance state if requested
if (dto.getStateParam() != null && !dto.getStateParam().isEmpty()){
clusterBalanceOverviewVOS = filterState(dto.getStateParam(), clusterBalanceOverviewVOS);
}
clusterBalanceOverviewVOS = PaginationUtil.pageByFuzzyFilter(ConvertUtil.list2List(clusterBalanceOverviewVOS, ClusterBalanceOverviewVO.class), dto.getSearchKeywords(), Arrays.asList("host"));
return PaginationResult.buildSuc(clusterBalanceOverviewVOS, clusterBalanceOverviewVOS.size(), dto.getPageNo(), dto.getPageSize());
}
@Override
public Result<ClusterBalanceItemState> getItemState(Long clusterPhyId) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
ClusterBalanceItemState clusterBalanceState = new ClusterBalanceItemState();
Result<ClusterBalanceJobConfigPO> configPOResult = clusterBalanceJobConfigService.getByClusterId(clusterPhyId);
if(!configPOResult.hasData()){
clusterBalanceState.setConfigureBalance(Boolean.FALSE);
clusterBalanceState.setEnable(Boolean.FALSE);
return Result.buildSuc(clusterBalanceState);
}
Map<Integer, Broker> brokerMap = brokerService.listAllBrokersFromDB(clusterPhy.getId()).stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhy.getId());
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
clusterBalanceState.setConfigureBalance(Boolean.TRUE);
clusterBalanceState.setEnable(configPOResult.getData().getStatus() == 1);
Map<Integer, BrokerBalanceState> brokerBalanceStateMap;
try {
brokerBalanceStateMap = ExecutionRebalance
.getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
}catch (Exception e){
logger.error("method=state||clusterPhyId={}||errMsg=exception", clusterPhyId, e);
return Result.buildFailure(e.getMessage());
}
Map<String, Boolean> itemStateMap = new HashMap<>();
// Cluster-level balance state
ArrayList<BrokerBalanceState> balanceStates = new ArrayList<>(brokerBalanceStateMap.values());
List<ClusterBalanceIntervalDTO> intervalDTOS = ConvertUtil.str2ObjArrayByJson(configPOResult.getData().getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);
intervalDTOS.forEach(intervalDTO->{
if (Resource.CPU.resource().equals(intervalDTO.getType())){
itemStateMap.put(Resource.CPU.resource(), balanceStates.stream()
.filter(status->ClusterBalanceStateEnum.BALANCE.getState().equals(status.getCpuBalanceState())).count()==brokerMap.size());
}else if (Resource.NW_IN.resource().equals(intervalDTO.getType())){
itemStateMap.put(Resource.NW_IN.resource(), balanceStates.stream()
.filter(status->ClusterBalanceStateEnum.BALANCE.getState().equals(status.getBytesInBalanceState())).count()==brokerMap.size());
}else if (Resource.NW_OUT.resource().equals(intervalDTO.getType())){
itemStateMap.put(Resource.NW_OUT.resource(), balanceStates.stream()
.filter(status->ClusterBalanceStateEnum.BALANCE.getState().equals(status.getBytesOutBalanceState())).count()==brokerMap.size());
}else if (Resource.DISK.resource().equals(intervalDTO.getType())){
itemStateMap.put(Resource.DISK.resource(), balanceStates.stream()
.filter(status->ClusterBalanceStateEnum.BALANCE.getState().equals(status.getDiskBalanceState())).count()==brokerMap.size());
}
});
clusterBalanceState.setItemState(itemStateMap);
return Result.buildSuc(clusterBalanceState);
}
private List<ClusterBalanceOverviewVO> filterState(Map<String, Integer> stateParam, List<ClusterBalanceOverviewVO> oldVos){
if (stateParam.isEmpty()){
return oldVos;
}
List<ClusterBalanceOverviewVO> overviewVOS = new ArrayList<>();
for(ClusterBalanceOverviewVO oldVo : oldVos){
boolean check = true;
for(Map.Entry<String, Integer> paramEntry : stateParam.entrySet()){
ClusterBalanceOverviewSubVO subVO = oldVo.getSub().get(paramEntry.getKey());
if (subVO == null){
check = false;
continue;
}
if (subVO.getStatus()==null || !subVO.getStatus().equals(paramEntry.getValue())){
check = false;
}
}
if (check){
overviewVOS.add(oldVo);
}
}
return overviewVOS;
}
@Override
public PaginationResult<ClusterBalanceHistoryVO> history(Long clusterPhyId, PaginationBaseDTO dto) {
return clusterBalanceJobService.page(clusterPhyId, dto);
}
@Override
public Result<ClusterBalancePlanVO> plan(Long clusterPhyId, Long jobId) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
Result<ClusterBalanceJobPO> jobPOResult = clusterBalanceJobService.getClusterBalanceJobById(jobId);
if (jobPOResult.failed()){
return Result.buildFrom(ResultStatus.NOT_EXIST);
}
List<Broker> allBrokers = brokerService.listAllBrokersFromDB(clusterPhyId);
ClusterBalancePlanVO planVO = new ClusterBalancePlanVO();
ClusterBalanceJobPO jobPO = jobPOResult.getData();
planVO.setMoveSize(jobPO.getTotalReassignSize());
planVO.setBrokers(ClusterBalanceConverter.convert2HostList(allBrokers, jobPO.getBrokers()));
planVO.setBlackTopics(CommonUtils.string2StrList(jobPO.getTopicBlackList()));
planVO.setReplicas(jobPO.getTotalReassignReplicaNum());
planVO.setType(jobPO.getType());
planVO.setTopics(CommonUtils.string2StrList(jobPO.getMoveInTopicList()));
planVO.setDetail(ConvertUtil.str2ObjArrayByJson(jobPO.getBrokerBalanceDetail(), ClusterBalancePlanDetailVO.class));
planVO.setReassignmentJson(jobPO.getReassignmentJson());
planVO.setClusterBalanceIntervalList(ConvertUtil.str2ObjArrayByJson(jobPO.getBalanceIntervalJson(), ClusterBalanceIntervalVO.class));
return Result.buildSuc(planVO);
}
@Override
public Result<ClusterBalancePlanVO> preview(Long clusterPhyId, ClusterBalancePreviewDTO clusterBalancePreviewDTO) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
List<Broker> allBrokers = brokerService.listAllBrokersFromDB(clusterPhy.getId());
Map<Integer, BrokerSpec> brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhy.getId());
for(Broker broker:allBrokers){
if (brokerSpecMap.get(broker.getBrokerId()) == null){
return Result.buildFromRSAndMsg(ResultStatus.BROKER_SPEC_NOT_EXIST,String.format("Broker规格信息不存在:brokerId:%s", broker.getBrokerId()));
}
}
if (clusterBalancePreviewDTO.getBrokers() == null || clusterBalancePreviewDTO.getBrokers().isEmpty()){
clusterBalancePreviewDTO.setBrokers(
allBrokers.stream().map(Broker::getBrokerId).collect(Collectors.toList()));
}
// Generate the balance plan
Map<Integer, Broker> brokerMap = allBrokers.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(clusterBalancePreviewDTO, brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames);
ExecutionRebalance executionRebalance = new ExecutionRebalance();
try {
OptimizerResult optimizerResult = executionRebalance.optimizations(balanceParameter);
if (optimizerResult == null) {
return Result.buildFrom(ResultStatus.KAFKA_OPERATE_FAILED);
}
// Build the balance job plan from the optimizer result
return Result.buildSuc(ClusterBalanceConverter.convert2ClusterBalancePlanVO(clusterBalancePreviewDTO, optimizerResult, allBrokers));
} catch (Exception e){
logger.error("method=preview||clusterBalancePreviewDTO:{}||errMsg=exception", clusterBalancePreviewDTO, e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
}
@Override
public Result<ClusterBalancePlanVO> schedule(Long clusterPhyId, Long jobId) {
Result<ClusterBalanceJobPO> rbr = clusterBalanceJobService.getClusterBalanceJobById(jobId);
if (!rbr.hasData()){
return Result.buildFromIgnoreData(rbr);
}
return preview(clusterPhyId, ClusterBalanceConverter.convert2ClusterBalancePreviewDTO(rbr.getData()));
}
@Override
public Result<Void> strategy(Long clusterPhyId, ClusterBalanceStrategyDTO dto, String operator) {
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId);
if (clusterPhy == null){
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
// If this is not a scheduled (periodic) job, add a one-off task to jobService directly
if(!dto.isScheduleJob()){
JobDTO jobDTO = new JobDTO();
jobDTO.setPlanTime(new Date());
jobDTO.setJobStatus(JobStatusEnum.WAITING.getStatus());
jobDTO.setCreator(operator);
jobDTO.setJobType(JobHandleEnum.CLUSTER_BALANCE.getType());
jobDTO.setTarget(JobHandleEnum.CLUSTER_BALANCE.getMessage());
jobDTO.setJobData(ConvertUtil.obj2Json(dto));
return jobService.addTask(clusterPhyId, jobDTO, operator);
}else {
return clusterBalanceJobConfigService.replaceClusterBalanceJobConfigByClusterId(ClusterBalanceConverter.convert2ClusterBalanceJobConfigPO(dto, operator));
}
}
@Override
public Result<Void> createScheduleJob(Long clusterPhyId, long triggerTimeUnitMs){
// Fetch the scheduled-job strategy configured for this clusterPhyId
Result<ClusterBalanceJobConfigPO> configPOResult = clusterBalanceJobConfigService.getByClusterId(clusterPhyId);
if (!configPOResult.hasData() || configPOResult.getData().getStatus().equals(Constant.DOWN)){
return Result.buildSuc();
}
try {
CronExpression cronExpression = new CronExpression(configPOResult.getData().getTaskCron());
// Check whether the trigger time satisfies the cron schedule
if (!cronExpression.isSatisfiedBy(new Date(triggerTimeUnitMs))){
return Result.buildSuc();
}
} catch (ParseException e) {
logger.error("method=createScheduleJob||clusterId:{}||errMsg=exception", clusterPhyId, e);
}
// The schedule is satisfied: create a new job task
JobDTO jobDTO = new JobDTO();
jobDTO.setPlanTime(new Date());
jobDTO.setJobStatus(JobStatusEnum.WAITING.getStatus());
jobDTO.setCreator(Constant.SYSTEM);
jobDTO.setJobType(JobTypeEnum.CLUSTER_BALANCE.getType());
jobDTO.setTarget(JobHandleEnum.CLUSTER_BALANCE.getMessage());
JobClusterBalanceContent content = ClusterBalanceConverter.convert2JobClusterBalanceContent(configPOResult.getData());
jobDTO.setJobData(ConvertUtil.obj2Json(content));
return jobService.addTask(clusterPhyId, jobDTO, Constant.SYSTEM);
}
private Map<String, ClusterBalanceStateSubVO> getStateSubVOMap(Map<Resource, Double> resourceDoubleMap, ArrayList<BrokerBalanceState> balanceStates, Long clusterId){
Map<String, ClusterBalanceStateSubVO> subVOMap = new HashMap<>();
Map<String, Double> balanceInterval = clusterBalanceJobService.getBalanceInterval(clusterId);
// Per-resource accessor for the broker-level balance state
Map<String, Function<BrokerBalanceState, Integer>> stateGetters = new HashMap<>();
stateGetters.put(Resource.CPU.resource(), BrokerBalanceState::getCpuBalanceState);
stateGetters.put(Resource.NW_IN.resource(), BrokerBalanceState::getBytesInBalanceState);
stateGetters.put(Resource.NW_OUT.resource(), BrokerBalanceState::getBytesOutBalanceState);
stateGetters.put(Resource.DISK.resource(), BrokerBalanceState::getDiskBalanceState);
for (Map.Entry<Resource, Double> entry : resourceDoubleMap.entrySet()){
String resourceName = entry.getKey().resource();
Function<BrokerBalanceState, Integer> getter = stateGetters.get(resourceName);
if (getter == null){
continue;
}
ClusterBalanceStateSubVO subVO = new ClusterBalanceStateSubVO();
subVO.setAvg(entry.getValue());
subVO.setInterval(balanceInterval.get(resourceName));
subVO.setBetweenNu(balanceStates.stream().filter(state -> ClusterBalanceStateEnum.BALANCE.getState().equals(getter.apply(state))).count());
subVO.setBigNu(balanceStates.stream().filter(state -> ClusterBalanceStateEnum.ABOVE_BALANCE.getState().equals(getter.apply(state))).count());
subVO.setSmallNu(balanceStates.stream().filter(state -> ClusterBalanceStateEnum.BELOW_BALANCE.getState().equals(getter.apply(state))).count());
subVOMap.put(resourceName, subVO);
}
return subVOMap;
}
}
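Both state() and createScheduleJob() lean on log4j-core's Quartz-style CronExpression: once to display the next trigger time, and once to gate job creation. A minimal standalone sketch; the cron string (every day at 02:00) is a placeholder:

import java.text.ParseException;
import java.util.Date;
import org.apache.logging.log4j.core.util.CronExpression;

// Demonstrates the two CronExpression calls used by the service; the schedule is a placeholder.
public class CronCheckSketch {
    public static void main(String[] args) throws ParseException {
        CronExpression cron = new CronExpression("0 0 2 * * ?");
        Date now = new Date();
        System.out.println("next fire time: " + cron.getTimeAfter(now));  // shown as the "next" field in state()
        System.out.println("fires right now: " + cron.isSatisfiedBy(now)); // createScheduleJob() proceeds only when true
    }
}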

View File

@@ -5,10 +5,12 @@ import com.didiglobal.logi.log.LogFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.collect.Table;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.metrices.MetricsClusterPhyDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.enterprise.rebalance.bean.entity.ClusterBalanceItemState;
import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BrokerMetrics;
import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics;
@@ -38,6 +40,7 @@ import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerMetricService;
import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterMetricService;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.enterprise.rebalance.service.ClusterBalanceService;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService;
import com.xiaojukeji.know.streaming.km.core.service.job.JobService;
@@ -50,6 +53,7 @@ import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
import com.xiaojukeji.know.streaming.km.persistence.es.dao.ClusterMetricESDAO;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaJMXClient;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import org.apache.kafka.common.resource.ResourceType;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
@@ -69,12 +73,14 @@ import java.util.stream.Collectors;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.ClusterMetrics.initWithMetrics;
import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.ClusterMetricVersionItems.*;
import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.TopicMetricVersionItems.*;
/**
* @author didi
*/
@Service("clusterMetricService")
@EnterpriseLoadReBalance(all = false)
public class ClusterMetricServiceImpl extends BaseMetricService implements ClusterMetricService {
private static final ILog LOGGER = LogFactory.getLog(ClusterMetricServiceImpl.class);
@@ -113,6 +119,9 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
public static final String CLUSTER_METHOD_GET_JOBS_SUCCESS = "getJobsSuccess";
public static final String CLUSTER_METHOD_GET_JOBS_FAILED = "getJobsFailed";
@EnterpriseLoadReBalance(all = false)
public static final String CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO = "getClusterLoadReBalanceInfo";
@Autowired
private HealthStateService healthStateService;
@@ -152,6 +161,9 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
@Autowired
private JobService jobService;
@Autowired
private ClusterBalanceService clusterBalanceService;
@Autowired
private ClusterPhyService clusterPhyService;
@@ -179,6 +191,7 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
}
@Override
@EnterpriseLoadReBalance(all = false)
protected void initRegisterVCHandler(){
registerVCHandler( CLUSTER_METHOD_DO_NOTHING, this::doNothing);
registerVCHandler( CLUSTER_METHOD_GET_TOPIC_SIZE, this::getTopicSize);
@@ -218,6 +231,8 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
registerVCHandler( CLUSTER_METHOD_GET_JOBS_WAITING, this::getJobsWaiting);
registerVCHandler( CLUSTER_METHOD_GET_JOBS_SUCCESS, this::getJobsSuccess);
registerVCHandler( CLUSTER_METHOD_GET_JOBS_FAILED, this::getJobsFailed);
registerVCHandler( CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO, this::getClusterLoadReBalanceInfo);
}
@Override
@@ -676,6 +691,26 @@ public class ClusterMetricServiceImpl extends BaseMetricService implements Clust
return Result.buildSuc(initWithMetrics(clusterId, metric, count));
}
@EnterpriseLoadReBalance
private Result<ClusterMetrics> getClusterLoadReBalanceInfo(VersionItemParam metricParam) {
ClusterMetricParam param = (ClusterMetricParam)metricParam;
Result<ClusterBalanceItemState> stateResult = clusterBalanceService.getItemState(param.getClusterId());
if (stateResult.failed()) {
return Result.buildFromIgnoreData(stateResult);
}
ClusterBalanceItemState state = stateResult.getData();
ClusterMetrics metric = ClusterMetrics.initWithMetrics(param.getClusterId(), CLUSTER_METRIC_LOAD_RE_BALANCE_ENABLE, state.getEnable() ? Constant.YES : Constant.NO);
metric.putMetric(CLUSTER_METRIC_LOAD_RE_BALANCE_CPU, state.getResItemState(Resource.CPU).floatValue());
metric.putMetric(CLUSTER_METRIC_LOAD_RE_BALANCE_NW_IN, state.getResItemState(Resource.NW_IN).floatValue());
metric.putMetric(CLUSTER_METRIC_LOAD_RE_BALANCE_NW_OUT, state.getResItemState(Resource.NW_OUT).floatValue());
metric.putMetric(CLUSTER_METRIC_LOAD_RE_BALANCE_DISK, state.getResItemState(Resource.DISK).floatValue());
return Result.buildSuc(metric);
}
/**
* Fetch metrics from one controller's JMX, then aggregate them into cluster-level metrics
* @param metricParam

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.know.streaming.km.core.service.config;
import lombok.Getter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
@@ -13,4 +14,7 @@ import org.springframework.stereotype.Service;
public class ConfigUtils {
private ConfigUtils() {
}
@Value("${cluster-balance.ignored-topics.time-second:300}")
private Integer clusterBalanceIgnoredTopicsTimeSecond;
}
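The placeholder syntax above falls back to the literal after the colon when the property is missing, so the ignored-topics window defaults to 300 seconds. A minimal illustration of the same Spring mechanism; the component and property key are hypothetical:

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// Hypothetical component showing Spring's "${key:default}" fallback; not part of this commit.
@Component
public class DemoProps {
    // Resolves to 300 unless demo.window-second is set in application.yml or the environment
    @Value("${demo.window-second:300}")
    private Integer windowSecond;
}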

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.know.streaming.km.core.service.version.metrics;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
import com.xiaojukeji.know.streaming.km.common.bean.entity.version.VersionMetricControlItem;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum;
@@ -18,6 +19,7 @@ import static com.xiaojukeji.know.streaming.km.core.service.cluster.impl.Cluster
* @author didi
*/
@Component
@EnterpriseLoadReBalance(all = false)
public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
/**
* Overall health metrics
@@ -103,6 +105,13 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
public static final String CLUSTER_METRIC_JOB_SUCCESS = "JobsSuccess";
public static final String CLUSTER_METRIC_JOB_FAILED = "JobsFailed";
@EnterpriseLoadReBalance
public static final String CLUSTER_METRIC_LOAD_RE_BALANCE_ENABLE = "LoadReBalanceEnable";
public static final String CLUSTER_METRIC_LOAD_RE_BALANCE_CPU = "LoadReBalanceCpu";
public static final String CLUSTER_METRIC_LOAD_RE_BALANCE_NW_IN = "LoadReBalanceNwIn";
public static final String CLUSTER_METRIC_LOAD_RE_BALANCE_NW_OUT = "LoadReBalanceNwOut";
public static final String CLUSTER_METRIC_LOAD_RE_BALANCE_DISK = "LoadReBalanceDisk";
public static final String CLUSTER_METRIC_COLLECT_COST_TIME = Constant.COLLECT_METRICS_COST_TIME_METRICS_NAME;
public ClusterMetricVersionItems(){}
@@ -113,6 +122,7 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
}
@Override
@EnterpriseLoadReBalance(all = false)
public List<VersionMetricControlItem> init(){
List<VersionMetricControlItem> itemList = new ArrayList<>();
@@ -397,6 +407,27 @@ public class ClusterMetricVersionItems extends BaseMetricVersionMetric {
.name(CLUSTER_METRIC_JOB_FAILED).unit("").desc("集群failed任务总数").category(CATEGORY_JOB)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_JOBS_FAILED )));
// Cluster dimension - load rebalance related
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_LOAD_RE_BALANCE_ENABLE).unit("是/否").desc("是否开启均衡, 10").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO )));
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_LOAD_RE_BALANCE_CPU).unit("是/否").desc("CPU是否均衡, 10").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO )));
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_LOAD_RE_BALANCE_NW_IN).unit("是/否").desc("BytesIn是否均衡, 10").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO )));
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_LOAD_RE_BALANCE_NW_OUT).unit("是/否").desc("BytesOut是否均衡, 10").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO )));
itemList.add( buildAllVersionsItem()
.name(CLUSTER_METRIC_LOAD_RE_BALANCE_DISK).unit("是/否").desc("Disk是否均衡, 10").category(CATEGORY_CLUSTER)
.extend( buildMethodExtend( CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO )));
itemList.add(buildAllVersionsItem()
.name(CLUSTER_METRIC_COLLECT_COST_TIME).unit("").desc("采集Cluster指标的耗时").category(CATEGORY_PERFORMANCE)
.extendMethod(CLUSTER_METHOD_DO_NOTHING));
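All five rebalance items above resolve through CLUSTER_METHOD_GET_CLUSTER_LOAD_RE_BALANCE_INFO, i.e. the JMX collector at the top of this section. A read-side sketch (hypothetical usage, assuming ClusterMetrics.getMetrics() returns a Map<String, Float>, which the putMetric(..., floatValue()) calls above imply):
// hypothetical read of the collected metric, not part of the commit
Float enabled = clusterMetrics.getMetrics().get(ClusterMetricVersionItems.CLUSTER_METRIC_LOAD_RE_BALANCE_ENABLE);
boolean reBalanceOn = enabled != null && enabled.intValue() == Constant.YES;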

View File

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-testing</artifactId>
<version>${km.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>km</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${km.revision}</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-common</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-core</artifactId>
<version>${project.parent.version}</version>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.testing;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
public class KmTestingApplication {
public static void main(String[] args) {
SpringApplication.run(KmTestingApplication.class, args);
}
}

View File

@@ -0,0 +1,30 @@
package com.xiaojukeji.know.streaming.km.testing.biz;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaConsumerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaProducerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestConsumerVO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestProducerVO;
import java.util.List;
@EnterpriseTesting
public interface KafkaClientTestManager {
/**
 * Produce test
 * @param dto produce-test parameters
 * @param operator the operator
 * @return
 */
Result<List<TestProducerVO>> produceTest(KafkaProducerDTO dto, String operator);
/**
 * Consume test
 * @param dto consume-test parameters
 * @param operator the operator
 * @return
 */
Result<TestConsumerVO> consumeTest(KafkaConsumerDTO dto, String operator);
}
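A minimal caller sketch for orientation (hypothetical, not part of the commit; KafkaProducerDTO is defined later in this commit and, as a Lombok @Data class extending ClusterTopicDTO, is assumed to expose the setters used here):
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.testing.biz.KafkaClientTestManager;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaProducerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestProducerVO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Properties;
// hypothetical caller used only to illustrate the contract above
@Component
public class ProduceTestCaller {
@Autowired
private KafkaClientTestManager kafkaClientTestManager;
public void runOnce() {
KafkaProducerDTO dto = new KafkaProducerDTO();
dto.setClusterId(1L); // assumed setter from ClusterTopicDTO
dto.setTopicName("test-topic"); // assumed setter from ClusterTopicDTO
dto.setRecordValue("hello");
dto.setRecordCount(3);
dto.setClientProperties(new Properties());
dto.setRecordOperate(false);
Result<List<TestProducerVO>> result = kafkaClientTestManager.produceTest(dto, "admin");
// on success, result.getData() holds one TestProducerVO per produced record
}
}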

View File

@@ -0,0 +1,596 @@
package com.xiaojukeji.know.streaming.km.testing.biz.impl;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.didiglobal.logi.security.common.dto.oplog.OplogDTO;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.record.RecordHeaderKS;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum;
import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum;
import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService;
import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService;
import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
import com.xiaojukeji.know.streaming.km.testing.biz.KafkaClientTestManager;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaConsumerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaConsumerFilterDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaConsumerStartFromDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaProducerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestConsumerVO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestPartitionConsumedVO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestProducerVO;
import com.xiaojukeji.know.streaming.km.testing.common.enums.KafkaConsumerFilterEnum;
import com.xiaojukeji.know.streaming.km.testing.common.enums.KafkaConsumerStartFromEnum;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.validation.annotation.Validated;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
@Component
@EnterpriseTesting
public class KafkaClientTestManagerImpl implements KafkaClientTestManager {
private static final ILog log = LogFactory.getLog(KafkaClientTestManagerImpl.class);
@Autowired
private TopicService topicService;
@Autowired
private PartitionService partitionService;
@Autowired
private GroupService groupService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private OpLogWrapService opLogWrapService;
@Override
public Result<TestConsumerVO> consumeTest(KafkaConsumerDTO dto, String operator) {
if (ValidateUtils.anyNull(dto, operator)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
// fetch the cluster info
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(dto.getClusterId());
if (clusterPhy == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(dto.getClusterId()));
}
// validate that the startFrom and filter parameters are legal
Result<Void> rv = this.checkStartFromAndFilterLegal(dto.getStartFrom(), dto.getFilter());
if (rv.failed()) {
// illegal parameters
return Result.buildFromIgnoreData(rv);
}
KafkaConsumer<String, String> kafkaConsumer = null;
try {
// resolve the offsets the consume test should start from
Result<List<PartitionOffsetDTO>> consumeStartOffsetResult = this.getConsumeStartOffset(dto.getClusterId(), dto.getTopicName(), dto.getStartFrom());
if (consumeStartOffsetResult.failed()) {
// failed to fetch the offsets
return Result.buildFromIgnoreData(consumeStartOffsetResult);
}
// fetch the topic's begin offsets
Result<Map<TopicPartition, Long>> partitionBeginOffsetMapResult = partitionService.getPartitionOffsetFromKafka(dto.getClusterId(), dto.getTopicName(), OffsetSpec.earliest(), null);
if (partitionBeginOffsetMapResult.failed()) {
return Result.buildFromIgnoreData(partitionBeginOffsetMapResult);
}
// compute the effective start offsets (never earlier than the begin offsets)
consumeStartOffsetResult.getData().forEach(elem -> {
long offset = Math.max(partitionBeginOffsetMapResult.getData().get(new TopicPartition(dto.getTopicName(), elem.getPartitionId())), elem.getOffset());
elem.setOffset(offset);
});
// fetch the topic's end offsets
Result<Map<TopicPartition, Long>> partitionEndOffsetMapResult = partitionService.getPartitionOffsetFromKafka(dto.getClusterId(), dto.getTopicName(), OffsetSpec.latest(), null);
if (partitionEndOffsetMapResult.failed()) {
return Result.buildFromIgnoreData(partitionEndOffsetMapResult);
}
// create the consumer client
Properties properties = this.buildProperties(clusterPhy, dto.getClientProperties(), true);
properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, dto.getMaxRecords());
kafkaConsumer = new KafkaConsumer<>(properties);
// poll the records
List<ConsumerRecord> recordList = this.fetchData(
kafkaConsumer,
dto.getMaxDurationUnitMs(),
dto.getMaxRecords(),
dto.getTopicName(),
consumeStartOffsetResult.getData(),
partitionEndOffsetMapResult.getData()
);
// assemble the response
Result<TestConsumerVO> voResult = Result.buildSuc(this.convert2TestConsumerVO(
dto.getTopicName(),
recordList,
partitionEndOffsetMapResult.getData(),
consumeStartOffsetResult.getData(),
dto.getFilter())
);
// record the user operation
if (voResult.successful() && dto.getRecordOperate()) {
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.SEARCH.getDesc(),
ModuleEnum.KAFKA_TOPIC_DATA.getDesc(),
dto.getTopicName(),
MsgConstant.getTopicBizStr(dto.getClusterId(), dto.getTopicName())
));
}
return voResult;
} catch (NotExistException nee) {
log.error("method=consumeTest||param={}||operator={}||errMsg=res not exist.", dto, operator, nee);
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, nee.getMessage());
} catch (Exception e) {
log.error("method=consumeTest||param={}||operator={}||errMsg=exception.", dto, operator, e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
} finally {
if (kafkaConsumer != null) {
kafkaConsumer.close();
}
}
}
@Override
public Result<List<TestProducerVO>> produceTest(@Validated KafkaProducerDTO dto, String operator) {
if (ValidateUtils.anyNull(dto, operator)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
// fetch the cluster info
ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(dto.getClusterId());
if (clusterPhy == null) {
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(dto.getClusterId()));
}
// producing to internal topics is not allowed
if (KafkaConstant.KAFKA_INTERNAL_TOPICS.contains(dto.getTopicName())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "内部Topic不允许进行生产测试");
}
KafkaProducer<String, String> kafkaProducer = null;
try {
// fetch the topic and validate the partition list
Topic topic = topicService.getTopic(clusterPhy.getId(), dto.getTopicName());
if (topic == null) {
log.error("method=produceTest||param={}||operator={}||errMsg=res not exist.", dto, operator);
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getTopicNotExist(clusterPhy.getId(), dto.getTopicName()));
}
if (!ValidateUtils.isEmptyList(dto.getPartitionIdList()) && dto.getPartitionIdList().stream().anyMatch(elem -> !topic.getPartitionMap().containsKey(elem))) {
// partition does not exist
return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getPartitionNotExist(dto.getClusterId(), dto.getTopicName()));
}
// create the producer client
kafkaProducer = new KafkaProducer<>(this.buildProperties(clusterPhy, dto.getClientProperties(), false));
// produce the records
Result<List<TestProducerVO>> listResult = this.sendData(kafkaProducer, dto);
// on success, record the operation if requested
if (listResult.successful() && dto.getRecordOperate()) {
opLogWrapService.saveOplogAndIgnoreException(new OplogDTO(
operator,
OperationEnum.ADD.getDesc(),
ModuleEnum.KAFKA_TOPIC_DATA.getDesc(),
dto.getTopicName(),
MsgConstant.getTopicBizStr(dto.getClusterId(), dto.getTopicName())
));
}
// return the result
return listResult;
} catch (Exception e) {
log.error("method=produceTest||param={}||operator={}||errMsg=exception!", dto, operator, e);
return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
} finally {
if (kafkaProducer != null) {
kafkaProducer.close();
}
}
}
/**************************************************** private method ****************************************************/
private Result<List<TestProducerVO>> sendData(KafkaProducer<String, String> kafkaProducer, KafkaProducerDTO dto) throws InterruptedException, ExecutionException {
List<RecordHeader> headers = new ArrayList<>();
if (dto.getRecordHeader() != null) {
for (Map.Entry<Object, Object> entry: dto.getRecordHeader().entrySet()) {
headers.add(new RecordHeader(entry.getKey().toString(), entry.getValue().toString().getBytes(StandardCharsets.UTF_8)));
}
}
// pick a random start index; if partitions are specified, produce round-robin starting from the partition at that index
int idx = new Random().nextInt(10000);
long now = System.currentTimeMillis();
List<Future<RecordMetadata>> futureList = new ArrayList<>();
for (int i = 0; i < dto.getRecordCount(); ++i) {
Integer partitionId = null;
if (!ValidateUtils.isEmptyList(dto.getPartitionIdList())) {
partitionId = dto.getPartitionIdList().get(idx % dto.getPartitionIdList().size());
idx += 1;
}
if (headers.isEmpty()) {
futureList.add(kafkaProducer.send(new ProducerRecord(dto.getTopicName(), partitionId, dto.getRecordKey(), dto.getRecordValue())));
} else {
futureList.add(kafkaProducer.send(new ProducerRecord(dto.getTopicName(), partitionId, dto.getRecordKey(), dto.getRecordValue(), headers)));
}
}
kafkaProducer.flush();
List<TestProducerVO> voList = new ArrayList<>();
for (Future<RecordMetadata> metadataFuture: futureList) {
RecordMetadata recordMetadata = metadataFuture.get();
voList.add(new TestProducerVO(
recordMetadata.timestamp() - now,
recordMetadata.partition(),
recordMetadata.offset() == -1? null: recordMetadata.offset(), // -1 means no offset was returned, map it to null
recordMetadata.timestamp())
);
}
return Result.buildSuc(voList);
}
private Properties buildProperties(ClusterPhy clusterPhy, Properties customProperties, boolean isConsume) {
Properties properties = ConvertUtil.str2ObjByJson(clusterPhy.getClientProperties(), Properties.class);
if (properties == null) {
properties = new Properties();
}
properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers());
if (isConsume) {
// string deserializers
properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
// disable offset commits by default
properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
} else {
// string serializers
properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
}
if (customProperties != null) {
properties.putAll(customProperties);
}
return properties;
}
private List<ConsumerRecord> fetchData(KafkaConsumer<String, String> kafkaConsumer,
Long maxDurationUnitMs,
Integer maxRecords,
String topicName,
List<PartitionOffsetDTO> dtoList,
Map<TopicPartition, Long> endOffsetMap) {
long now = System.currentTimeMillis();
// keep only the partitions that still have data to read
Map<Integer, Long> hasDataPartitionMap = dtoList
.stream()
.filter(elem -> {
Long endOffset = endOffsetMap.get(new TopicPartition(topicName, elem.getPartitionId()));
return endOffset != null && endOffset > elem.getOffset();
})
.collect(Collectors.toMap(PartitionOffsetDTO::getPartitionId, PartitionOffsetDTO::getOffset));
if (ValidateUtils.isEmptyMap(hasDataPartitionMap)) {
return new ArrayList<>();
}
// assign the partitions to consume
kafkaConsumer.assign(
hasDataPartitionMap.keySet().stream().map(elem -> new TopicPartition(topicName, elem)).collect(Collectors.toList())
);
// seek to the start offsets
for (Map.Entry<Integer, Long> entry: hasDataPartitionMap.entrySet()) {
kafkaConsumer.seek(new TopicPartition(topicName, entry.getKey()), entry.getValue());
}
List<ConsumerRecord> recordList = new ArrayList<>();
while (System.currentTimeMillis() - now <= maxDurationUnitMs && recordList.size() < maxRecords) {
for (ConsumerRecord consumerRecord: kafkaConsumer.poll(Duration.ofSeconds(3))) {
recordList.add(consumerRecord);
if (recordList.size() >= maxRecords) {
break;
}
}
}
return recordList;
}
private Result<List<PartitionOffsetDTO>> getConsumeStartOffset(Long clusterPhyId, String topicName, KafkaConsumerStartFromDTO startFromDTO) throws NotExistException, AdminOperateException {
// start from the latest offsets
if (KafkaConsumerStartFromEnum.LATEST.getCode().equals(startFromDTO.getStartFromType())) {
Result<Map<TopicPartition, Long>> offsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, OffsetSpec.latest(), null);
if (offsetMapResult.failed()) {
return Result.buildFromIgnoreData(offsetMapResult);
}
return Result.buildSuc(offsetMapResult.getData().entrySet()
.stream()
.map(entry-> new PartitionOffsetDTO(entry.getKey().partition(), entry.getValue()))
.collect(Collectors.toList())
);
}
// start from the earliest offsets
if (KafkaConsumerStartFromEnum.EARLIEST.getCode().equals(startFromDTO.getStartFromType())) {
Result<Map<TopicPartition, Long>> offsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, OffsetSpec.earliest(), null);
if (offsetMapResult.failed()) {
return Result.buildFromIgnoreData(offsetMapResult);
}
return Result.buildSuc(offsetMapResult.getData().entrySet()
.stream()
.map(entry-> new PartitionOffsetDTO(entry.getKey().partition(), entry.getValue()))
.collect(Collectors.toList())
);
}
// start from a specific timestamp
if (KafkaConsumerStartFromEnum.PRECISE_TIMESTAMP.getCode().equals(startFromDTO.getStartFromType())) {
Result<Map<TopicPartition, Long>> offsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, OffsetSpec.forTimestamp(startFromDTO.getTimestampUnitMs()), startFromDTO.getTimestampUnitMs());
if (offsetMapResult.failed()) {
return Result.buildFromIgnoreData(offsetMapResult);
}
return Result.buildSuc(offsetMapResult.getData().entrySet()
.stream()
.map(entry-> new PartitionOffsetDTO(entry.getKey().partition(), Math.max(entry.getValue(), 0L)))
.collect(Collectors.toList())
);
}
// start from the specified offsets
if (KafkaConsumerStartFromEnum.PRECISE_OFFSET.getCode().equals(startFromDTO.getStartFromType())) {
return Result.buildSuc(startFromDTO.getOffsetList());
}
// start from a consumer group's committed offsets
if (KafkaConsumerStartFromEnum.CONSUMER_GROUP.getCode().equals(startFromDTO.getStartFromType())) {
Map<TopicPartition, Long> offsetMap = groupService.getGroupOffsetFromKafka(clusterPhyId, startFromDTO.getConsumerGroup());
return Result.buildSuc(offsetMap.entrySet()
.stream()
.filter(elem -> elem.getKey().topic().equals(topicName))
.map(entry-> new PartitionOffsetDTO(entry.getKey().partition(), entry.getValue()))
.collect(Collectors.toList())
);
}
// start from the latest offsets minus X
if (KafkaConsumerStartFromEnum.LATEST_MINUS_X_OFFSET.getCode().equals(startFromDTO.getStartFromType())) {
Result<Map<TopicPartition, Long>> offsetMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, OffsetSpec.latest(), null);
if (offsetMapResult.failed()) {
return Result.buildFromIgnoreData(offsetMapResult);
}
return Result.buildSuc(offsetMapResult.getData().entrySet()
.stream()
.map(entry-> new PartitionOffsetDTO(entry.getKey().partition(), Math.max(0, entry.getValue() - startFromDTO.getLatestMinusX())))
.collect(Collectors.toList())
);
}
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "startFrom类型未知");
}
private Result<Void> checkStartFromAndFilterLegal(KafkaConsumerStartFromDTO startFrom, KafkaConsumerFilterDTO filter) {
// start-from: specific timestamp requires the timestamp field
if (KafkaConsumerStartFromEnum.PRECISE_TIMESTAMP.getCode().equals(startFrom.getStartFromType()) && ValidateUtils.isNullOrLessThanZero(startFrom.getTimestampUnitMs())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "指定时间消费必须设置时间参数");
}
// start-from: specified offsets requires the offset list
if (KafkaConsumerStartFromEnum.PRECISE_OFFSET.getCode().equals(startFrom.getStartFromType()) && ValidateUtils.isEmptyList(startFrom.getOffsetList())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "指定offset消费必须设置offset参数");
}
// start-from: consumer group requires the group name
if (KafkaConsumerStartFromEnum.CONSUMER_GROUP.getCode().equals(startFrom.getStartFromType()) && ValidateUtils.isBlank(startFrom.getConsumerGroup())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "指定Group消费必须设置Group参数");
}
// start-from: latest minus X requires latestMinusX
if (KafkaConsumerStartFromEnum.LATEST_MINUS_X_OFFSET.getCode().equals(startFrom.getStartFromType()) && ValidateUtils.isNullOrLessThanZero(startFrom.getLatestMinusX())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "指定近X条开始消费必须设置latestMinusX参数");
}
// contains filter: a compare key or value is required
if (KafkaConsumerFilterEnum.CONTAINS.getCode().equals(filter.getFilterType())
&& ValidateUtils.isBlank(filter.getFilterCompareKey()) && ValidateUtils.isBlank(filter.getFilterCompareValue())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "包含的方式过滤必须有过滤的key或value");
}
// not-contains filter: a compare key or value is required
if (KafkaConsumerFilterEnum.NOT_CONTAINS.getCode().equals(filter.getFilterType())
&& ValidateUtils.isBlank(filter.getFilterCompareKey()) && ValidateUtils.isBlank(filter.getFilterCompareValue())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "不包含的方式过滤必须有过滤的key或value");
}
// size-equals filter: a compare size is required
if (KafkaConsumerFilterEnum.EQUAL_SIZE.getCode().equals(filter.getFilterType()) && ValidateUtils.isNullOrLessThanZero(filter.getFilterCompareSizeUnitB())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "等于Size的方式过滤必须有过滤size大小参数");
}
// size-above filter: a compare size is required
if (KafkaConsumerFilterEnum.ABOVE_SIZE.getCode().equals(filter.getFilterType()) && ValidateUtils.isNullOrLessThanZero(filter.getFilterCompareSizeUnitB())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "大于Size的方式过滤必须有过滤size大小参数");
}
// size-under filter: a compare size is required
if (KafkaConsumerFilterEnum.UNDER_SIZE.getCode().equals(filter.getFilterType()) && ValidateUtils.isNullOrLessThanZero(filter.getFilterCompareSizeUnitB())) {
return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "小于Size的方式过滤必须有过滤size大小参数");
}
return Result.buildSuc();
}
private TestConsumerVO convert2TestConsumerVO(String topicName,
List<ConsumerRecord> recordList,
Map<TopicPartition, Long> partitionEndOffsetMap,
List<PartitionOffsetDTO> consumedStartFromOffsetList,
KafkaConsumerFilterDTO filter) {
Map<Integer, TestPartitionConsumedVO> consumedVOMap = new HashMap<>();
for (PartitionOffsetDTO partitionOffsetDTO: consumedStartFromOffsetList) {
TestPartitionConsumedVO vo = consumedVOMap.get(partitionOffsetDTO.getPartitionId());
if (vo == null) {
vo = new TestPartitionConsumedVO();
vo.setPartitionId(partitionOffsetDTO.getPartitionId());
vo.setConsumedOffset(partitionOffsetDTO.getOffset());
vo.setRecordSizeUnitB(0L);
vo.setRecordCount(0);
vo.setLogEndOffset(partitionEndOffsetMap.get(new TopicPartition(topicName, partitionOffsetDTO.getPartitionId())));
consumedVOMap.put(partitionOffsetDTO.getPartitionId(), vo);
}
}
TestConsumerVO vo = new TestConsumerVO();
vo.setRecordList(new ArrayList<>());
vo.setTotalRecordCount(0);
vo.setTotalRecordSizeUnitB(0L);
vo.setMaxRecordTimestampUnitMs(0L);
for (ConsumerRecord record: recordList) {
// aggregate per-partition consumption statistics
TestPartitionConsumedVO consumedVO = consumedVOMap.get(record.partition());
if (consumedVO == null) {
consumedVO = new TestPartitionConsumedVO();
consumedVO.setRecordSizeUnitB(0L);
consumedVO.setRecordCount(0);
consumedVO.setPartitionId(record.partition());
consumedVO.setLogEndOffset(partitionEndOffsetMap.get(new TopicPartition(topicName, record.partition())));
consumedVOMap.put(record.partition(), consumedVO);
}
// consumedOffset is the next offset to consume, i.e. record offset + 1; guard against the null initial value
if (consumedVO.getConsumedOffset() == null || record.offset() >= consumedVO.getConsumedOffset()) {
consumedVO.setConsumedOffset(record.offset() + 1);
}
consumedVO.setRecordCount(consumedVO.getRecordCount() + 1);
consumedVO.setRecordSizeUnitB(consumedVO.getRecordSizeUnitB() + record.serializedKeySize() + record.serializedValueSize());
// apply the record filter
if (this.checkMatchFilter(record, filter)) {
vo.getRecordList().add(this.convert2TopicRecordVO(record));
}
vo.setMaxRecordTimestampUnitMs(Math.max(vo.getMaxRecordTimestampUnitMs(), record.timestamp()));
}
vo.setTotalRecordCount(vo.getRecordList().size());
vo.setPartitionConsumedList(new ArrayList<>(consumedVOMap.values()));
if (ValidateUtils.isEmptyList(vo.getPartitionConsumedList())) {
vo.setTotalRecordSizeUnitB(0L);
} else {
vo.setTotalRecordSizeUnitB(vo.getPartitionConsumedList().stream().map(elem -> elem.getRecordSizeUnitB()).reduce(Long::sum).get());
}
return vo;
}
private boolean checkMatchFilter(ConsumerRecord consumerRecord, KafkaConsumerFilterDTO filter) {
if (KafkaConsumerFilterEnum.NONE.getCode().equals(filter.getFilterType())) {
return true;
}
// record size = key size + value size (each is -1 when absent), consistent with the statistics above
long recordSizeUnitB = (long) consumerRecord.serializedKeySize() + consumerRecord.serializedValueSize();
// contains filter: every non-blank compare field must be contained in the record
if (KafkaConsumerFilterEnum.CONTAINS.getCode().equals(filter.getFilterType())
&& (ValidateUtils.isBlank(filter.getFilterCompareKey()) || (consumerRecord.key() != null && consumerRecord.key().toString().contains(filter.getFilterCompareKey())))
&& (ValidateUtils.isBlank(filter.getFilterCompareValue()) || (consumerRecord.value() != null && consumerRecord.value().toString().contains(filter.getFilterCompareValue())))) {
return true;
}
// not-contains filter: every non-blank compare field must be absent from the record
if (KafkaConsumerFilterEnum.NOT_CONTAINS.getCode().equals(filter.getFilterType())
&& (ValidateUtils.isBlank(filter.getFilterCompareKey()) || consumerRecord.key() == null || !consumerRecord.key().toString().contains(filter.getFilterCompareKey()))
&& (ValidateUtils.isBlank(filter.getFilterCompareValue()) || consumerRecord.value() == null || !consumerRecord.value().toString().contains(filter.getFilterCompareValue()))) {
return true;
}
// size-equals filter
if (KafkaConsumerFilterEnum.EQUAL_SIZE.getCode().equals(filter.getFilterType())
&& !ValidateUtils.isNullOrLessThanZero(filter.getFilterCompareSizeUnitB()) && recordSizeUnitB == filter.getFilterCompareSizeUnitB()) {
return true;
}
// size-above filter
if (KafkaConsumerFilterEnum.ABOVE_SIZE.getCode().equals(filter.getFilterType())
&& !ValidateUtils.isNullOrLessThanZero(filter.getFilterCompareSizeUnitB()) && recordSizeUnitB > filter.getFilterCompareSizeUnitB()) {
return true;
}
// size-under filter
if (KafkaConsumerFilterEnum.UNDER_SIZE.getCode().equals(filter.getFilterType())
&& !ValidateUtils.isNullOrLessThanZero(filter.getFilterCompareSizeUnitB()) && recordSizeUnitB < filter.getFilterCompareSizeUnitB()) {
return true;
}
return false;
}
private TopicRecordVO convert2TopicRecordVO(ConsumerRecord consumerRecord) {
TopicRecordVO vo = new TopicRecordVO();
vo.setTopicName(consumerRecord.topic());
vo.setPartitionId(consumerRecord.partition());
vo.setOffset(consumerRecord.offset());
vo.setTimestampUnitMs(consumerRecord.timestamp());
vo.setHeaderList(new ArrayList<>());
for (Header header: consumerRecord.headers()) {
vo.getHeaderList().add(new RecordHeaderKS(header.key(), new String(header.value(), StandardCharsets.UTF_8)));
}
vo.setKey(consumerRecord.key() == null ? null: consumerRecord.key().toString());
vo.setValue(consumerRecord.value() == null? null: consumerRecord.value().toString());
return vo;
}
}

View File

@@ -0,0 +1,47 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.ClusterTopicDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import javax.validation.Valid;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import java.util.Properties;
/**
* @author zengqiao
* @date 20/4/23
*/
@Data
@EnterpriseTesting
@ApiModel(description="Kafka消费者测试")
public class KafkaConsumerDTO extends ClusterTopicDTO {
@Valid
@NotNull(message = "startFrom不允许为null")
@ApiModelProperty(value = "消费起始位置信息")
private KafkaConsumerStartFromDTO startFrom;
@NotNull(message = "maxDurationUnitMs不允许为null")
@Min(value = 1000, message = "maxDurationUnitMs不能小于1000ms")
@ApiModelProperty(value = "最大消费时长,单位ms", example = "10000")
private Long maxDurationUnitMs;
@Valid
@NotNull(message = "filter不允许为null")
@ApiModelProperty(value = "发送消息条数", example = "6")
private KafkaConsumerFilterDTO filter;
@NotNull(message = "clientProperties不允许为null")
@ApiModelProperty(value = "客户端配置", example = "{}")
private Properties clientProperties;
@NotNull(message = "recordOperate不允许为空")
@ApiModelProperty(value = "记录操作,仅记录发起的第一次", example = "false")
private Boolean recordOperate;
@NotNull(message = "maxRecords不允许为null")
@Min(value = 1, message = "maxRecords不能小于1")
@ApiModelProperty(value = "最大消费条数", example = "100")
private Integer maxRecords;
}

View File

@@ -0,0 +1,34 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import com.xiaojukeji.know.streaming.km.testing.common.enums.KafkaConsumerFilterEnum;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.hibernate.validator.constraints.Range;
/**
* @author zengqiao
* @date 20/4/23
*/
@Data
@EnterpriseTesting
@ApiModel(description="Kafka消费者测试")
public class KafkaConsumerFilterDTO extends BaseDTO {
/**
* @see KafkaConsumerFilterEnum
*/
@Range(min = 0, max = 5, message = "filterType必须在[0, 5]之间")
@ApiModelProperty(value = "过滤类型", example = "2")
private Integer filterType;
@ApiModelProperty(value = "比较匹配的Key", example = "ks-km")
private String filterCompareKey;
@ApiModelProperty(value = "比较匹配的Value", example = "ks-km")
private String filterCompareValue;
@ApiModelProperty(value = "比较匹配的大小", example = "1024")
private Long filterCompareSizeUnitB;
}

View File

@@ -0,0 +1,40 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO;
import com.xiaojukeji.know.streaming.km.common.bean.dto.partition.PartitionOffsetDTO;
import com.xiaojukeji.know.streaming.km.testing.common.enums.KafkaConsumerStartFromEnum;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.hibernate.validator.constraints.Range;
import java.util.List;
/**
* @author zengqiao
* @date 20/4/23
*/
@Data
@EnterpriseTesting
@ApiModel(description="Kafka消费者测试")
public class KafkaConsumerStartFromDTO extends BaseDTO {
/**
* @see KafkaConsumerStartFromEnum
*/
@Range(min = 0, max = 5, message = "startFromType必须在[0, 5]之间")
@ApiModelProperty(value = "开始消费位置的类型", example = "2")
private Integer startFromType;
@ApiModelProperty(value = "指定时间戳消费", example = "2453535465")
private Long timestampUnitMs;
@ApiModelProperty(value = "指定offset消费", example = "[]")
private List<PartitionOffsetDTO> offsetList;
@ApiModelProperty(value = "指定消费组消费", example = "6")
private String consumerGroup;
@ApiModelProperty(value = "指定从最近多少条开始消费", example = "10")
private Long latestMinusX;
}
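Only the field matching startFromType is read by getConsumeStartOffset in the manager implementation above; a sketch of the pairing (hypothetical factory methods, not part of the commit, assuming the Lombok @Data setters):
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaConsumerStartFromDTO;
import com.xiaojukeji.know.streaming.km.testing.common.enums.KafkaConsumerStartFromEnum;
// hypothetical helpers illustrating which field pairs with which startFromType
public class StartFromExamples {
public static KafkaConsumerStartFromDTO fromTimestamp(long tsMs) {
KafkaConsumerStartFromDTO dto = new KafkaConsumerStartFromDTO();
dto.setStartFromType(KafkaConsumerStartFromEnum.PRECISE_TIMESTAMP.getCode()); // type 2
dto.setTimestampUnitMs(tsMs); // required for PRECISE_TIMESTAMP
return dto;
}
public static KafkaConsumerStartFromDTO fromGroup(String group) {
KafkaConsumerStartFromDTO dto = new KafkaConsumerStartFromDTO();
dto.setStartFromType(KafkaConsumerStartFromEnum.CONSUMER_GROUP.getCode()); // type 4
dto.setConsumerGroup(group); // required for CONSUMER_GROUP
return dto;
}
}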

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.dto;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.ClusterTopicDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import java.util.List;
import java.util.Properties;
/**
* @author zengqiao
* @date 20/4/23
*/
@Data
@EnterpriseTesting
@ApiModel(description="Kafka生产者测试")
public class KafkaProducerDTO extends ClusterTopicDTO {
@ApiModelProperty(value = "消息Key", example = "hello know-streaming key")
private String recordKey;
@NotNull(message = "recordValue不允许为null")
@ApiModelProperty(value = "消息Value", example = "hello know-streaming value")
private String recordValue;
@ApiModelProperty(value = "recordHeader, key-value结构", example = "{}")
private Properties recordHeader;
@NotNull(message = "recordCount不允许为null")
@Min(value = 1, message = "recordCount不能小于1")
@ApiModelProperty(value = "发送消息条数", example = "6")
private Integer recordCount;
@NotNull(message = "clientProperties不允许为null")
@ApiModelProperty(value = "客户端配置", example = "{}")
private Properties clientProperties;
@ApiModelProperty(value = "分区ID列表为空时表示不进行控制", example = "[1, 2, 3]")
private List<Integer> partitionIdList;
@NotNull(message = "recordOperate不允许为空")
@ApiModelProperty(value = "记录操作,仅记录发起的第一次", example = "false")
private Boolean recordOperate;
}

View File

@@ -0,0 +1,22 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* @author zengqiao
* @date 21/8/19
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
@ApiModel(description = "用户测试")
@EnterpriseTesting
public class BaseTestVO {
@ApiModelProperty(value="花费时间, 单位ms", example = "1")
protected Long costTimeUnitMs;
}

View File

@@ -0,0 +1,33 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.vo.topic.TopicRecordVO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.util.List;
/**
* @author zengqiao
* @date 21/8/19
*/
@Data
@ApiModel(description = "测试消费结果")
@EnterpriseTesting
public class TestConsumerVO extends BaseTestVO {
@ApiModelProperty(value = "消费信息")
private List<TestPartitionConsumedVO> partitionConsumedList;
@ApiModelProperty(value = "记录信息")
private List<TopicRecordVO> recordList;
@ApiModelProperty(value = "本次消费到的RecordSize总大小", example = "1234567")
private Long totalRecordSizeUnitB;
@ApiModelProperty(value = "本次消费到的总消息条数", example = "23")
private Integer totalRecordCount;
@ApiModelProperty(value = "时间戳最大的消息时间", example = "34335532342")
private Long maxRecordTimestampUnitMs;
}

View File

@@ -0,0 +1,31 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
/**
* Topic Offset
* @author zengqiao
* @date 22/03/01
*/
@Data
@EnterpriseTesting
public class TestPartitionConsumedVO {
@ApiModelProperty(value = "分区ID", example = "1")
private Integer partitionId;
@ApiModelProperty(value = "分区end-offset", example = "123")
private Long logEndOffset;
@ApiModelProperty(value = "消费到的offset", example = "23")
private Long consumedOffset;
@ApiModelProperty(value = "消费到的LogSize", example = "23")
private Long recordSizeUnitB;
@ApiModelProperty(value = "消费到的消息条数", example = "23")
private Integer recordCount;
}

View File

@@ -0,0 +1,33 @@
package com.xiaojukeji.know.streaming.km.testing.common.bean.vo;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* @author zengqiao
* @date 21/8/19
*/
@Data
@NoArgsConstructor
@ApiModel(description = "测试生产结果")
@EnterpriseTesting
public class TestProducerVO extends BaseTestVO {
@ApiModelProperty(value = "数据", example = "1")
private Integer partitionId;
@ApiModelProperty(value = "数据", example = "123")
private Long offset;
@ApiModelProperty(value = "数据", example = "12321231321231")
private Long timestampUnitMs;
public TestProducerVO(Long costTimeUnitMs, Integer partitionId, Long offset, Long timestampUnitMs) {
super(costTimeUnitMs);
this.partitionId = partitionId;
this.offset = offset;
this.timestampUnitMs = timestampUnitMs;
}
}

View File

@@ -0,0 +1,35 @@
package com.xiaojukeji.know.streaming.km.testing.common.enums;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import lombok.Getter;
/**
* @author zengqiao
* @date 22/02/25
*/
@Getter
@EnterpriseTesting
public enum KafkaConsumerFilterEnum {
NONE(0, ""),
CONTAINS(1, "包含"),
NOT_CONTAINS(2, "不包含"),
EQUAL_SIZE(3, "size等于"),
ABOVE_SIZE(4, "size大于"),
UNDER_SIZE(5, "size小于"),
;
private final Integer code;
private final String message;
KafkaConsumerFilterEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
}

View File

@@ -0,0 +1,35 @@
package com.xiaojukeji.know.streaming.km.testing.common.enums;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import lombok.Getter;
/**
* @author zengqiao
* @date 22/02/25
*/
@Getter
@EnterpriseTesting
public enum KafkaConsumerStartFromEnum {
LATEST(0, "最新位置开始消费"),
EARLIEST(1, "最旧位置开始消费"),
PRECISE_TIMESTAMP(2, "指定时间开始消费"),
PRECISE_OFFSET(3, "指定位置开始消费"),
CONSUMER_GROUP(4, "指定消费组进行消费"),
LATEST_MINUS_X_OFFSET(5, "近X条数据开始消费"),
;
private final Integer code;
private final String message;
KafkaConsumerStartFromEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
}
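Neither enum exposes a code-to-constant lookup; callers compare codes field by field instead. A sketch of a lookup that could live inside the enum (hypothetical helper, not part of the commit):
// hypothetical lookup; returns null for unknown codes, matching the
// "startFrom类型未知" fallback in KafkaClientTestManagerImpl
public static KafkaConsumerStartFromEnum valueOfCode(Integer code) {
for (KafkaConsumerStartFromEnum elem : KafkaConsumerStartFromEnum.values()) {
if (elem.getCode().equals(code)) {
return elem;
}
}
return null;
}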

View File

@@ -0,0 +1,7 @@
/**
 * Produce/consume testing feature module
 */
@EnterpriseTesting
package com.xiaojukeji.know.streaming.km.testing.common;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.know.streaming.km.testing.rest;
import com.didiglobal.logi.security.util.HttpRequestUtil;
import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseTesting;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.constant.ApiPrefix;
import com.xiaojukeji.know.streaming.km.common.constant.Constant;
import com.xiaojukeji.know.streaming.km.testing.biz.KafkaClientTestManager;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaConsumerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.dto.KafkaProducerDTO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestConsumerVO;
import com.xiaojukeji.know.streaming.km.testing.common.bean.vo.TestProducerVO;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import java.util.List;
/**
* @author zengqiao
* @date 22/02/23
*/
@EnterpriseTesting
@Api(tags = Constant.SWAGGER_API_TAG_PREFIX + "KafkaClient-相关接口(REST)")
@RestController
@RequestMapping(ApiPrefix.API_V3_PREFIX)
public class KafkaClientController {
@Autowired
private KafkaClientTestManager kafkaClientTestManager;
@ApiOperation(value = "生产者测试")
@PostMapping(value = "clients/producer")
@ResponseBody
public Result<List<TestProducerVO>> produceTest(@Validated @RequestBody KafkaProducerDTO dto) {
return kafkaClientTestManager.produceTest(dto, HttpRequestUtil.getOperator());
}
@ApiOperation(value = "消费者测试")
@PostMapping(value = "clients/consumer")
@ResponseBody
public Result<TestConsumerVO> consumeTest(@Validated @RequestBody KafkaConsumerDTO dto) {
return kafkaClientTestManager.consumeTest(dto, HttpRequestUtil.getOperator());
}
}

View File

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-license</artifactId>
<version>${km.revision}</version>
<packaging>jar</packaging>
<parent>
<artifactId>km</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${km.revision}</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-common</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-persistence</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>km-core</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,69 @@
package com.xiaojukeji.know.streaming.km.license;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
import com.xiaojukeji.know.streaming.km.license.service.LicenseService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.web.servlet.HandlerInterceptor;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.OutputStream;
import static com.xiaojukeji.know.streaming.km.common.constant.ApiPrefix.API_V3_PREFIX;
@Component
public class LicenseInterceptor implements HandlerInterceptor {
private static final ILog LOGGER = LogFactory.getLog(LicenseInterceptor.class);
private static final String PHYSICAL_CLUSTER_URL = API_V3_PREFIX + "physical-clusters";
private static final String UTF_8 = "utf-8";
@Autowired
private LicenseService licenseService;
/**
 * Pre-handle interception
 * @return boolean false: block, do not continue; true: pass through
 */
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
if (PHYSICAL_CLUSTER_URL.equals( request.getRequestURI() ) &&
"POST".equals( request.getMethod() )) {
Result<Void> result = licenseService.addClusterLimit();
if (result.failed()) {
// on failure, write the error message into the response
OutputStream out = null;
try {
response.setCharacterEncoding(UTF_8);
response.setContentType("text/json");
out = response.getOutputStream();
out.write(ConvertUtil.obj2Json(result).getBytes(UTF_8));
out.flush();
} catch (IOException e) {
LOGGER.error( "method=preHandle||msg=physical-clusters add exception! ", e);
} finally {
try {
if (out != null) {
out.close();
}
} catch (IOException e) {
LOGGER.error( "method=preHandle||msg=outputStream close exception! ", e);
}
}
// block further processing
return false;
}
}
// limit not reached, continue processing
return true;
}
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.know.streaming.km.license;
import com.xiaojukeji.know.streaming.km.common.constant.ApiPrefix;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
* @author didi
*/
@Configuration
public class LicenseWebConfig implements WebMvcConfigurer {
@Autowired
private LicenseInterceptor licenseInterceptor;
@Override
public void addInterceptors(InterceptorRegistry registry) {
// paths intercepted by the license check
registry.addInterceptor(licenseInterceptor).addPathPatterns(ApiPrefix.API_PREFIX + "**");
}
}

View File

@@ -0,0 +1,11 @@
package com.xiaojukeji.know.streaming.km.license.bean;
import lombok.Data;
/**
* @author didi
*/
@Data
public class KmLicense {
private int clusters;
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.know.streaming.km.license.bean;
import lombok.Data;
import java.util.List;
/**
* @author didi
*/
@Data
public class KmLicenseUsageDetail {
/**
 * host of the reporting KS node
 */
private String host;
/**
 * all hosts of the reporting KS cluster
 */
private List<String> hosts;
/**
 * number of Kafka clusters managed by the reporting KS cluster
 */
private int clusters;
}

View File

@@ -0,0 +1,34 @@
package com.xiaojukeji.know.streaming.km.license.bean;
import lombok.Data;
/**
* @author didi
*/
@Data
public class LicenseInfo<T> {
/**
 * license status
 */
private int status;
/**
 * license expiry time, in seconds
 */
private Long expiredDate;
/**
 * application name the license is issued to
 */
private String app;
/**
 * license type
 */
private String type;
/**
 * license payload
 */
private T info;
}

View File

@@ -0,0 +1,12 @@
package com.xiaojukeji.know.streaming.km.license.bean;
import lombok.Data;
/**
* @author didi
*/
@Data
public class LicenseResult<T> {
String err;
T reply;
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.know.streaming.km.license.bean;
import lombok.Data;
/**
* @author didi
*/
@Data
public class LicenseUsage {
/**
 * report timestamp
 */
private Long timeStamp;
/**
 * uuid of the reporting node
 */
private String uuid;
/**
 * business payload (serialized KmLicenseUsageDetail)
 */
private String data;
}

View File

@@ -0,0 +1,27 @@
package com.xiaojukeji.know.streaming.km.license.controller;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.constant.ApiPrefix;
import com.xiaojukeji.know.streaming.km.license.service.LicenseService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
/**
* @author didi
*/
@RestController
@RequestMapping(ApiPrefix.API_V3_PREFIX)
public class LicenseController {
@Autowired
private LicenseService licenseService;
@GetMapping(value = "license")
@ResponseBody
public Result<Void> check() {
return licenseService.check();
}
}

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.know.streaming.km.license.service;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
public interface LicenseService {
/**
 * whether the number of managed clusters has reached the license limit
 * @return
 */
Result<Void> addClusterLimit();
/**
 * check whether the license is valid
 * @return
 */
Result<Void> check();
}

View File

@@ -0,0 +1,253 @@
package com.xiaojukeji.know.streaming.km.license.service.impl;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.TypeReference;
import com.didiglobal.logi.log.ILog;
import com.didiglobal.logi.log.LogFactory;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.component.RestTool;
import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
import com.xiaojukeji.know.streaming.km.common.utils.NetUtils;
import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
import com.xiaojukeji.know.streaming.km.core.service.km.KmNodeService;
import com.xiaojukeji.know.streaming.km.license.service.LicenseService;
import com.xiaojukeji.know.streaming.km.license.bean.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.HttpHeaders;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import javax.annotation.PostConstruct;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.stream.Collectors;
@Service
public class LicenseServiceImpl implements LicenseService {
private static final ILog LOGGER = LogFactory.getLog(LicenseServiceImpl.class);
private static final String LICENSE_INFO_URL = "/api/license/info";
private static final String LICENSE_USAGE_URL = "/api/license/usage";
private static final String LICENSE_HEADER_TOKEN = "x-l-token";
private static final String LICENSE_HEADER_APP = "x-l-app-name";
private static final String LICENSE_HEADER_SIGNATURE = "x-l-signature";
private static final int FAILED_NO_LICENSE = 1000000000;
private static final int FAILED_LICENSE_EXPIRE = 1000000001;
private static final int FAILED_LICENSE_CLUSTER_LIMIT = 1000000002;
private static final int ONE_HOUR = 60 * 60 * 1000;
@Value("${license.server}")
private String licenseSrvUrl;
@Value("${license.signature}")
private String licenseSignature;
@Value("${license.token}")
private String licenseToken;
@Value("${license.app-name}")
private String appName;
@Autowired
private KmNodeService kmNodeService;
@Autowired
private ClusterPhyService clusterPhyService;
@Autowired
private RestTool restTool;
private LicenseInfo<KmLicense> kmLicense;
private List<LicenseUsage> licenseUsages = new ArrayList<>();
@Override
public Result<Void> addClusterLimit() {
// sort the LicenseUsage records by timestamp in descending order, newest first
licenseUsages.sort((o1, o2) -> Long.compare(o2.getTimeStamp(), o1.getTimeStamp()));
List<KmLicenseUsageDetail> details = licenseUsages.stream()
.map(l -> JSON.parseObject(l.getData(), KmLicenseUsageDetail.class))
.collect(Collectors.toList());
if(CollectionUtils.isEmpty(details)){return Result.buildSuc();}
// Tuple.v1: the hosts of a KS cluster
// Tuple.v2: the number of Kafka clusters managed by that KS cluster
List<Tuple<List<String>, Integer>> ksClusterHostsList = new ArrayList<>();
ksClusterHostsList.add(new Tuple<>(details.get(0).getHosts(), details.get(0).getClusters()));
// group the reports into KS clusters by checking whether their host lists intersect
for (KmLicenseUsageDetail detail : details) {
boolean merged = false;
for (Tuple<List<String>, Integer> tuple : ksClusterHostsList) {
if (isListIntersection(tuple.getV1(), detail.getHosts())) {
tuple.setV1(detail.getHosts());
tuple.setV2(detail.getClusters());
merged = true;
break;
}
}
if (!merged) {
// no overlap with any known KS cluster, treat it as a new one
ksClusterHostsList.add(new Tuple<>(detail.getHosts(), detail.getClusters()));
}
}
LOGGER.debug("method=addClusterLimit||details={}||ksClusterHostsList={}",
JSON.toJSONString(details), JSON.toJSONString(ksClusterHostsList));
// total number of Kafka clusters managed across all KS clusters
int totalKafkaClusterNum = ksClusterHostsList.stream().mapToInt(Tuple::getV2).sum();
if (null == kmLicense) {
return Result.buildFailure(FAILED_NO_LICENSE, "无法获取KS的License信息");
}
if (kmLicense.getInfo().getClusters() < totalKafkaClusterNum) {
return Result.buildFailure(FAILED_LICENSE_CLUSTER_LIMIT, String.format("KS管理的Kafka集群已达到License限制的%d个集群", kmLicense.getInfo().getClusters()));
}
return Result.buildSuc();
}
/**
 * This check is intentionally minimal: it only verifies that the km-license module and the license info exist.
 * Other failures (e.g. license-srv being temporarily down) are not considered.
 * The error codes/messages returned by check are defined in this module and must not go into ResultStatus.
 */
@Override
public Result<Void> check() {
if(null == kmLicense){
return Result.buildFailure(FAILED_NO_LICENSE, "无法获取KS的license信息");
}
if(System.currentTimeMillis() > kmLicense.getExpiredDate() * 1000){
return Result.buildFailure(FAILED_LICENSE_EXPIRE, "当前KS的license已过期");
}
return Result.buildSuc();
}
@PostConstruct
public void init(){
syncLicenseInfo();
}
/**
 * sync every 10 minutes
 */
@Scheduled(cron="0 0/10 * * * ?")
public void syncLicenseInfo(){
try {
saveLicenseUsageInfo();
List<LicenseUsage> licenseUsages = listLicenseUsageInfo();
if(!CollectionUtils.isEmpty(licenseUsages)){
this.licenseUsages.clear();
this.licenseUsages.addAll(licenseUsages);
}
LicenseInfo<KmLicense> kmLicense = this.getLicenseInfo();
if(null != kmLicense){
this.kmLicense = kmLicense;
}
} catch (Exception e){
LOGGER.error("method=syncLicenseInfo||msg=exception!", e);
}
}
/**************************************************** private method ****************************************************/
private LicenseInfo<KmLicense> getLicenseInfo(){
String url = licenseSrvUrl + LICENSE_INFO_URL;
LicenseResult<String> ret = restTool.getForObject(
url, genHeaders(), new TypeReference<LicenseResult<String>>(){});
LOGGER.debug("method=getLicenseInfo||url={}||ret={}", url, JSON.toJSONString(ret));
if(!StringUtils.isEmpty(ret.getErr())){
return null;
}
byte[] decoded = Base64.getDecoder().decode(ret.getReply().getBytes(StandardCharsets.UTF_8));
LicenseInfo<KmLicense> info = JSON.parseObject(
new String(decoded, StandardCharsets.UTF_8),
new TypeReference<LicenseInfo<KmLicense>>(){}
);
return info;
}
private List<LicenseUsage> listLicenseUsageInfo(){
String url = licenseSrvUrl + LICENSE_USAGE_URL;
LicenseResult<List<LicenseUsage>> ret = restTool.getForObject(
url, genHeaders(), new TypeReference<LicenseResult<List<LicenseUsage>>>(){});
LOGGER.debug("method=listLicenseUsageInfo||url={}||ret={}", url, JSON.toJSONString(ret));
if(!StringUtils.isEmpty(ret.getErr())){
return new ArrayList<>();
}
List<LicenseUsage> licenseUsages = ret.getReply();
if(!CollectionUtils.isEmpty(licenseUsages)){
long now = System.currentTimeMillis();
return licenseUsages.stream()
.filter(l -> l.getTimeStamp() + 6 * ONE_HOUR > now)
.collect(Collectors.toList());
}
return new ArrayList<>();
}
private boolean saveLicenseUsageInfo(){
String host = NetUtils.localHost();
KmLicenseUsageDetail detail = new KmLicenseUsageDetail();
detail.setHost(host);
detail.setHosts(kmNodeService.listKmHosts());
detail.setClusters(clusterPhyService.listAllClusters().size());
LicenseUsage licenseUsage = new LicenseUsage();
licenseUsage.setTimeStamp(System.currentTimeMillis());
licenseUsage.setUuid(CommonUtils.getMD5(host));
licenseUsage.setData(JSON.toJSONString(detail));
Map<String, String> param = new HashMap<>();
param.put("usageSecret", Base64.getEncoder().encodeToString(JSON.toJSONString(licenseUsage).getBytes(StandardCharsets.UTF_8)));
String url = licenseSrvUrl + LICENSE_USAGE_URL;
LicenseResult<Void> ret = restTool.putForObject(url, genHeaders(), JSON.toJSONString(param), LicenseResult.class);
LOGGER.debug("method=saveLicenseUsageInfo||url={}||ret={}", url, JSON.toJSONString(ret));
if(!StringUtils.isEmpty(ret.getErr())){
return false;
}
return true;
}
private HttpHeaders genHeaders(){
HttpHeaders headers = new HttpHeaders();
headers.add(LICENSE_HEADER_TOKEN, licenseToken);
headers.add(LICENSE_HEADER_APP, appName);
headers.add(LICENSE_HEADER_SIGNATURE, licenseSignature);
headers.add("content-type", "application/json");
return headers;
}
/**
 * whether the two lists intersect, i.e. share at least one element
 * @return
 */
private boolean isListIntersection(List<String> l, List<String> r) {
// operate on a copy so retainAll does not mutate the caller's list
List<String> copy = new ArrayList<>(l);
copy.retainAll(r);
return !CollectionUtils.isEmpty(copy);
}
}
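To make the host-intersection grouping concrete, a standalone sketch with assumed data (not from the commit):
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
// demonstrates how usage reports are grouped into KS clusters by overlapping host lists
public class IntersectionMergeDemo {
public static void main(String[] args) {
List<String> a = Arrays.asList("10.0.0.1", "10.0.0.2"); // report 1
List<String> b = Arrays.asList("10.0.0.2", "10.0.0.3"); // report 2, same KS cluster as report 1
List<String> c = Arrays.asList("10.0.1.9"); // report 3, a separate KS cluster
System.out.println(intersects(a, b)); // true -> merged into one KS cluster
System.out.println(intersects(a, c)); // false -> counted as a new KS cluster
}
static boolean intersects(List<String> l, List<String> r) {
List<String> copy = new ArrayList<>(l); // copy so the caller's list is untouched
copy.retainAll(r);
return !copy.isEmpty();
}
}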

View File

@@ -0,0 +1,67 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>km</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>${km.revision}</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>km-rebalance</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-client</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>net.sf.jopt-simple</groupId>
<artifactId>jopt-simple</artifactId>
</dependency>
<!-- <dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.2</version>
<scope>runtime</scope>
</dependency>-->
</dependencies>
</project>

View File

@@ -0,0 +1,139 @@
package com.xiaojukeji.know.streaming.km.rebalance;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.xiaojukeji.know.streaming.km.rebalance.executor.ExecutionRebalance;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceParameter;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.HostEnv;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.OptimizerResult;
import com.xiaojukeji.know.streaming.km.rebalance.utils.CommandLineUtils;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import org.apache.commons.io.FileUtils;
import org.apache.kafka.clients.CommonClientConfigs;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
public class KafkaRebalanceMain {
public void run(OptionSet options) {
try {
BalanceParameter balanceParameter = new BalanceParameter();
if (options.has("excluded-topics")) {
balanceParameter.setExcludedTopics(options.valueOf("excluded-topics").toString());
}
if (options.has("offline-brokers")) {
balanceParameter.setOfflineBrokers(options.valueOf("offline-brokers").toString());
}
if (options.has("disk-threshold")) {
Double diskThreshold = (Double) options.valueOf("disk-threshold");
balanceParameter.setDiskThreshold(diskThreshold);
}
if (options.has("cpu-threshold")) {
Double cpuThreshold = (Double) options.valueOf("cpu-threshold");
balanceParameter.setCpuThreshold(cpuThreshold);
}
if (options.has("network-in-threshold")) {
Double networkInThreshold = (Double) options.valueOf("network-in-threshold");
balanceParameter.setNetworkInThreshold(networkInThreshold);
}
if (options.has("network-out-threshold")) {
Double networkOutThreshold = (Double) options.valueOf("network-out-threshold");
balanceParameter.setNetworkOutThreshold(networkOutThreshold);
}
if (options.has("balance-brokers")) {
balanceParameter.setBalanceBrokers(options.valueOf("balance-brokers").toString());
}
if (options.has("topic-leader-threshold")) {
Double topicLeaderThreshold = (Double) options.valueOf("topic-leader-threshold");
balanceParameter.setTopicLeaderThreshold(topicLeaderThreshold);
}
if (options.has("topic-replica-threshold")) {
Double topicReplicaThreshold = (Double) options.valueOf("topic-replica-threshold");
balanceParameter.setTopicReplicaThreshold(topicReplicaThreshold);
}
if (options.has("ignored-topics")) {
balanceParameter.setIgnoredTopics(options.valueOf("ignored-topics").toString());
}
String path = options.valueOf("output-path").toString();
String goals = options.valueOf("goals").toString();
balanceParameter.setGoals(Arrays.asList(goals.split(",")));
balanceParameter.setCluster(options.valueOf("cluster").toString());
Properties kafkaConfig = new Properties();
kafkaConfig.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, options.valueOf("bootstrap-servers").toString());
balanceParameter.setKafkaConfig(kafkaConfig);
balanceParameter.setEsRestURL(options.valueOf("es-rest-url").toString());
balanceParameter.setEsIndexPrefix(options.valueOf("es-index-prefix").toString());
balanceParameter.setBeforeSeconds((Integer) options.valueOf("before-seconds"));
String envFile = options.valueOf("hardware-env-file").toString();
String envJson = FileUtils.readFileToString(new File(envFile), "UTF-8");
List<HostEnv> env = new ObjectMapper().readValue(envJson, new TypeReference<List<HostEnv>>() {
});
balanceParameter.setHardwareEnv(env);
ExecutionRebalance exec = new ExecutionRebalance();
OptimizerResult optimizerResult = exec.optimizations(balanceParameter);
FileUtils.write(new File(path.concat("/overview.json")), optimizerResult.resultJsonOverview(), "UTF-8");
FileUtils.write(new File(path.concat("/detailed.json")), optimizerResult.resultJsonDetailed(), "UTF-8");
FileUtils.write(new File(path.concat("/task.json")), optimizerResult.resultJsonTask(), "UTF-8");
} catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
OptionParser parser = new OptionParser();
parser.accepts("bootstrap-servers", "Kafka cluster boot server").withRequiredArg().ofType(String.class);
parser.accepts("es-rest-url", "The url of elasticsearch").withRequiredArg().ofType(String.class);
parser.accepts("es-index-prefix", "The Index Prefix of elasticsearch").withRequiredArg().ofType(String.class);
parser.accepts("goals", "Balanced goals include TopicLeadersDistributionGoal,TopicReplicaDistributionGoal,DiskDistributionGoal,NetworkInboundDistributionGoal,NetworkOutboundDistributionGoal").withRequiredArg().ofType(String.class);
parser.accepts("cluster", "Balanced cluster name").withRequiredArg().ofType(String.class);
parser.accepts("excluded-topics", "Topic does not perform data balancing").withOptionalArg().ofType(String.class);
parser.accepts("ignored-topics","Topics that do not contain model calculations").withOptionalArg().ofType(String.class);
parser.accepts("offline-brokers", "Broker does not perform data balancing").withOptionalArg().ofType(String.class);
parser.accepts("balance-brokers", "Balanced brokers list").withOptionalArg().ofType(String.class);
parser.accepts("disk-threshold", "Disk data balance threshold").withOptionalArg().ofType(Double.class);
parser.accepts("topic-leader-threshold","topic leader threshold").withOptionalArg().ofType(Double.class);
parser.accepts("topic-replica-threshold","topic replica threshold").withOptionalArg().ofType(Double.class);
parser.accepts("cpu-threshold", "Cpu utilization balance threshold").withOptionalArg().ofType(Double.class);
parser.accepts("network-in-threshold", "Network inflow threshold").withOptionalArg().ofType(Double.class);
parser.accepts("network-out-threshold", "Network outflow threshold").withOptionalArg().ofType(Double.class);
parser.accepts("before-seconds", "Query es data time").withRequiredArg().ofType(Integer.class);
parser.accepts("hardware-env-file", "Machine environment information includes cpu, disk and network").withRequiredArg().ofType(String.class);
parser.accepts("output-path", "Cluster balancing result file directory").withRequiredArg().ofType(String.class);
OptionSet options = parser.parse(args);
if (args.length == 0) {
CommandLineUtils.printUsageAndDie(parser, "Running parameters need to be configured to perform cluster balancing");
}
if (!options.has("bootstrap-servers")) {
CommandLineUtils.printUsageAndDie(parser, "bootstrap-servers cannot be empty");
}
if (!options.has("es-rest-url")) {
CommandLineUtils.printUsageAndDie(parser, "es-rest-url cannot be empty");
}
if (!options.has("es-index-prefix")) {
CommandLineUtils.printUsageAndDie(parser, "es-index-prefix cannot be empty");
}
if (!options.has("goals")) {
CommandLineUtils.printUsageAndDie(parser, "goals cannot be empty");
}
if (!options.has("cluster")) {
CommandLineUtils.printUsageAndDie(parser, "cluster name cannot be empty");
}
if (!options.has("before-seconds")) {
CommandLineUtils.printUsageAndDie(parser, "before-seconds cannot be empty");
}
if (!options.has("hardware-env-file")) {
CommandLineUtils.printUsageAndDie(parser, "hardware-env-file cannot be empty");
}
if (!options.has("output-path")) {
CommandLineUtils.printUsageAndDie(parser, "output-path cannot be empty");
}
KafkaRebalanceMain rebalanceMain = new KafkaRebalanceMain();
rebalanceMain.run(options);
}
}
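A minimal invocation sketch for the CLI above (not part of the commit); every concrete value here (broker list, ES endpoint, index prefix, cluster name, file paths) is a placeholder assumption:

public class KafkaRebalanceMainExample {
    public static void main(String[] args) {
        // Sketch only: all argument values below are hypothetical.
        KafkaRebalanceMain.main(new String[]{
                "--bootstrap-servers", "127.0.0.1:9092",
                "--es-rest-url", "127.0.0.1:9200",
                "--es-index-prefix", "ks_kafka_",
                "--goals", "DiskDistributionGoal,NetworkInboundDistributionGoal",
                "--cluster", "demo-cluster",
                "--before-seconds", "300",
                "--hardware-env-file", "/tmp/hardware-env.json",
                "--output-path", "/tmp/rebalance"
        });
    }
}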

View File

@@ -0,0 +1,15 @@
package com.xiaojukeji.know.streaming.km.rebalance.exception;
public class OptimizationFailureException extends Exception {
public OptimizationFailureException(String message, Throwable cause) {
super(message, cause);
}
public OptimizationFailureException(String message) {
super(message);
}
public OptimizationFailureException(Throwable cause) {
super(cause);
}
}

View File

@@ -0,0 +1,78 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceGoal;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceParameter;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceThreshold;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BrokerBalanceState;
import com.xiaojukeji.know.streaming.km.rebalance.model.ClusterModel;
import com.xiaojukeji.know.streaming.km.rebalance.model.Load;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import com.xiaojukeji.know.streaming.km.rebalance.optimizer.GoalOptimizer;
import com.xiaojukeji.know.streaming.km.rebalance.optimizer.OptimizationOptions;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.OptimizerResult;
import com.xiaojukeji.know.streaming.km.rebalance.utils.GoalUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
public class ExecutionRebalance {
private static final Logger logger = LoggerFactory.getLogger(ExecutionRebalance.class);
public OptimizerResult optimizations(BalanceParameter balanceParameter) {
Validate.isTrue(StringUtils.isNotBlank(balanceParameter.getCluster()), "cluster is empty");
Validate.isTrue(balanceParameter.getKafkaConfig() != null, "Kafka config properties is empty");
Validate.isTrue(balanceParameter.getGoals() != null, "Balance goals is empty");
Validate.isTrue(StringUtils.isNotBlank(balanceParameter.getEsIndexPrefix()), "EsIndexPrefix is empty");
Validate.isTrue(StringUtils.isNotBlank(balanceParameter.getEsRestURL()), "EsRestURL is empty");
Validate.isTrue(balanceParameter.getHardwareEnv() != null, "HardwareEnv is empty");
logger.info("Cluster balancing start");
ClusterModel clusterModel = GoalUtils.getInitClusterModel(balanceParameter);
GoalOptimizer optimizer = new GoalOptimizer();
OptimizerResult optimizerResult = optimizer.optimizations(clusterModel, new OptimizationOptions(balanceParameter));
logger.info("Cluster balancing completed");
return optimizerResult;
}
public static Map<Resource, Double> getClusterAvgResourcesState(BalanceParameter balanceParameter) {
ClusterModel clusterModel = GoalUtils.getInitClusterModel(balanceParameter);
Load load = clusterModel.load();
Map<Resource, Double> avgResource = new HashMap<>();
avgResource.put(Resource.DISK, load.loadFor(Resource.DISK) / clusterModel.brokers().size());
avgResource.put(Resource.CPU, load.loadFor(Resource.CPU) / clusterModel.brokers().size());
avgResource.put(Resource.NW_OUT, load.loadFor(Resource.NW_OUT) / clusterModel.brokers().size());
avgResource.put(Resource.NW_IN, load.loadFor(Resource.NW_IN) / clusterModel.brokers().size());
return avgResource;
}
public static Map<Integer, BrokerBalanceState> getBrokerResourcesBalanceState(BalanceParameter balanceParameter) {
Map<Integer, BrokerBalanceState> balanceState = new HashMap<>();
ClusterModel clusterModel = GoalUtils.getInitClusterModel(balanceParameter);
double[] clusterAvgResource = clusterModel.avgOfUtilization();
Map<String, BalanceThreshold> balanceThreshold = GoalUtils.getBalanceThreshold(balanceParameter, clusterAvgResource);
clusterModel.brokers().forEach(i -> {
BrokerBalanceState state = new BrokerBalanceState();
if (balanceParameter.getGoals().contains(BalanceGoal.DISK.goal())) {
state.setDiskAvgResource(i.load().loadFor(Resource.DISK));
state.setDiskUtilization(i.utilizationFor(Resource.DISK));
state.setDiskBalanceState(balanceThreshold.get(BalanceGoal.DISK.goal()).state(i.utilizationFor(Resource.DISK)));
}
if (balanceParameter.getGoals().contains(BalanceGoal.NW_IN.goal())) {
state.setBytesInAvgResource(i.load().loadFor(Resource.NW_IN));
state.setBytesInUtilization(i.utilizationFor(Resource.NW_IN));
state.setBytesInBalanceState(balanceThreshold.get(BalanceGoal.NW_IN.goal()).state(i.utilizationFor(Resource.NW_IN)));
}
if (balanceParameter.getGoals().contains(BalanceGoal.NW_OUT.goal())) {
state.setBytesOutAvgResource(i.load().loadFor(Resource.NW_OUT));
state.setBytesOutUtilization(i.utilizationFor(Resource.NW_OUT));
state.setBytesOutBalanceState(balanceThreshold.get(BalanceGoal.NW_OUT.goal()).state(i.utilizationFor(Resource.NW_OUT)));
}
balanceState.put(i.id(), state);
});
return balanceState;
}
}
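The optimizer can also be driven programmatically. A sketch under assumed values (cluster name, endpoints, hardware list); it mirrors what KafkaRebalanceMain.run does:

BalanceParameter parameter = new BalanceParameter();
parameter.setCluster("demo-cluster");                  // assumed cluster name
Properties kafkaConfig = new Properties();
kafkaConfig.setProperty("bootstrap.servers", "127.0.0.1:9092");
parameter.setKafkaConfig(kafkaConfig);
parameter.setEsRestURL("127.0.0.1:9200");
parameter.setEsIndexPrefix("ks_kafka_");               // must match the collector's index prefix
parameter.setGoals(Arrays.asList(BalanceGoal.DISK.goal(), BalanceGoal.NW_IN.goal()));
parameter.setHardwareEnv(hostEnvs);                    // List<HostEnv>, built as in the HostEnv sketch below
OptimizerResult result = new ExecutionRebalance().optimizations(parameter);
System.out.println(result.resultJsonTask());           // Kafka partition-reassignment JSON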

View File

@@ -0,0 +1,76 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
public class BalanceActionHistory {
// Balance goal
private String goal;
// Balance action type
private String actionType;
// Topic being balanced
private String topic;
// Partition being balanced
private int partition;
// Source broker
private int sourceBrokerId;
// Destination broker
private int destinationBrokerId;
public String getGoal() {
return goal;
}
public void setGoal(String goal) {
this.goal = goal;
}
public String getActionType() {
return actionType;
}
public void setActionType(String actionType) {
this.actionType = actionType;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public int getPartition() {
return partition;
}
public void setPartition(int partition) {
this.partition = partition;
}
public int getSourceBrokerId() {
return sourceBrokerId;
}
public void setSourceBrokerId(int sourceBrokerId) {
this.sourceBrokerId = sourceBrokerId;
}
public int getDestinationBrokerId() {
return destinationBrokerId;
}
public void setDestinationBrokerId(int destinationBrokerId) {
this.destinationBrokerId = destinationBrokerId;
}
@Override
public String toString() {
return "BalanceActionHistory{" +
"goal='" + goal + '\'' +
", actionType='" + actionType + '\'' +
", topic='" + topic + '\'' +
", partition=" + partition +
", sourceBrokerId=" + sourceBrokerId +
", destinationBrokerId=" + destinationBrokerId +
'}';
}
}

View File

@@ -0,0 +1,173 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
public class BalanceDetailed {
private int brokerId;
private String host;
// CPU utilization before rebalance
private double currentCPUUtilization;
// CPU utilization after rebalance (projected)
private double lastCPUUtilization;
// Disk utilization before rebalance
private double currentDiskUtilization;
// Disk utilization after rebalance (projected)
private double lastDiskUtilization;
// Network-in utilization before rebalance
private double currentNetworkInUtilization;
// Network-in utilization after rebalance (projected)
private double lastNetworkInUtilization;
// Network-out utilization before rebalance
private double currentNetworkOutUtilization;
// Network-out utilization after rebalance (projected)
private double lastNetworkOutUtilization;
// Balance state
private int balanceState = 0;
// Disk size moved in
private double moveInDiskSize;
// Disk size moved out
private double moveOutDiskSize;
// Number of replicas moved in
private double moveInReplicas;
// Number of replicas moved out
private double moveOutReplicas;
public int getBrokerId() {
return brokerId;
}
public void setBrokerId(int brokerId) {
this.brokerId = brokerId;
}
public double getCurrentCPUUtilization() {
return currentCPUUtilization;
}
public void setCurrentCPUUtilization(double currentCPUUtilization) {
this.currentCPUUtilization = currentCPUUtilization;
}
public double getLastCPUUtilization() {
return lastCPUUtilization;
}
public void setLastCPUUtilization(double lastCPUUtilization) {
this.lastCPUUtilization = lastCPUUtilization;
}
public double getCurrentDiskUtilization() {
return currentDiskUtilization;
}
public void setCurrentDiskUtilization(double currentDiskUtilization) {
this.currentDiskUtilization = currentDiskUtilization;
}
public double getLastDiskUtilization() {
return lastDiskUtilization;
}
public void setLastDiskUtilization(double lastDiskUtilization) {
this.lastDiskUtilization = lastDiskUtilization;
}
public double getCurrentNetworkInUtilization() {
return currentNetworkInUtilization;
}
public void setCurrentNetworkInUtilization(double currentNetworkInUtilization) {
this.currentNetworkInUtilization = currentNetworkInUtilization;
}
public double getLastNetworkInUtilization() {
return lastNetworkInUtilization;
}
public void setLastNetworkInUtilization(double lastNetworkInUtilization) {
this.lastNetworkInUtilization = lastNetworkInUtilization;
}
public double getCurrentNetworkOutUtilization() {
return currentNetworkOutUtilization;
}
public void setCurrentNetworkOutUtilization(double currentNetworkOutUtilization) {
this.currentNetworkOutUtilization = currentNetworkOutUtilization;
}
public double getLastNetworkOutUtilization() {
return lastNetworkOutUtilization;
}
public void setLastNetworkOutUtilization(double lastNetworkOutUtilization) {
this.lastNetworkOutUtilization = lastNetworkOutUtilization;
}
public int getBalanceState() {
return balanceState;
}
public void setBalanceState(int balanceState) {
this.balanceState = balanceState;
}
public double getMoveInDiskSize() {
return moveInDiskSize;
}
public void setMoveInDiskSize(double moveInDiskSize) {
this.moveInDiskSize = moveInDiskSize;
}
public double getMoveOutDiskSize() {
return moveOutDiskSize;
}
public void setMoveOutDiskSize(double moveOutDiskSize) {
this.moveOutDiskSize = moveOutDiskSize;
}
public double getMoveInReplicas() {
return moveInReplicas;
}
public void setMoveInReplicas(double moveInReplicas) {
this.moveInReplicas = moveInReplicas;
}
public double getMoveOutReplicas() {
return moveOutReplicas;
}
public void setMoveOutReplicas(double moveOutReplicas) {
this.moveOutReplicas = moveOutReplicas;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
@Override
public String toString() {
return "BalanceDetailed{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", currentCPUUtilization=" + currentCPUUtilization +
", lastCPUUtilization=" + lastCPUUtilization +
", currentDiskUtilization=" + currentDiskUtilization +
", lastDiskUtilization=" + lastDiskUtilization +
", currentNetworkInUtilization=" + currentNetworkInUtilization +
", lastNetworkInUtilization=" + lastNetworkInUtilization +
", currentNetworkOutUtilization=" + currentNetworkOutUtilization +
", lastNetworkOutUtilization=" + lastNetworkOutUtilization +
", balanceState=" + balanceState +
", moveInDiskSize=" + moveInDiskSize +
", moveOutDiskSize=" + moveOutDiskSize +
", moveInReplicas=" + moveInReplicas +
", moveOutReplicas=" + moveOutReplicas +
'}';
}
}

View File

@@ -0,0 +1,20 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
public enum BalanceGoal {
// Goal names as passed in from KM
TOPIC_LEADERS("TopicLeadersDistributionGoal"),
TOPIC_REPLICA("TopicReplicaDistributionGoal"),
DISK("DiskDistributionGoal"),
NW_IN("NetworkInboundDistributionGoal"),
NW_OUT("NetworkOutboundDistributionGoal");
private final String goal;
BalanceGoal(String goal) {
this.goal = goal;
}
public String goal() {
return goal;
}
}

View File

@@ -0,0 +1,102 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import java.util.Map;
public class BalanceOverview {
// Task type
private String taskType;
// Node (broker) range
private String nodeRange;
// Total size of data to move
private double totalMoveSize;
// Topic blacklist
private String topicBlacklist;
// Number of replicas to move
private int moveReplicas;
// Topics to move
private String moveTopics;
// Balance thresholds
private Map<Resource, Double> balanceThreshold;
// Nodes to remove
private String removeNode;
public String getTaskType() {
return taskType;
}
public void setTaskType(String taskType) {
this.taskType = taskType;
}
public String getNodeRange() {
return nodeRange;
}
public void setNodeRange(String nodeRange) {
this.nodeRange = nodeRange;
}
public double getTotalMoveSize() {
return totalMoveSize;
}
public void setTotalMoveSize(double totalMoveSize) {
this.totalMoveSize = totalMoveSize;
}
public String getTopicBlacklist() {
return topicBlacklist;
}
public void setTopicBlacklist(String topicBlacklist) {
this.topicBlacklist = topicBlacklist;
}
public int getMoveReplicas() {
return moveReplicas;
}
public void setMoveReplicas(int moveReplicas) {
this.moveReplicas = moveReplicas;
}
public String getMoveTopics() {
return moveTopics;
}
public void setMoveTopics(String moveTopics) {
this.moveTopics = moveTopics;
}
public Map<Resource, Double> getBalanceThreshold() {
return balanceThreshold;
}
public void setBalanceThreshold(Map<Resource, Double> balanceThreshold) {
this.balanceThreshold = balanceThreshold;
}
public String getRemoveNode() {
return removeNode;
}
public void setRemoveNode(String removeNode) {
this.removeNode = removeNode;
}
@Override
public String toString() {
return "BalanceOverview{" +
"taskType='" + taskType + '\'' +
", nodeRange='" + nodeRange + '\'' +
", totalMoveSize=" + totalMoveSize +
", topicBlacklist='" + topicBlacklist + '\'' +
", moveReplicas=" + moveReplicas +
", moveTopics='" + moveTopics + '\'' +
", balanceThreshold=" + balanceThreshold +
", removeNode='" + removeNode + '\'' +
'}';
}
}

View File

@@ -0,0 +1,199 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
import java.util.List;
import java.util.Properties;
public class BalanceParameter {
// Cluster name
private String cluster;
// Cluster access configuration
private Properties kafkaConfig;
// ES access address
private String esRestURL;
// ES index prefix
private String esIndexPrefix;
// Balance goals
private List<String> goals;
// Topic blacklist: not balanced, but still included in model calculation
private String excludedTopics = "";
// Ignored topics: excluded from model calculation entirely
private String ignoredTopics = "";
// Brokers being taken offline
private String offlineBrokers = "";
// Brokers to balance
private String balanceBrokers = "";
// Default topic replica distribution threshold
private double topicReplicaThreshold = 0.1;
// Disk floating threshold
private double diskThreshold = 0.1;
// CPU floating threshold
private double cpuThreshold = 0.1;
// Network-in floating threshold
private double networkInThreshold = 0.1;
// Network-out floating threshold
private double networkOutThreshold = 0.1;
// Time window (seconds) of metrics used for balancing
private int beforeSeconds = 300;
// Hardware environment of every broker in the cluster: cpu, disk, bytesIn, bytesOut
private List<HostEnv> hardwareEnv;
// Minimum leader floating threshold; absolute evenness is not pursued, to avoid cluster traffic jitter
private double topicLeaderThreshold = 0.1;
public String getCluster() {
return cluster;
}
public void setCluster(String cluster) {
this.cluster = cluster;
}
public String getEsRestURL() {
return esRestURL;
}
public void setEsRestURL(String esRestURL) {
this.esRestURL = esRestURL;
}
public List<String> getGoals() {
return goals;
}
public void setGoals(List<String> goals) {
this.goals = goals;
}
public String getExcludedTopics() {
return excludedTopics;
}
public void setExcludedTopics(String excludedTopics) {
this.excludedTopics = excludedTopics;
}
public String getIgnoredTopics() {
return ignoredTopics;
}
public void setIgnoredTopics(String ignoredTopics) {
this.ignoredTopics = ignoredTopics;
}
public double getTopicReplicaThreshold() {
return topicReplicaThreshold;
}
public void setTopicReplicaThreshold(double topicReplicaThreshold) {
this.topicReplicaThreshold = topicReplicaThreshold;
}
public double getDiskThreshold() {
return diskThreshold;
}
public void setDiskThreshold(double diskThreshold) {
this.diskThreshold = diskThreshold;
}
public double getCpuThreshold() {
return cpuThreshold;
}
public void setCpuThreshold(double cpuThreshold) {
this.cpuThreshold = cpuThreshold;
}
public double getNetworkInThreshold() {
return networkInThreshold;
}
public void setNetworkInThreshold(double networkInThreshold) {
this.networkInThreshold = networkInThreshold;
}
public double getNetworkOutThreshold() {
return networkOutThreshold;
}
public void setNetworkOutThreshold(double networkOutThreshold) {
this.networkOutThreshold = networkOutThreshold;
}
public List<HostEnv> getHardwareEnv() {
return hardwareEnv;
}
public void setHardwareEnv(List<HostEnv> hardwareEnv) {
this.hardwareEnv = hardwareEnv;
}
public String getBalanceBrokers() {
return balanceBrokers;
}
public void setBalanceBrokers(String balanceBrokers) {
this.balanceBrokers = balanceBrokers;
}
public Properties getKafkaConfig() {
return kafkaConfig;
}
public void setKafkaConfig(Properties kafkaConfig) {
this.kafkaConfig = kafkaConfig;
}
public String getEsIndexPrefix() {
return esIndexPrefix;
}
public void setEsIndexPrefix(String esIndexPrefix) {
this.esIndexPrefix = esIndexPrefix;
}
public String getOfflineBrokers() {
return offlineBrokers;
}
public void setOfflineBrokers(String offlineBrokers) {
this.offlineBrokers = offlineBrokers;
}
public int getBeforeSeconds() {
return beforeSeconds;
}
public void setBeforeSeconds(int beforeSeconds) {
this.beforeSeconds = beforeSeconds;
}
public double getTopicLeaderThreshold() {
return topicLeaderThreshold;
}
public void setTopicLeaderThreshold(double topicLeaderThreshold) {
this.topicLeaderThreshold = topicLeaderThreshold;
}
@Override
public String toString() {
return "BalanceParameter{" +
"cluster='" + cluster + '\'' +
", kafkaConfig=" + kafkaConfig +
", esRestURL='" + esRestURL + '\'' +
", esIndexPrefix='" + esIndexPrefix + '\'' +
", goals=" + goals +
", excludedTopics='" + excludedTopics + '\'' +
", offlineBrokers='" + offlineBrokers + '\'' +
", balanceBrokers='" + balanceBrokers + '\'' +
", topicReplicaThreshold=" + topicReplicaThreshold +
", diskThreshold=" + diskThreshold +
", cpuThreshold=" + cpuThreshold +
", networkInThreshold=" + networkInThreshold +
", networkOutThreshold=" + networkOutThreshold +
", beforeSeconds=" + beforeSeconds +
", hardwareEnv=" + hardwareEnv +
", topicLeaderThreshold=" + topicLeaderThreshold +
'}';
}
}

View File

@@ -0,0 +1,43 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
import java.util.List;
public class BalanceTask {
private String topic;
private int partition;
// Replica assignment list
private List<Integer> replicas;
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public int getPartition() {
return partition;
}
public void setPartition(int partition) {
this.partition = partition;
}
public List<Integer> getReplicas() {
return replicas;
}
public void setReplicas(List<Integer> replicas) {
this.replicas = replicas;
}
@Override
public String toString() {
return "BalanceTask{" +
"topic='" + topic + '\'' +
", partition=" + partition +
", replicas=" + replicas +
'}';
}
}

View File

@@ -0,0 +1,41 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
public class BalanceThreshold {
private final Resource _resource;
private final double _upper;
private final double _lower;
public BalanceThreshold(Resource resource, double threshold, double avgResource) {
_resource = resource;
_upper = avgResource * (1 + threshold);
_lower = avgResource * (1 - threshold);
}
public Resource resource() {
return _resource;
}
public boolean isInRange(double utilization) {
return utilization > _lower && utilization < _upper;
}
public int state(double utilization) {
if (utilization <= _lower) {
return -1;
} else if (utilization >= _upper) {
return 1;
}
return 0;
}
@Override
public String toString() {
return "BalanceThreshold{" +
"_resource=" + _resource +
", _upper=" + _upper +
", _lower=" + _lower +
'}';
}
}
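A worked example of the threshold semantics above, assuming an average disk utilization of 0.5 and a 10% threshold, so the balanced range is (0.45, 0.55):

BalanceThreshold threshold = new BalanceThreshold(Resource.DISK, 0.1, 0.5);
threshold.isInRange(0.50); // true: 0.45 < 0.50 < 0.55
threshold.state(0.40);     // -1: at or below the lower bound, under-utilized
threshold.state(0.50);     //  0: inside the balanced range
threshold.state(0.60);     //  1: at or above the upper bound, over-utilized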

View File

@@ -0,0 +1,144 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
public class BrokerBalanceState {
// Average CPU resource
private Double cpuAvgResource;
// CPU utilization
private Double cpuUtilization;
// -1: below the balanced range
//  0: within the balanced range
//  1: above the balanced range
private Integer cpuBalanceState;
// Average disk resource
private Double diskAvgResource;
// Disk utilization
private Double diskUtilization;
// Disk balance state
private Integer diskBalanceState;
// Average bytes-in resource
private Double bytesInAvgResource;
// Bytes-in utilization
private Double bytesInUtilization;
// Bytes-in balance state
private Integer bytesInBalanceState;
// Average bytes-out resource
private Double bytesOutAvgResource;
// Bytes-out utilization
private Double bytesOutUtilization;
// Bytes-out balance state
private Integer bytesOutBalanceState;
public Double getCpuAvgResource() {
return cpuAvgResource;
}
public void setCpuAvgResource(Double cpuAvgResource) {
this.cpuAvgResource = cpuAvgResource;
}
public Double getCpuUtilization() {
return cpuUtilization;
}
public void setCpuUtilization(Double cpuUtilization) {
this.cpuUtilization = cpuUtilization;
}
public Integer getCpuBalanceState() {
return cpuBalanceState;
}
public void setCpuBalanceState(Integer cpuBalanceState) {
this.cpuBalanceState = cpuBalanceState;
}
public Double getDiskAvgResource() {
return diskAvgResource;
}
public void setDiskAvgResource(Double diskAvgResource) {
this.diskAvgResource = diskAvgResource;
}
public Double getDiskUtilization() {
return diskUtilization;
}
public void setDiskUtilization(Double diskUtilization) {
this.diskUtilization = diskUtilization;
}
public Integer getDiskBalanceState() {
return diskBalanceState;
}
public void setDiskBalanceState(Integer diskBalanceState) {
this.diskBalanceState = diskBalanceState;
}
public Double getBytesInAvgResource() {
return bytesInAvgResource;
}
public void setBytesInAvgResource(Double bytesInAvgResource) {
this.bytesInAvgResource = bytesInAvgResource;
}
public Double getBytesInUtilization() {
return bytesInUtilization;
}
public void setBytesInUtilization(Double bytesInUtilization) {
this.bytesInUtilization = bytesInUtilization;
}
public Integer getBytesInBalanceState() {
return bytesInBalanceState;
}
public void setBytesInBalanceState(Integer bytesInBalanceState) {
this.bytesInBalanceState = bytesInBalanceState;
}
public Double getBytesOutAvgResource() {
return bytesOutAvgResource;
}
public void setBytesOutAvgResource(Double bytesOutAvgResource) {
this.bytesOutAvgResource = bytesOutAvgResource;
}
public Double getBytesOutUtilization() {
return bytesOutUtilization;
}
public void setBytesOutUtilization(Double bytesOutUtilization) {
this.bytesOutUtilization = bytesOutUtilization;
}
public Integer getBytesOutBalanceState() {
return bytesOutBalanceState;
}
public void setBytesOutBalanceState(Integer bytesOutBalanceState) {
this.bytesOutBalanceState = bytesOutBalanceState;
}
@Override
public String toString() {
return "BrokerBalanceState{" +
"cpuAvgResource=" + cpuAvgResource +
", cpuUtilization=" + cpuUtilization +
", cpuBalanceState=" + cpuBalanceState +
", diskAvgResource=" + diskAvgResource +
", diskUtilization=" + diskUtilization +
", diskBalanceState=" + diskBalanceState +
", bytesInAvgResource=" + bytesInAvgResource +
", bytesInUtilization=" + bytesInUtilization +
", bytesInBalanceState=" + bytesInBalanceState +
", bytesOutAvgResource=" + bytesOutAvgResource +
", bytesOutUtilization=" + bytesOutUtilization +
", bytesOutBalanceState=" + bytesOutBalanceState +
'}';
}
}

View File

@@ -0,0 +1,76 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
public class HostEnv {
// Broker id
private int id;
// Host IP
private String host;
// Rack id
private String rackId;
// Number of CPU cores
private int cpu;
// Total disk capacity
private double disk;
// NIC capacity
private double network;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public String getRackId() {
return rackId;
}
public void setRackId(String rackId) {
this.rackId = rackId;
}
public int getCpu() {
return cpu;
}
public void setCpu(int cpu) {
this.cpu = cpu;
}
public double getDisk() {
return disk;
}
public void setDisk(double disk) {
this.disk = disk;
}
public double getNetwork() {
return network;
}
public void setNetwork(double network) {
this.network = network;
}
@Override
public String toString() {
return "HostEnv{" +
"id=" + id +
", host='" + host + '\'' +
", rackId='" + rackId + '\'' +
", cpu=" + cpu +
", disk=" + disk +
", network=" + network +
'}';
}
}
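KafkaRebalanceMain reads --hardware-env-file with Jackson into a List<HostEnv>, so the file is a JSON array whose keys match the fields above. A sketch (inside a main that can throw IOException) that writes such a file; the host and capacity values, and their units, are illustrative assumptions, not values from this commit:

HostEnv env = new HostEnv();
env.setId(0);
env.setHost("10.0.0.1");          // assumed broker host
env.setRackId("r1");
env.setCpu(16);                   // CPU cores
env.setDisk(2_000_000_000_000d);  // total disk capacity, assumed bytes
env.setNetwork(1_250_000_000d);   // NIC capacity, assumed bytes/s
// Written JSON: [{"id":0,"host":"10.0.0.1","rackId":"r1","cpu":16,"disk":2.0E12,"network":1.25E9}]
new ObjectMapper().writeValue(new File("/tmp/hardware-env.json"), Collections.singletonList(env));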

View File

@@ -0,0 +1,218 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.xiaojukeji.know.streaming.km.rebalance.model.Broker;
import com.xiaojukeji.know.streaming.km.rebalance.model.ClusterModel;
import com.xiaojukeji.know.streaming.km.rebalance.model.ReplicaPlacementInfo;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import com.xiaojukeji.know.streaming.km.rebalance.optimizer.ExecutionProposal;
import com.xiaojukeji.know.streaming.km.rebalance.optimizer.OptimizationOptions;
import com.xiaojukeji.know.streaming.km.rebalance.utils.GoalUtils;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.stream.Collectors;
public class OptimizerResult {
private static final Logger logger = LoggerFactory.getLogger(OptimizerResult.class);
private Set<ExecutionProposal> _proposals;
private final BalanceParameter parameter;
private Set<Broker> _balanceBrokersBefore;
private Set<Broker> _balanceBrokersAfter;
private final ClusterModel clusterModel;
private final Map<TopicPartition, List<BalanceActionHistory>> balanceActionHistory;
private final Map<String, BalanceThreshold> balanceThreshold;
public OptimizerResult(ClusterModel clusterModel, OptimizationOptions optimizationOptions) {
this.clusterModel = clusterModel;
balanceActionHistory = clusterModel.balanceActionHistory();
parameter = optimizationOptions.parameter();
double[] clusterAvgResource = clusterModel.avgOfUtilization();
balanceThreshold = GoalUtils.getBalanceThreshold(parameter, clusterAvgResource);
}
/**
* Plan overview
*/
public BalanceOverview resultOverview() {
BalanceOverview overview = new BalanceOverview();
overview.setTopicBlacklist(parameter.getExcludedTopics());
overview.setMoveReplicas(_proposals.size());
overview.setNodeRange(parameter.getBalanceBrokers());
overview.setRemoveNode(parameter.getOfflineBrokers());
Map<Resource, Double> balanceThreshold = new HashMap<>();
balanceThreshold.put(Resource.CPU, parameter.getCpuThreshold());
balanceThreshold.put(Resource.DISK, parameter.getDiskThreshold());
balanceThreshold.put(Resource.NW_IN, parameter.getNetworkInThreshold());
balanceThreshold.put(Resource.NW_OUT, parameter.getNetworkOutThreshold());
overview.setBalanceThreshold(balanceThreshold);
Set<String> moveTopicsSet = _proposals.stream().map(j -> j.tp().topic()).collect(Collectors.toSet());
String moveTopics = String.join(",", moveTopicsSet);
overview.setMoveTopics(moveTopics);
// Leader-only switches move no data, so they are excluded from the total
double totalMoveSize = _proposals.stream().filter(i -> Integer.max(i.replicasToAdd().size(), i.replicasToRemove().size()) != 0).mapToDouble(ExecutionProposal::partitionSize).sum();
overview.setTotalMoveSize(totalMoveSize);
return overview;
}
/**
* Plan details per broker
*/
public Map<Integer, BalanceDetailed> resultDetailed() {
Map<Integer, BalanceDetailed> details = new HashMap<>();
_balanceBrokersBefore.forEach(i -> {
BalanceDetailed balanceDetailed = new BalanceDetailed();
balanceDetailed.setBrokerId(i.id());
balanceDetailed.setHost(i.host());
balanceDetailed.setCurrentCPUUtilization(i.utilizationFor(Resource.CPU));
balanceDetailed.setCurrentDiskUtilization(i.utilizationFor(Resource.DISK));
balanceDetailed.setCurrentNetworkInUtilization(i.utilizationFor(Resource.NW_IN));
balanceDetailed.setCurrentNetworkOutUtilization(i.utilizationFor(Resource.NW_OUT));
details.put(i.id(), balanceDetailed);
});
Map<Integer, Double> totalAddReplicaCount = new HashMap<>();
Map<Integer, Double> totalAddDataSize = new HashMap<>();
Map<Integer, Double> totalRemoveReplicaCount = new HashMap<>();
Map<Integer, Double> totalRemoveDataSize = new HashMap<>();
_proposals.forEach(i -> {
i.replicasToAdd().forEach((k, v) -> {
totalAddReplicaCount.merge(k, v[0], Double::sum);
totalAddDataSize.merge(k, v[1], Double::sum);
});
i.replicasToRemove().forEach((k, v) -> {
totalRemoveReplicaCount.merge(k, v[0], Double::sum);
totalRemoveDataSize.merge(k, v[1], Double::sum);
});
});
_balanceBrokersAfter.forEach(i -> {
BalanceDetailed balanceDetailed = details.get(i.id());
balanceDetailed.setLastCPUUtilization(i.utilizationFor(Resource.CPU));
balanceDetailed.setLastDiskUtilization(i.utilizationFor(Resource.DISK));
balanceDetailed.setLastNetworkInUtilization(i.utilizationFor(Resource.NW_IN));
balanceDetailed.setLastNetworkOutUtilization(i.utilizationFor(Resource.NW_OUT));
balanceDetailed.setMoveInReplicas(totalAddReplicaCount.getOrDefault(i.id(), 0.0));
balanceDetailed.setMoveOutReplicas(totalRemoveReplicaCount.getOrDefault(i.id(), 0.0));
balanceDetailed.setMoveInDiskSize(totalAddDataSize.getOrDefault(i.id(), 0.0));
balanceDetailed.setMoveOutDiskSize(totalRemoveDataSize.getOrDefault(i.id(), 0.0));
for (String str : parameter.getGoals()) {
BalanceThreshold threshold = balanceThreshold.get(str);
if (!threshold.isInRange(i.utilizationFor(threshold.resource()))) {
balanceDetailed.setBalanceState(-1);
break;
}
}
});
return details;
}
/**
* Plan tasks (partition reassignments)
*/
public List<BalanceTask> resultTask() {
List<BalanceTask> balanceTasks = new ArrayList<>();
_proposals.forEach(proposal -> {
BalanceTask task = new BalanceTask();
task.setTopic(proposal.tp().topic());
task.setPartition(proposal.tp().partition());
List<Integer> replicas = proposal.newReplicas().stream().map(ReplicaPlacementInfo::brokerId).collect(Collectors.toList());
task.setReplicas(replicas);
balanceTasks.add(task);
});
return balanceTasks;
}
public Map<TopicPartition, List<BalanceActionHistory>> resultBalanceActionHistory() {
return Collections.unmodifiableMap(balanceActionHistory);
}
public String resultJsonOverview() {
try {
return new ObjectMapper().writeValueAsString(resultOverview());
} catch (Exception e) {
logger.error("result overview json process error", e);
}
return "{}";
}
public String resultJsonDetailed() {
try {
return new ObjectMapper().writeValueAsString(resultDetailed());
} catch (Exception e) {
logger.error("result detailed json process error", e);
}
return "{}";
}
public String resultJsonTask() {
try {
Map<String, Object> reassign = new HashMap<>();
reassign.put("partitions", resultTask());
reassign.put("version", 1);
return new ObjectMapper().writeValueAsString(reassign);
} catch (Exception e) {
logger.error("result task json process error", e);
}
return "{}";
}
public List<TopicChangeHistory> resultTopicChangeHistory() {
List<TopicChangeHistory> topicChangeHistoryList = new ArrayList<>();
for (ExecutionProposal proposal : _proposals) {
TopicChangeHistory changeHistory = new TopicChangeHistory();
changeHistory.setTopic(proposal.tp().topic());
changeHistory.setPartition(proposal.tp().partition());
changeHistory.setOldLeader(proposal.oldLeader().brokerId());
changeHistory.setNewLeader(proposal.newReplicas().get(0).brokerId());
List<Integer> balanceBefore = proposal.oldReplicas().stream().map(ReplicaPlacementInfo::brokerId).collect(Collectors.toList());
List<Integer> balanceAfter = proposal.newReplicas().stream().map(ReplicaPlacementInfo::brokerId).collect(Collectors.toList());
changeHistory.setBalanceBefore(balanceBefore);
changeHistory.setBalanceAfter(balanceAfter);
topicChangeHistoryList.add(changeHistory);
}
return topicChangeHistoryList;
}
public String resultJsonTopicChangeHistory() {
try {
return new ObjectMapper().writeValueAsString(resultTopicChangeHistory());
} catch (Exception e) {
logger.error("result balance topic change history json process error", e);
}
return "{}";
}
public String resultJsonBalanceActionHistory() {
try {
return new ObjectMapper().writeValueAsString(balanceActionHistory);
} catch (Exception e) {
logger.error("result balance action history json process error", e);
}
return "{}";
}
public void setBalanceBrokersFormBefore(Set<Broker> balanceBrokersBefore) {
_balanceBrokersBefore = new HashSet<>();
balanceBrokersBefore.forEach(i -> {
Broker broker = new Broker(i.rack(), i.id(), i.host(), false, i.capacity());
broker.load().addLoad(i.load());
_balanceBrokersBefore.add(broker);
});
}
public void setBalanceBrokersFormAfter(Set<Broker> balanceBrokersAfter) {
_balanceBrokersAfter = balanceBrokersAfter;
}
public void setExecutionProposal(Set<ExecutionProposal> proposals) {
_proposals = proposals;
}
// Exposed for tests
public ClusterModel clusterModel() {
return clusterModel;
}
}

View File

@@ -0,0 +1,78 @@
package com.xiaojukeji.know.streaming.km.rebalance.executor.common;
import java.util.List;
public class TopicChangeHistory {
// Topic being balanced
private String topic;
// Partition being balanced
private int partition;
// Broker id of the old leader
private int oldLeader;
// Replica distribution before balancing
private List<Integer> balanceBefore;
// Broker id of the new leader
private int newLeader;
// Replica distribution after balancing
private List<Integer> balanceAfter;
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public int getPartition() {
return partition;
}
public void setPartition(int partition) {
this.partition = partition;
}
public int getOldLeader() {
return oldLeader;
}
public void setOldLeader(int oldLeader) {
this.oldLeader = oldLeader;
}
public List<Integer> getBalanceBefore() {
return balanceBefore;
}
public void setBalanceBefore(List<Integer> balanceBefore) {
this.balanceBefore = balanceBefore;
}
public int getNewLeader() {
return newLeader;
}
public void setNewLeader(int newLeader) {
this.newLeader = newLeader;
}
public List<Integer> getBalanceAfter() {
return balanceAfter;
}
public void setBalanceAfter(List<Integer> balanceAfter) {
this.balanceAfter = balanceAfter;
}
@Override
public String toString() {
return "TopicChangeHistory{" +
"topic='" + topic + '\'' +
", partition='" + partition + '\'' +
", oldLeader=" + oldLeader +
", balanceBefore=" + balanceBefore +
", newLeader=" + newLeader +
", balanceAfter=" + balanceAfter +
'}';
}
}

View File

@@ -0,0 +1,51 @@
package com.xiaojukeji.know.streaming.km.rebalance.metric;
/**
* @author leewei
* @date 2022/5/12
*/
public class Metric {
private String topic;
private int partition;
private double cpu;
private double bytesIn;
private double bytesOut;
private double disk;
public Metric() {
}
public Metric(String topic, int partition, double cpu, double bytesIn, double bytesOut, double disk) {
this.topic = topic;
this.partition = partition;
this.cpu = cpu;
this.bytesIn = bytesIn;
this.bytesOut = bytesOut;
this.disk = disk;
}
public String topic() {
return topic;
}
public int partition() {
return partition;
}
public double cpu() {
return cpu;
}
public double bytesIn() {
return bytesIn;
}
public double bytesOut() {
return bytesOut;
}
public double disk() {
return disk;
}
}

View File

@@ -0,0 +1,9 @@
package com.xiaojukeji.know.streaming.km.rebalance.metric;
/**
* @author leewei
* @date 2022/4/29
*/
public interface MetricStore {
Metrics getMetrics(String clusterName, int beforeSeconds);
}

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.know.streaming.km.rebalance.metric;
import com.xiaojukeji.know.streaming.km.rebalance.model.Load;
import com.xiaojukeji.know.streaming.km.rebalance.model.Resource;
import org.apache.kafka.common.TopicPartition;
import java.util.*;
/**
* @author leewei
* @date 2022/4/29
*/
public class Metrics {
private final Map<TopicPartition, Metric> metricByTopicPartition;
public Metrics() {
this.metricByTopicPartition = new HashMap<>();
}
public void addMetrics(Metric metric) {
TopicPartition topicPartition = new TopicPartition(metric.topic(), metric.partition());
this.metricByTopicPartition.put(topicPartition, metric);
}
public List<Metric> values() {
return Collections.unmodifiableList(new ArrayList<>(this.metricByTopicPartition.values()));
}
public Metric metric(TopicPartition topicPartition) {
return this.metricByTopicPartition.get(topicPartition);
}
public Load load(TopicPartition topicPartition) {
Metric metric = this.metricByTopicPartition.get(topicPartition);
if (metric == null) {
return null;
}
Load load = new Load();
load.setLoad(Resource.CPU, metric.cpu());
load.setLoad(Resource.NW_IN, metric.bytesIn());
load.setLoad(Resource.NW_OUT, metric.bytesOut());
load.setLoad(Resource.DISK, metric.disk());
return load;
}
}

View File

@@ -0,0 +1,109 @@
package com.xiaojukeji.know.streaming.km.rebalance.metric.elasticsearch;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.xiaojukeji.know.streaming.km.rebalance.metric.Metric;
import com.xiaojukeji.know.streaming.km.rebalance.metric.MetricStore;
import com.xiaojukeji.know.streaming.km.rebalance.metric.Metrics;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Set;
import java.util.TreeSet;
/**
* @author leewei
* @date 2022/4/29
*/
public class ElasticsearchMetricStore implements MetricStore {
private final Logger logger = LoggerFactory.getLogger(ElasticsearchMetricStore.class);
private final ObjectMapper objectMapper = new ObjectMapper();
private final String hosts;
private final String indexPrefix;
private final String format;
public ElasticsearchMetricStore(String hosts, String indexPrefix) {
this(hosts, indexPrefix, "yyyy-MM-dd");
}
public ElasticsearchMetricStore(String hosts, String indexPrefix, String format) {
this.hosts = hosts;
this.indexPrefix = indexPrefix;
this.format = format;
}
@Override
public Metrics getMetrics(String clusterName, int beforeSeconds) {
Metrics metrics = new Metrics();
try {
String metricsQueryJson = IOUtils.resourceToString("/MetricsQuery.json", StandardCharsets.UTF_8);
metricsQueryJson = metricsQueryJson.replaceAll("<var_before_time>", Integer.toString(beforeSeconds))
.replaceAll("<var_cluster_name>", clusterName);
try (RestClient restClient = RestClient.builder(toHttpHosts(this.hosts)).build()) {
Request request = new Request(
"GET",
"/" + indices(beforeSeconds) + "/_search");
request.setJsonEntity(metricsQueryJson);
logger.debug("Es metrics query for cluster: {} request: {} dsl: {}", clusterName, request, metricsQueryJson);
Response response = restClient.performRequest(request);
if (response.getStatusLine().getStatusCode() == 200) {
JsonNode rootNode = objectMapper.readTree(response.getEntity().getContent());
JsonNode topics = rootNode.at("/aggregations/by_topic/buckets");
for (JsonNode topic : topics) {
String topicName = topic.path("key").asText();
JsonNode partitions = topic.at("/by_partition/buckets");
for (JsonNode partition : partitions) {
int partitionId = partition.path("key").asInt();
// double cpu = partition.at("/avg_cpu/value").asDouble();
double cpu = 0D; // per-partition CPU is not collected yet (query above commented out), so it defaults to 0
double bytesIn = partition.at("/avg_bytes_in/value").asDouble();
double bytesOut = partition.at("/avg_bytes_out/value").asDouble();
double disk = partition.at("/lastest_disk/hits/hits/0/_source/metrics/LogSize").asDouble();
// register this partition's metric
metrics.addMetrics(new Metric(topicName, partitionId, cpu, bytesIn, bytesOut, disk));
}
}
}
}
} catch (IOException e) {
throw new IllegalArgumentException("Cannot get metrics of cluster: " + clusterName, e);
}
logger.debug("Es metrics query for cluster: {} result count: {}", clusterName, metrics.values().size());
return metrics;
}
private String indices(long beforeSeconds) {
Set<String> indices = new TreeSet<>();
DateFormat df = new SimpleDateFormat(this.format);
long endTime = System.currentTimeMillis();
long time = endTime - (beforeSeconds * 1000);
while (time < endTime) {
indices.add(this.indexPrefix + df.format(new Date(time)));
time += 24 * 60 * 60 * 1000; // add 24h
}
indices.add(this.indexPrefix + df.format(new Date(endTime)));
return String.join(",", indices);
}
private static HttpHost[] toHttpHosts(String url) {
String[] nodes = url.split(",");
HttpHost[] hosts = new HttpHost[nodes.length];
for (int i = 0; i < nodes.length; i++) {
String[] ipAndPort = nodes[i].split(":");
hosts[i] = new HttpHost(ipAndPort[0], ipAndPort.length > 1 ? Integer.parseInt(ipAndPort[1]) : 9200);
}
return hosts;
}
}
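A sketch of querying the store directly; the ES host and index prefix are assumptions and must match the daily indices the KM metrics collector writes:

MetricStore store = new ElasticsearchMetricStore("127.0.0.1:9200", "ks_kafka_partition_metric_");
Metrics metrics = store.getMetrics("demo-cluster", 300); // metrics from the last 300 seconds
for (Metric m : metrics.values()) {
    System.out.printf("%s-%d bytesIn=%.1f bytesOut=%.1f disk=%.1f%n",
            m.topic(), m.partition(), m.bytesIn(), m.bytesOut(), m.disk());
}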

View File

@@ -0,0 +1,222 @@
package com.xiaojukeji.know.streaming.km.rebalance.model;
import org.apache.kafka.common.TopicPartition;
import java.util.*;
import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
* @author leewei
* @date 2022/4/29
*/
public class Broker implements Comparable<Broker> {
public static final Broker NONE = new Broker(new Rack("-1"), -1, "localhost", true, new Capacity());
private final Rack rack;
private final int id;
private final String host;
private final boolean isOffline;
private final Set<Replica> replicas;
private final Set<Replica> leaderReplicas;
private final Map<String, Map<Integer, Replica>> topicReplicas;
private final Load load;
private final Capacity capacity;
public Broker(Rack rack, int id, String host, boolean isOffline, Capacity capacity) {
this.rack = rack;
this.id = id;
this.host = host;
this.isOffline = isOffline;
this.replicas = new HashSet<>();
this.leaderReplicas = new HashSet<>();
this.topicReplicas = new HashMap<>();
this.load = new Load();
this.capacity = capacity;
}
public Rack rack() {
return rack;
}
public int id() {
return id;
}
public String host() {
return host;
}
public boolean isOffline() {
return isOffline;
}
public Set<Replica> replicas() {
return Collections.unmodifiableSet(this.replicas);
}
public SortedSet<Replica> sortedReplicasFor(Resource resource, boolean reverse) {
return sortedReplicasFor(null, resource, reverse);
}
public SortedSet<Replica> sortedReplicasFor(Predicate<? super Replica> filter, Resource resource, boolean reverse) {
Comparator<Replica> comparator =
Comparator.<Replica>comparingDouble(r -> r.load().loadFor(resource))
.thenComparingInt(Replica::hashCode);
if (reverse)
comparator = comparator.reversed();
SortedSet<Replica> sortedReplicas = new TreeSet<>(comparator);
if (filter == null) {
sortedReplicas.addAll(this.replicas);
} else {
sortedReplicas.addAll(this.replicas.stream()
.filter(filter).collect(Collectors.toList()));
}
return sortedReplicas;
}
public Set<Replica> leaderReplicas() {
return Collections.unmodifiableSet(this.leaderReplicas);
}
public Load load() {
return load;
}
public Capacity capacity() {
return capacity;
}
public double utilizationFor(Resource resource) {
return this.load.loadFor(resource) / this.capacity.capacityFor(resource);
}
public double expectedUtilizationAfterAdd(Resource resource, Load loadToChange) {
return (this.load.loadFor(resource) + ((loadToChange == null) ? 0 : loadToChange.loadFor(resource)))
/ this.capacity.capacityFor(resource);
}
public double expectedUtilizationAfterRemove(Resource resource, Load loadToChange) {
return (this.load.loadFor(resource) - ((loadToChange == null) ? 0 : loadToChange.loadFor(resource)))
/ this.capacity.capacityFor(resource);
}
public Replica replica(TopicPartition topicPartition) {
Map<Integer, Replica> replicas = this.topicReplicas.get(topicPartition.topic());
if (replicas == null) {
return null;
}
return replicas.get(topicPartition.partition());
}
void addReplica(Replica replica) {
// Add replica to list of all replicas in the broker.
if (this.replicas.contains(replica)) {
throw new IllegalStateException(String.format("Broker %d already has replica %s", this.id,
replica.topicPartition()));
}
this.replicas.add(replica);
// Add topic replica.
this.topicReplicas.computeIfAbsent(replica.topicPartition().topic(), t -> new HashMap<>())
.put(replica.topicPartition().partition(), replica);
// Add leader replica.
if (replica.isLeader()) {
this.leaderReplicas.add(replica);
}
// Add replica load to the broker load.
this.load.addLoad(replica.load());
}
Replica removeReplica(TopicPartition topicPartition) {
Replica replica = replica(topicPartition);
if (replica != null) {
this.replicas.remove(replica);
Map<Integer, Replica> replicas = this.topicReplicas.get(topicPartition.topic());
if (replicas != null) {
replicas.remove(topicPartition.partition());
}
if (replica.isLeader()) {
this.leaderReplicas.remove(replica);
}
this.load.subtractLoad(replica.load());
}
return replica;
}
Load makeFollower(TopicPartition topicPartition) {
Replica replica = replica(topicPartition);
Load leaderLoadDelta = replica.makeFollower();
// Remove leadership load from load.
this.load.subtractLoad(leaderLoadDelta);
this.leaderReplicas.remove(replica);
return leaderLoadDelta;
}
void makeLeader(TopicPartition topicPartition, Load leaderLoadDelta) {
Replica replica = replica(topicPartition);
replica.makeLeader(leaderLoadDelta);
// Add leadership load to load.
this.load.addLoad(leaderLoadDelta);
this.leaderReplicas.add(replica);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Broker broker = (Broker) o;
return id == broker.id;
}
@Override
public int hashCode() {
return Objects.hash(id);
}
@Override
public int compareTo(Broker o) {
return Integer.compare(id, o.id());
}
@Override
public String toString() {
return "Broker{" +
"id=" + id +
", host='" + host + '\'' +
", rack=" + rack.id() +
", replicas=" + replicas +
", leaderReplicas=" + leaderReplicas +
", topicReplicas=" + topicReplicas +
", load=" + load +
", capacity=" + capacity +
'}';
}
public int numLeadersFor(String topicName) {
return (int) replicasOfTopicInBroker(topicName).stream().filter(Replica::isLeader).count();
}
public Set<String> topics() {
return topicReplicas.keySet();
}
public int numReplicasOfTopicInBroker(String topic) {
Map<Integer, Replica> replicaMap = topicReplicas.get(topic);
return replicaMap == null ? 0 : replicaMap.size();
}
public Collection<Replica> replicasOfTopicInBroker(String topic) {
Map<Integer, Replica> replicaMap = topicReplicas.get(topic);
return replicaMap == null ? Collections.emptySet() : replicaMap.values();
}
public Set<Replica> currentOfflineReplicas() {
return replicas.stream().filter(Replica::isCurrentOffline).collect(Collectors.toSet());
}
}
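A sketch of the load and capacity bookkeeping above, built through ClusterModel (below) since rack and replica wiring is package-internal; the numbers are illustrative:

ClusterModel model = new ClusterModel();
model.addRack("r1");
Capacity capacity = new Capacity();
capacity.setCapacity(Resource.DISK, 1000.0);
Broker broker = model.addBroker("r1", 0, "10.0.0.1", false, capacity);
Load load = new Load();
load.setLoad(Resource.DISK, 400.0);
model.addReplica(0, new TopicPartition("demo-topic", 0), true, load); // leader replica
broker.utilizationFor(Resource.DISK);                    // 0.4 (400 / 1000)
broker.expectedUtilizationAfterAdd(Resource.DISK, load); // 0.8 if the same load were added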

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.know.streaming.km.rebalance.model;
import java.util.Arrays;
/**
* @author leewei
* @date 2022/5/9
*/
public class Capacity {
private final double[] values;
public Capacity() {
this.values = new double[Resource.values().length];
}
public void setCapacity(Resource resource, double capacity) {
this.values[resource.id()] = capacity;
}
public double capacityFor(Resource resource) {
return this.values[resource.id()];
}
public void addCapacity(Capacity capacityToAdd) {
for (Resource resource : Resource.values()) {
this.setCapacity(resource, this.capacityFor(resource) + capacityToAdd.capacityFor(resource));
}
}
@Override
public String toString() {
return "Capacity{" +
"values=" + Arrays.toString(values) +
'}';
}
}

View File

@@ -0,0 +1,236 @@
package com.xiaojukeji.know.streaming.km.rebalance.model;
import com.xiaojukeji.know.streaming.km.rebalance.executor.common.BalanceActionHistory;
import org.apache.kafka.common.TopicPartition;
import java.util.*;
import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
* @author leewei
* @date 2022/4/29
*/
public class ClusterModel {
private final Map<String, Rack> racksById;
private final Map<Integer, Broker> brokersById;
private final Map<String, Map<TopicPartition, Partition>> partitionsByTopic;
private Map<TopicPartition, List<BalanceActionHistory>> balanceActionHistory;
public ClusterModel() {
this.racksById = new HashMap<>();
this.brokersById = new HashMap<>();
this.partitionsByTopic = new HashMap<>();
this.balanceActionHistory = new HashMap<>();
}
public Rack rack(String rackId) {
return this.racksById.get(rackId);
}
public Rack addRack(String rackId) {
Rack rack = new Rack(rackId);
this.racksById.putIfAbsent(rackId, rack);
return this.racksById.get(rackId);
}
public SortedSet<Broker> brokers() {
return new TreeSet<>(this.brokersById.values());
}
public Set<String> topics() {
return this.partitionsByTopic.keySet();
}
public SortedSet<Partition> topic(String name) {
return new TreeSet<>(this.partitionsByTopic.get(name).values());
}
public SortedSet<Broker> sortedBrokersFor(Resource resource, boolean reverse) {
return sortedBrokersFor(null, resource, reverse);
}
public SortedSet<Broker> sortedBrokersFor(Predicate<? super Broker> filter, Resource resource, boolean reverse) {
Comparator<Broker> comparator =
Comparator.<Broker>comparingDouble(b -> b.utilizationFor(resource))
.thenComparingInt(Broker::id);
if (reverse)
comparator = comparator.reversed();
SortedSet<Broker> sortedBrokers = new TreeSet<>(comparator);
if (filter == null) {
sortedBrokers.addAll(this.brokersById.values());
} else {
sortedBrokers.addAll(this.brokersById.values().stream()
.filter(filter).collect(Collectors.toList()));
}
return sortedBrokers;
}
public Load load() {
Load load = new Load();
for (Broker broker : this.brokersById.values()) {
load.addLoad(broker.load());
}
return load;
}
public Capacity capacity() {
Capacity capacity = new Capacity();
for (Broker broker : this.brokersById.values()) {
capacity.addCapacity(broker.capacity());
}
return capacity;
}
public double utilizationFor(Resource resource) {
return load().loadFor(resource) / capacity().capacityFor(resource);
}
public double[] avgOfUtilization() {
Load load = load();
Capacity capacity = capacity();
double[] utilizations = new double[Resource.values().length];
for (Resource resource : Resource.values()) {
utilizations[resource.id()] = load.loadFor(resource) / capacity.capacityFor(resource);
}
return utilizations;
}
public Broker broker(int brokerId) {
return this.brokersById.get(brokerId);
}
public Broker addBroker(String rackId, int brokerId, String host, boolean isOffline, Capacity capacity) {
Rack rack = rack(rackId);
if (rack == null)
throw new IllegalArgumentException("Rack: " + rackId + "is not exists.");
Broker broker = new Broker(rack, brokerId, host, isOffline, capacity);
rack.addBroker(broker);
this.brokersById.put(brokerId, broker);
return broker;
}
public Replica addReplica(int brokerId, TopicPartition topicPartition, boolean isLeader, Load load) {
return addReplica(brokerId, topicPartition, isLeader, false, load);
}
public Replica addReplica(int brokerId, TopicPartition topicPartition, boolean isLeader, boolean isOffline, Load load) {
Broker broker = broker(brokerId);
if (broker == null) {
throw new IllegalArgumentException("Broker: " + brokerId + "is not exists.");
}
Replica replica = new Replica(broker, topicPartition, isLeader, isOffline);
replica.setLoad(load);
// add to broker
broker.addReplica(replica);
Map<TopicPartition, Partition> partitions = this.partitionsByTopic
.computeIfAbsent(topicPartition.topic(), k -> new HashMap<>());
Partition partition = partitions.computeIfAbsent(topicPartition, Partition::new);
if (isLeader) {
partition.addLeader(replica, 0);
} else {
partition.addFollower(replica, partition.replicas().size());
}
return replica;
}
public Replica removeReplica(int brokerId, TopicPartition topicPartition) {
Broker broker = broker(brokerId);
return broker.removeReplica(topicPartition);
}
public void relocateLeadership(String goal, String actionType, TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
relocateLeadership(topicPartition, sourceBrokerId, destinationBrokerId);
addBalanceActionHistory(goal, actionType, topicPartition, sourceBrokerId, destinationBrokerId);
}
public void relocateLeadership(TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
Broker sourceBroker = broker(sourceBrokerId);
Replica sourceReplica = sourceBroker.replica(topicPartition);
if (!sourceReplica.isLeader()) {
throw new IllegalArgumentException("Cannot relocate leadership of partition " + topicPartition + "from broker "
+ sourceBrokerId + " to broker " + destinationBrokerId
+ " because the source replica isn't leader.");
}
Broker destinationBroker = broker(destinationBrokerId);
Replica destinationReplica = destinationBroker.replica(topicPartition);
if (destinationReplica.isLeader()) {
throw new IllegalArgumentException("Cannot relocate leadership of partition " + topicPartition + "from broker "
+ sourceBrokerId + " to broker " + destinationBrokerId
+ " because the destination replica is a leader.");
}
        // Demote the source replica; makeFollower returns the leadership-attributable
        // load, which is applied to the destination replica when it is promoted.
        Load leaderLoadDelta = sourceBroker.makeFollower(topicPartition);
        destinationBroker.makeLeader(topicPartition, leaderLoadDelta);
Partition partition = this.partitionsByTopic.get(topicPartition.topic()).get(topicPartition);
partition.relocateLeadership(destinationReplica);
}
public void relocateReplica(String goal, String actionType, TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
relocateReplica(topicPartition, sourceBrokerId, destinationBrokerId);
addBalanceActionHistory(goal, actionType, topicPartition, sourceBrokerId, destinationBrokerId);
}
public void relocateReplica(TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
Replica replica = removeReplica(sourceBrokerId, topicPartition);
if (replica == null) {
throw new IllegalArgumentException("Replica is not in the cluster.");
}
Broker destinationBroker = broker(destinationBrokerId);
replica.setBroker(destinationBroker);
destinationBroker.addReplica(replica);
}
private void addBalanceActionHistory(String goal, String actionType, TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
BalanceActionHistory history = new BalanceActionHistory();
history.setActionType(actionType);
history.setGoal(goal);
history.setTopic(topicPartition.topic());
history.setPartition(topicPartition.partition());
history.setSourceBrokerId(sourceBrokerId);
history.setDestinationBrokerId(destinationBrokerId);
this.balanceActionHistory.computeIfAbsent(topicPartition, k -> new ArrayList<>()).add(history);
}
    public Map<String, Integer> numLeadersPerTopic(Set<String> topics) {
        // Each partition has exactly one leader, so the leader count equals the partition count.
        Map<String, Integer> leaderCountByTopicNames = new HashMap<>();
        topics.forEach(topic -> leaderCountByTopicNames.put(topic, partitionsByTopic.get(topic).size()));
        return leaderCountByTopicNames;
    }
    public Map<TopicPartition, List<ReplicaPlacementInfo>> getReplicaDistribution() {
        Map<TopicPartition, List<ReplicaPlacementInfo>> replicaDistribution = new HashMap<>();
        for (Map<TopicPartition, Partition> partitionsOfTopic : partitionsByTopic.values()) {
            partitionsOfTopic.values().forEach(partition ->
                    partition.replicas().forEach(replica -> replicaDistribution
                            .computeIfAbsent(replica.topicPartition(), k -> new ArrayList<>())
                            .add(new ReplicaPlacementInfo(replica.broker().id(), ""))));
        }
        return replicaDistribution;
    }
    public Replica partition(TopicPartition tp) {
        // Resolve the current leader replica of the given partition.
        return partitionsByTopic.get(tp.topic()).get(tp).leader();
    }
    public Map<TopicPartition, ReplicaPlacementInfo> getLeaderDistribution() {
        Map<TopicPartition, ReplicaPlacementInfo> leaderDistribution = new HashMap<>();
        for (Broker broker : brokersById.values()) {
            broker.leaderReplicas().forEach(replica ->
                    leaderDistribution.put(replica.topicPartition(), new ReplicaPlacementInfo(broker.id(), "")));
        }
        return leaderDistribution;
    }
    public int numTopicReplicas(String topic) {
        // Count replicas across all partitions of the topic; the partition count alone would under-count.
        return partitionsByTopic.get(topic).values().stream()
                .mapToInt(partition -> partition.replicas().size())
                .sum();
    }
public Map<TopicPartition, List<BalanceActionHistory>> balanceActionHistory() {
return this.balanceActionHistory;
}
}
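
For orientation, the model above can be driven end to end roughly as follows. This is a minimal sketch, not the project's actual wiring: the enclosing class name (ClusterModel), its no-arg constructor, the Capacity no-arg constructor, its setCapacity mutator, and the Resource.DISK member are all assumptions, since none of them appears in this diff.

// Minimal usage sketch. Assumed (not shown in this diff): ClusterModel class name and
// no-arg constructor, Capacity no-arg constructor and setCapacity(Resource, double),
// and the Resource.DISK enum member.
import org.apache.kafka.common.TopicPartition;

public class ClusterModelExample {
    public static void main(String[] args) {
        ClusterModel cluster = new ClusterModel();
        cluster.addRack("rack-a");

        Capacity capacity = new Capacity();
        capacity.setCapacity(Resource.DISK, 1024.0);
        cluster.addBroker("rack-a", 1, "host-1", false, capacity);
        cluster.addBroker("rack-a", 2, "host-2", false, capacity);

        TopicPartition tp = new TopicPartition("demo-topic", 0);
        Load leaderLoad = new Load();
        leaderLoad.setLoad(Resource.DISK, 512.0);
        cluster.addReplica(1, tp, true, leaderLoad);      // leader on broker 1
        cluster.addReplica(2, tp, false, new Load());     // follower on broker 2

        System.out.println("disk utilization = " + cluster.utilizationFor(Resource.DISK));

        // Move leadership 1 -> 2 and record the action under a goal name.
        cluster.relocateLeadership("CapacityGoal", "LEADER_MOVE", tp, 1, 2);
        System.out.println(cluster.balanceActionHistory());
    }
}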

View File

@@ -0,0 +1,42 @@
package com.xiaojukeji.know.streaming.km.rebalance.model;
import java.util.Arrays;
/**
* @author leewei
* @date 2022/5/9
*/
public class Load {
private final double[] values;
public Load() {
this.values = new double[Resource.values().length];
}
public void setLoad(Resource resource, double load) {
this.values[resource.id()] = load;
}
public double loadFor(Resource resource) {
return this.values[resource.id()];
}
public void addLoad(Load loadToAdd) {
for (Resource resource : Resource.values()) {
this.setLoad(resource, this.loadFor(resource) + loadToAdd.loadFor(resource));
}
}
public void subtractLoad(Load loadToSubtract) {
for (Resource resource : Resource.values()) {
this.setLoad(resource, this.loadFor(resource) - loadToSubtract.loadFor(resource));
}
}
@Override
public String toString() {
return "Load{" +
"values=" + Arrays.toString(values) +
'}';
}
}
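
A quick illustration of the accumulate/rollback semantics above. Resource.DISK is an assumed member, since the Resource enum is not shown in this diff; everything else uses only the methods of Load.

public class LoadExample {
    public static void main(String[] args) {
        Load brokerLoad = new Load();
        brokerLoad.setLoad(Resource.DISK, 800.0);   // Resource.DISK is an assumed member

        Load replicaLoad = new Load();
        replicaLoad.setLoad(Resource.DISK, 200.0);

        brokerLoad.addLoad(replicaLoad);            // replica moves onto the broker -> 1000.0
        brokerLoad.subtractLoad(replicaLoad);       // move is rolled back           -> 800.0
        System.out.println(brokerLoad);
    }
}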

View File

@@ -0,0 +1,148 @@
package com.xiaojukeji.know.streaming.km.rebalance.model;
import org.apache.kafka.common.TopicPartition;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
/**
* @author leewei
* @date 2022/5/11
*/
public class Partition implements Comparable<Partition> {
private final TopicPartition topicPartition;
private final List<Replica> replicas;
public Partition(TopicPartition topicPartition) {
this.topicPartition = topicPartition;
this.replicas = new ArrayList<>();
}
public TopicPartition topicPartition() {
return topicPartition;
}
public List<Replica> replicas() {
return replicas;
}
public Broker originalLeaderBroker() {
return replicas.stream().filter(r -> r.original().isLeader())
.findFirst().orElseThrow(IllegalStateException::new).broker();
}
public Replica leader() {
return replicas.stream()
.filter(Replica::isLeader)
.findFirst()
.orElseThrow(() ->
new IllegalArgumentException("Not found leader of partition " + topicPartition)
);
}
public Replica leaderOrNull() {
return replicas.stream()
.filter(Replica::isLeader)
.findFirst()
.orElse(null);
}
public List<Replica> followers() {
return replicas.stream()
.filter(r -> !r.isLeader())
.collect(Collectors.toList());
}
Replica replica(long brokerId) {
return replicas.stream()
.filter(r -> r.broker().id() == brokerId)
.findFirst()
.orElseThrow(() ->
new IllegalArgumentException("Requested replica " + brokerId + " is not a replica of partition " + topicPartition)
);
}
    public boolean isLeaderChanged() {
        return replicas.stream().anyMatch(Replica::isLeaderChanged);
    }
public boolean isChanged() {
return replicas.stream().anyMatch(Replica::isChanged);
}
void addLeader(Replica leader, int index) {
if (leaderOrNull() != null) {
throw new IllegalArgumentException(String.format("Partition %s already has a leader replica %s. Cannot "
+ "add a new leader replica %s", this.topicPartition, leaderOrNull(), leader));
}
if (!leader.isLeader()) {
throw new IllegalArgumentException("Inconsistent leadership information. Trying to set " + leader.broker()
+ " as the leader for partition " + this.topicPartition + " while the replica is not marked "
+ "as a leader.");
}
this.replicas.add(index, leader);
}
void addFollower(Replica follower, int index) {
if (follower.isLeader()) {
throw new IllegalArgumentException("Inconsistent leadership information. Trying to add follower replica "
+ follower + " while it is a leader.");
}
if (!follower.topicPartition().equals(this.topicPartition)) {
throw new IllegalArgumentException("Inconsistent topic partition. Trying to add follower replica " + follower
+ " to partition " + this.topicPartition + ".");
}
this.replicas.add(index, follower);
}
void relocateLeadership(Replica newLeader) {
if (!newLeader.isLeader()) {
throw new IllegalArgumentException("Inconsistent leadership information. Trying to set " + newLeader.broker()
+ " as the leader for partition " + this.topicPartition + " while the replica is not marked "
+ "as a leader.");
}
int leaderPos = this.replicas.indexOf(newLeader);
swapReplicaPositions(0, leaderPos);
}
void swapReplicaPositions(int index1, int index2) {
Replica replica1 = this.replicas.get(index1);
Replica replica2 = this.replicas.get(index2);
this.replicas.set(index2, replica1);
this.replicas.set(index1, replica2);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Partition partition = (Partition) o;
return topicPartition.equals(partition.topicPartition);
}
@Override
public int hashCode() {
return Objects.hash(topicPartition);
}
@Override
public String toString() {
return "Partition{" +
"topicPartition=" + topicPartition +
", replicas=" + replicas +
", originalLeaderBroker=" + originalLeaderBroker().id() +
", leader=" + leaderOrNull() +
'}';
}
@Override
public int compareTo(Partition o) {
return Integer.compare(topicPartition.partition(), o.topicPartition.partition());
}
}
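
Since addLeader, addFollower, and relocateLeadership are package-private, they are normally reached through the cluster model; the sketch below exercises them directly from the same package. The Replica.setLeader mutator and the Capacity no-arg constructor are assumptions, as neither appears in this diff; Rack, Broker, and Replica constructors match the calls shown earlier.

package com.xiaojukeji.know.streaming.km.rebalance.model;

import org.apache.kafka.common.TopicPartition;

public class PartitionExample {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("demo-topic", 0);
        Rack rack = new Rack("rack-a");
        Capacity capacity = new Capacity();                        // assumed no-arg constructor
        Broker b1 = new Broker(rack, 1, "host-1", false, capacity);
        Broker b2 = new Broker(rack, 2, "host-2", false, capacity);

        Partition partition = new Partition(tp);
        Replica leader = new Replica(b1, tp, true, false);
        Replica follower = new Replica(b2, tp, false, false);
        partition.addLeader(leader, 0);                            // the leader sits at index 0
        partition.addFollower(follower, partition.replicas().size());

        // Promote the follower: flip the leader flags first, then swap it into index 0.
        leader.setLeader(false);                                   // assumed mutator
        follower.setLeader(true);                                  // assumed mutator
        partition.relocateLeadership(follower);
        System.out.println(partition.leader().broker().id());      // prints 2
    }
}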

Some files were not shown because too many files have changed in this diff