commit 229140f067
Author: zengqiao
Date:   2020-03-19 17:59:34 +08:00
407 changed files with 46207 additions and 0 deletions

common/pom.xml

@@ -0,0 +1,50 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-common</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>1.0.0-SNAPSHOT</version>
</parent>
<properties>
<kafka-manager.revision>1.0.0-SNAPSHOT</kafka-manager.revision>
<maven.test.skip>true</maven.test.skip>
<downloadSources>true</downloadSources>
<java_source_version>1.8</java_source_version>
<java_target_version>1.8</java_target_version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
</properties>
<dependencies>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>1.9.3</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>2.10.0</version>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
</dependency>
</dependencies>
</project>

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java

@@ -0,0 +1,21 @@
package com.xiaojukeji.kafka.manager.common.constant;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
* @date 20/2/28
*/
public class Constant {
public static final String KAFKA_MANAGER_INNER_ERROR = "kafka-manager inner error";
/**
* metrics type (see MetricsType) -> JMX MBean names to collect
*/
public static final Map<Integer, List<String>> BROKER_METRICS_TYPE_MBEAN_NAME_MAP = new ConcurrentHashMap<>();
public static final Map<Integer, List<String>> TOPIC_METRICS_TYPE_MBEAN_NAME_MAP = new ConcurrentHashMap<>();
public static final String COLLECTOR_METRICS_LOGGER = "COLLECTOR_METRICS_LOGGER";
public static final String API_METRICS_LOGGER = "API_METRICS_LOGGER";
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/MetricsType.java

@@ -0,0 +1,23 @@
package com.xiaojukeji.kafka.manager.common.constant;
public class MetricsType {
/**
* Broker traffic detail
*/
public static final int BROKER_FLOW_DETAIL = 0;
public static final int BROKER_TO_DB_METRICS = 1; // broker metrics persisted to the DB
public static final int BROKER_REAL_TIME_METRICS = 2; // broker real-time metrics
public static final int BROKER_OVER_VIEW_METRICS = 3; // broker status overview metrics
public static final int BROKER_OVER_ALL_METRICS = 4; // broker status summary metrics
public static final int BROKER_ANALYSIS_METRICS = 5; // broker analysis metrics
public static final int BROKER_TOPIC_ANALYSIS_METRICS = 6; // per-topic broker analysis metrics
/**
* Topic traffic detail
*/
public static final int TOPIC_FLOW_DETAIL = 100;
public static final int TOPIC_FLOW_OVERVIEW = 101;
public static final int TOPIC_METRICS_TO_DB = 102;
}
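
These integer codes key the MBean-name maps in Constant above. Below is a minimal sketch of registering and querying MBeans for a metrics type; the registration site and helper class are assumptions for illustration (not code from this commit), though the MBean names shown are standard Kafka broker MBeans:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class MBeanRegistrySketch {
    static {
        // Hypothetical registration: map a metrics-type code to the MBeans to poll.
        Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.put(
                MetricsType.BROKER_REAL_TIME_METRICS,
                Arrays.asList(
                        "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec",
                        "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec"));
    }

    /** MBean names registered for a metrics type; empty list if none. */
    public static List<String> mBeanNamesFor(int metricsType) {
        return Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP
                .getOrDefault(metricsType, Collections.emptyList());
    }
}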

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/OffsetStoreLocation.java

@@ -0,0 +1,35 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* @author limeng
* @date 2017/11/21
*/
public enum OffsetStoreLocation {
ZOOKEEPER("zookeeper"),
BROKER("broker");
private final String location;
OffsetStoreLocation(String location) {
this.location = location;
}
public String getLocation() {
return location;
}
public static OffsetStoreLocation getOffsetStoreLocation(String location) {
if (location == null) {
return null;
}
for (OffsetStoreLocation offsetStoreLocation: OffsetStoreLocation.values()) {
if (offsetStoreLocation.location.equals(location)) {
return offsetStoreLocation;
}
}
return null;
}
}
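
The lookup deliberately returns null for unrecognized input, so callers need a null check; a short illustrative snippet:

OffsetStoreLocation location = OffsetStoreLocation.getOffsetStoreLocation("broker");
if (location == null) {
    // e.g. a typo or an unsupported storage location in the request
    throw new IllegalArgumentException("unknown offset store location");
}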

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/StatusCode.java

@@ -0,0 +1,35 @@
package com.xiaojukeji.kafka.manager.common.constant;
public class StatusCode {
/*
* kafka-manager status codes: 17000 ~ 17999
*
* success - 0
* parameter error - 10000
* resource not ready - 10001
*/
/*
* Already-agreed status codes
*/
public static final Integer SUCCESS = 0;
public static final Integer PARAM_ERROR = 10000; // parameter error
public static final Integer RES_UNREADY = 10001; // resource not ready
public static final Integer MY_SQL_SELECT_ERROR = 17210; // MySQL select failed
public static final Integer MY_SQL_INSERT_ERROR = 17211; // MySQL insert failed
public static final Integer MY_SQL_DELETE_ERROR = 17212; // MySQL delete failed
public static final Integer MY_SQL_UPDATE_ERROR = 17213; // MySQL update failed
public static final Integer MY_SQL_REPLACE_ERROR = 17214; // MySQL replace failed
public static final Integer OPERATION_ERROR = 17300; // requested operation failed
/**
* Topic-related errors
*/
public static final Integer TOPIC_EXISTED = 17400; // topic already exists
public static final Integer PARTIAL_SUCESS = 17700; // operation partially succeeded
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/monitor/MonitorConditionType.java

@@ -0,0 +1,71 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
/**
* Condition type
* @author zengqiao
* @date 19/5/12
*/
public enum MonitorConditionType {
BIGGER(">", "greater than"),
EQUAL("=", "equal to"),
LESS("<", "less than"),
NOT_EQUAL("!=", "not equal to");
private String name;
private String message;
MonitorConditionType(String name, String message) {
this.name = name;
this.message = message;
}
public static boolean legal(String name) {
for (MonitorConditionType elem: MonitorConditionType.values()) {
if (elem.name.equals(name)) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "ConditionType{" +
"name='" + name + '\'' +
", message='" + message + '\'' +
'}';
}
public static List<AbstractMap.SimpleEntry<String, String>> toList() {
List<AbstractMap.SimpleEntry<String, String>> conditionTypeList = new ArrayList<>();
for (MonitorConditionType elem: MonitorConditionType.values()) {
conditionTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
}
return conditionTypeList;
}
/**
* Evaluate whether operation(data1, data2) holds
* @param data1 left operand
* @param data2 right operand
* @param operation comparison operator: one of >, <, =, !=
* @author zengqiao
* @date 19/5/12
* @return boolean
*/
public static boolean matchCondition(Double data1, Double data2, String operation) {
switch (operation) {
case ">": return data1 > data2;
case "<": return data1 < data2;
case "=": return data1.equals(data2);
case "!=": return !data1.equals(data2);
default:
}
return false;
}
}
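
A monitor rule would validate the configured operator with legal() and then delegate the comparison to matchCondition(); the values below are made up for illustration:

double observed = 1024.0;   // e.g. the current BytesIn reading
double threshold = 512.0;   // the configured threshold
String operator = ">";

if (MonitorConditionType.legal(operator)
        && MonitorConditionType.matchCondition(observed, threshold, operator)) {
    // 1024 > 512, so the condition matches and the alarm should fire
}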

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/monitor/MonitorMatchStatus.java

@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
/**
* @author zengqiao
* @date 20/3/18
*/
public enum MonitorMatchStatus {
UNKNOWN(0),
YES(1),
NO(2);
public final Integer status;
MonitorMatchStatus(Integer status) {
this.status = status;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/monitor/MonitorMetricsType.java

@@ -0,0 +1,59 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
/**
* Metrics type
* @author zengqiao
* @date 19/5/12
*/
public enum MonitorMetricsType {
BYTES_IN("BytesIn", "bytes-in traffic"),
BYTES_OUT("BytesOut", "bytes-out traffic"),
LAG("Lag", "consumer group lag");
private String name;
private String message;
MonitorMetricsType(String name, String message) {
this.name = name;
this.message = message;
}
public static boolean legal(String name) {
for (MonitorMetricsType elem: MonitorMetricsType.values()) {
if (elem.name.equals(name)) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "MetricType{" +
"name='" + name + '\'' +
", message='" + message + '\'' +
'}';
}
public static List<AbstractMap.SimpleEntry<String, String>> toList() {
List<AbstractMap.SimpleEntry<String, String>> metricTypeList = new ArrayList<>();
for (MonitorMetricsType elem: MonitorMetricsType.values()) {
metricTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
}
return metricTypeList;
}
public String getName() {
return name;
}
public String getMessage() {
return message;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/monitor/MonitorNotifyType.java

@@ -0,0 +1,56 @@
package com.xiaojukeji.kafka.manager.common.constant.monitor;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
/**
* Notification type
* @author huangyiminghappy@163.com
* @date 2019-05-06
*/
public enum MonitorNotifyType {
KAFKA_MESSAGE("KAFKA", "send the alarm to Kafka");
String name;
String message;
MonitorNotifyType(String name, String message){
this.name = name;
this.message = message;
}
public String getName() {
return name;
}
public String getMessage() {
return message;
}
public static boolean legal(String name) {
for (MonitorNotifyType elem: MonitorNotifyType.values()) {
if (elem.name.equals(name)) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "NotifyType{" +
"name='" + name + '\'' +
", message='" + message + '\'' +
'}';
}
public static List<AbstractMap.SimpleEntry<String, String>> toList() {
List<AbstractMap.SimpleEntry<String, String>> notifyTypeList = new ArrayList<>();
for (MonitorNotifyType elem: MonitorNotifyType.values()) {
notifyTypeList.add(new AbstractMap.SimpleEntry<>(elem.name, elem.message));
}
return notifyTypeList;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ConsumerMetadata.java

@@ -0,0 +1,37 @@
package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* @author zengqiao
* @date 19/5/14
*/
public class ConsumerMetadata {
private Set<String> consumerGroupSet = new HashSet<>();
private Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
public ConsumerMetadata(Set<String> consumerGroupSet,
Map<String, Set<String>> topicNameConsumerGroupMap,
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
this.consumerGroupSet = consumerGroupSet;
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
}
public Set<String> getConsumerGroupSet() {
return consumerGroupSet;
}
public Map<String, Set<String>> getTopicNameConsumerGroupMap() {
return topicNameConsumerGroupMap;
}
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
return consumerGroupSummaryMap;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ConsumerMetrics.java

@@ -0,0 +1,69 @@
package com.xiaojukeji.kafka.manager.common.entity;
/**
* ConsumerMetrics
* @author tukun
* @date 2015/11/12
*/
public class ConsumerMetrics {
private Long clusterId;
private String topicName;
private String consumerGroup;
private String location;
private Long sumLag;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public Long getSumLag() {
return sumLag;
}
public void setSumLag(Long sumLag) {
this.sumLag = sumLag;
}
@Override
public String toString() {
return "ConsumerMetrics{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", consumerGroup='" + consumerGroup + '\'' +
", location='" + location + '\'' +
", sumLag=" + sumLag +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/Result.java

@@ -0,0 +1,76 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.constant.StatusCode;
import java.io.Serializable;
/**
* @author huangyiminghappy@163.com
* @date 2019-07-08
*/
public class Result<T> implements Serializable {
private static final long serialVersionUID = -2772975319944108658L;
private T data;
private String message;
private Integer code;
public Result(T data) {
this.data = data;
this.code = StatusCode.SUCCESS;
this.message = "成功";
}
public Result() {
this(null);
}
public Result(Integer code, String message) {
this.message = message;
this.code = code;
}
public Result(Integer code, T data, String message) {
this.data = data;
this.message = message;
this.code = code;
}
public T getData() {
return this.data;
}
public void setData(T data) {
this.data = data;
}
public String getMessage() {
return this.message;
}
public void setMessage(String message) {
this.message = message;
}
public Integer getCode() {
return this.code;
}
public void setCode(Integer code) {
this.code = code;
}
@Override
public String toString() {
return JSON.toJSONString(this);
}
}
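
Typical usage in a controller layer, sketched with made-up payloads; the serialized form shown is what fastjson would produce with null fields omitted:

// Success: code 0, message "success", payload carried in data.
Result<String> ok = new Result<>("payload");

// Failure: code and message only, data stays null.
Result<String> err = new Result<>(StatusCode.PARAM_ERROR, "clusterId is required");

// toString() delegates to fastjson, e.g. {"code":10000,"message":"clusterId is required"}
System.out.println(err);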

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/annotations/FieldSelector.java

@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.common.entity.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* FieldSelector
* @author huangyiminghappy@163.com
* @date 2019-06-19
*/
@Target(ElementType.FIELD)
@Retention(RUNTIME)
@Documented
public @interface FieldSelector {
// annotation attributes
String name() default "";
int[] types() default {};
}
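
The annotation carries no behavior of its own: a consumer is expected to read it reflectively and keep only the fields whose types array contains the requested MetricsType code. A minimal sketch of that filtering (the helper class is hypothetical; the commit's real consumers are the metrics classes further below):

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;

public class FieldSelectorSketch {
    /** Names of the fields in clazz tagged for the given metrics type. */
    public static List<String> selectFieldNames(Class<?> clazz, int metricsType) {
        List<String> names = new ArrayList<>();
        for (Field field : clazz.getDeclaredFields()) {
            FieldSelector selector = field.getAnnotation(FieldSelector.class);
            if (selector == null) {
                continue;
            }
            for (int type : selector.types()) {
                if (type == metricsType) {
                    names.add(field.getName());
                    break;
                }
            }
        }
        return names;
    }
}

For instance, selectFieldNames(BaseMetrics.class, MetricsType.TOPIC_FLOW_OVERVIEW) would return bytesInPerSec and totalProduceRequestsPerSec, matching the annotations in BaseMetrics below.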

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/AccountRoleEnum.java

@@ -0,0 +1,35 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* User role
* @author zengqiao_cn@163.com
* @date 19/4/15
*/
public enum AccountRoleEnum {
UNKNOWN(-1),
NORMAL(0),
SRE(1),
ADMIN(2);
private Integer role;
AccountRoleEnum(Integer role) {
this.role = role;
}
public Integer getRole() {
return role;
}
public static AccountRoleEnum getUserRoleEnum(Integer role) {
for (AccountRoleEnum elem: AccountRoleEnum.values()) {
if (elem.getRole().equals(role)) {
return elem;
}
}
return null;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/AdminTopicStatusEnum.java

@@ -0,0 +1,38 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* Status of a topic admin operation
* @author zengqiao
* @date 19/11/26
*/
public enum AdminTopicStatusEnum {
SUCCESS(0, "success"),
REPLACE_DB_FAILED(1, "failed to update the DB"),
PARAM_NULL_POINTER(2, "parameter error"),
PARTITION_NUM_ILLEGAL(3, "illegal partition number"),
BROKER_NUM_NOT_ENOUGH(4, "not enough brokers"),
TOPIC_NAME_ILLEGAL(5, "illegal topic name"),
TOPIC_EXISTED(6, "topic already exists"),
UNKNOWN_TOPIC_PARTITION(7, "unknown topic"),
TOPIC_CONFIG_ILLEGAL(8, "illegal topic config"),
TOPIC_IN_DELETING(9, "topic is being deleted"),
UNKNOWN_ERROR(10, "unknown error");
private Integer code;
private String message;
AdminTopicStatusEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/DBStatusEnum.java

@@ -0,0 +1,42 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* Meaning of the DB status field
* @author zengqiao_cn@163.com
* @date 19/4/15
*/
public enum DBStatusEnum {
/**
* logically deleted
*/
DELETED(-1),
/**
* normal
*/
NORMAL(0),
/**
* completed and approved
*/
PASSED(1);
private Integer status;
DBStatusEnum(Integer status) {
this.status = status;
}
public Integer getStatus() {
return status;
}
public static DBStatusEnum getDBStatusEnum(Integer status) {
for (DBStatusEnum elem: DBStatusEnum.values()) {
if (elem.getStatus().equals(status)) {
return elem;
}
}
return null;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/OperationEnum.java

@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* Operation type
* @author zengqiao
* @date 19/11/21
*/
public enum OperationEnum {
CREATE_TOPIC("create_topic"),
DELETE_TOPIC("delete_topic"),
MODIFY_TOPIC_CONFIG("modify_topic_config"),
EXPAND_TOPIC_PARTITION("expand_topic_partition");
public final String message;
OperationEnum(String message) {
this.message = message;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/OrderStatusEnum.java

@@ -0,0 +1,28 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
public enum OrderStatusEnum {
WAIT_DEAL(0, "pending"),
PASSED(1, "approved"),
REFUSED(2, "rejected"),
CANCELLED(3, "cancelled");
private Integer code;
private String message;
OrderStatusEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/OrderTypeEnum.java

@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* Work order type
* @author zengqiao
* @date 19/6/23
*/
public enum OrderTypeEnum {
UNKNOWN(-1),
APPLY_TOPIC(0),
APPLY_PARTITION(1);
private Integer code;
OrderTypeEnum(Integer code) {
this.code = code;
}
public Integer getCode() {
return code;
}
public static OrderTypeEnum getOrderTypeEnum(Integer code) {
for (OrderTypeEnum elem: OrderTypeEnum.values()) {
if (elem.getCode().equals(code)) {
return elem;
}
}
return OrderTypeEnum.UNKNOWN;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/PreferredReplicaElectEnum.java

@@ -0,0 +1,31 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* Preferred replica election status
* @author zengqiao
* @date 2017/6/29.
*/
public enum PreferredReplicaElectEnum {
SUCCESS(0, "success [created | executed]"),
RUNNING(1, "running"),
ALREADY_EXIST(2, "task already exists"),
PARAM_ILLEGAL(3, "parameter error"),
UNKNOWN(4, "progress unknown");
private Integer code;
private String message;
PreferredReplicaElectEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/bizenum/ReassignmentStatusEnum.java

@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.entity.bizenum;
/**
* Reassignment status
* @author zengqiao
* @date 19/12/29
*/
public enum ReassignmentStatusEnum {
WAITING(0, "waiting"),
RUNNING(1, "running"),
SUCCESS(2, "reassignment succeeded"),
FAILED(3, "reassignment failed"),
CANCELED(4, "task cancelled");
private Integer code;
private String message;
ReassignmentStatusEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
public static boolean triggerTask(Integer status) {
return WAITING.code.equals(status) || RUNNING.code.equals(status);
}
public static boolean cancelTask(Integer status) {
return WAITING.code.equals(status);
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/BrokerBasicDTO.java

@@ -0,0 +1,91 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
* Basic broker information
* @author zengqiao_cn@163.com
* @date 19/4/8
*/
public class BrokerBasicDTO {
private String host;
private Integer port;
private Integer jmxPort;
private Integer topicNum;
private Integer partitionCount;
private Long startTime;
private Integer leaderCount;
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public Integer getJmxPort() {
return jmxPort;
}
public void setJmxPort(Integer jmxPort) {
this.jmxPort = jmxPort;
}
public Integer getTopicNum() {
return topicNum;
}
public void setTopicNum(Integer topicNum) {
this.topicNum = topicNum;
}
public Integer getPartitionCount() {
return partitionCount;
}
public void setPartitionCount(Integer partitionCount) {
this.partitionCount = partitionCount;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getLeaderCount() {
return leaderCount;
}
public void setLeaderCount(Integer leaderCount) {
this.leaderCount = leaderCount;
}
@Override
public String toString() {
return "BrokerBasicInfoDTO{" +
"host='" + host + '\'' +
", port=" + port +
", jmxPort=" + jmxPort +
", topicNum=" + topicNum +
", partitionCount=" + partitionCount +
", startTime=" + startTime +
", leaderCount=" + leaderCount +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/BrokerOverallDTO.java

@@ -0,0 +1,132 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
/**
* @author zengqiao
* @date 19/4/21
*/
public class BrokerOverallDTO {
private Integer brokerId;
private String host;
private Integer port;
private Integer jmxPort;
private Long startTime;
private Integer partitionCount;
private Integer underReplicatedPartitions;
private Integer leaderCount;
private Double bytesInPerSec;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public Integer getJmxPort() {
return jmxPort;
}
public void setJmxPort(Integer jmxPort) {
this.jmxPort = jmxPort;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getPartitionCount() {
return partitionCount;
}
public void setPartitionCount(Integer partitionCount) {
this.partitionCount = partitionCount;
}
public Integer getUnderReplicatedPartitions() {
return underReplicatedPartitions;
}
public void setUnderReplicatedPartitions(Integer underReplicatedPartitions) {
this.underReplicatedPartitions = underReplicatedPartitions;
}
public Integer getLeaderCount() {
return leaderCount;
}
public void setLeaderCount(Integer leaderCount) {
this.leaderCount = leaderCount;
}
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
@Override
public String toString() {
return "BrokerOverallDTO{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", port=" + port +
", jmxPort=" + jmxPort +
", startTime=" + startTime +
", partitionCount=" + partitionCount +
", underReplicatedPartitions=" + underReplicatedPartitions +
", leaderCount=" + leaderCount +
", bytesInPerSec=" + bytesInPerSec +
'}';
}
public static BrokerOverallDTO newInstance(BrokerMetadata brokerMetadata, BrokerMetrics brokerMetrics) {
BrokerOverallDTO brokerOverallDTO = new BrokerOverallDTO();
brokerOverallDTO.setBrokerId(brokerMetadata.getBrokerId());
brokerOverallDTO.setHost(brokerMetadata.getHost());
brokerOverallDTO.setPort(brokerMetadata.getPort());
brokerOverallDTO.setJmxPort(brokerMetadata.getJmxPort());
brokerOverallDTO.setStartTime(brokerMetadata.getTimestamp());
if (brokerMetrics == null) {
return brokerOverallDTO;
}
brokerOverallDTO.setPartitionCount(brokerMetrics.getPartitionCount());
brokerOverallDTO.setLeaderCount(brokerMetrics.getLeaderCount());
brokerOverallDTO.setBytesInPerSec(brokerMetrics.getBytesInPerSec());
brokerOverallDTO.setUnderReplicatedPartitions(brokerMetrics.getUnderReplicatedPartitions());
return brokerOverallDTO;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/BrokerOverviewDTO.java

@@ -0,0 +1,121 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
/**
* @author zengqiao_cn@163.com
* @date 19/4/21
*/
public class BrokerOverviewDTO {
private Integer brokerId;
private String host;
private Integer port;
private Integer jmxPort;
private Long startTime;
private Double byteIn;
private Double byteOut;
private Integer status;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public Integer getJmxPort() {
return jmxPort;
}
public void setJmxPort(Integer jmxPort) {
this.jmxPort = jmxPort;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Double getByteIn() {
return byteIn;
}
public void setByteIn(Double byteIn) {
this.byteIn = byteIn;
}
public Double getByteOut() {
return byteOut;
}
public void setByteOut(Double byteOut) {
this.byteOut = byteOut;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
@Override
public String toString() {
return "BrokerInfoDTO{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", port=" + port +
", jmxPort=" + jmxPort +
", startTime=" + startTime +
", byteIn=" + byteIn +
", byteOut=" + byteOut +
", status=" + status +
'}';
}
public static BrokerOverviewDTO newInstance(BrokerMetadata brokerMetadata, BrokerMetrics brokerMetrics) {
BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO();
brokerOverviewDTO.setBrokerId(brokerMetadata.getBrokerId());
brokerOverviewDTO.setHost(brokerMetadata.getHost());
brokerOverviewDTO.setPort(brokerMetadata.getPort());
brokerOverviewDTO.setJmxPort(brokerMetadata.getJmxPort());
brokerOverviewDTO.setStartTime(brokerMetadata.getTimestamp());
brokerOverviewDTO.setStatus(DBStatusEnum.NORMAL.getStatus());
if (brokerMetrics == null) {
return brokerOverviewDTO;
}
brokerOverviewDTO.setByteIn(brokerMetrics.getBytesInPerSec());
brokerOverviewDTO.setByteOut(brokerMetrics.getBytesOutPerSec());
return brokerOverviewDTO;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/ControllerDTO.java

@@ -0,0 +1,70 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import java.util.Date;
/**
* @author zengqiao
* @date 19/4/22
*/
public class ControllerDTO {
private String clusterName;
private Integer brokerId;
private String host;
private Integer controllerVersion;
private Date controllerTimestamp;
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getControllerVersion() {
return controllerVersion;
}
public void setControllerVersion(Integer controllerVersion) {
this.controllerVersion = controllerVersion;
}
public Date getControllerTimestamp() {
return controllerTimestamp;
}
public void setControllerTimestamp(Date controllerTimestamp) {
this.controllerTimestamp = controllerTimestamp;
}
@Override
public String toString() {
return "ControllerInfoDTO{" +
"clusterName='" + clusterName + '\'' +
", brokerId=" + brokerId +
", host='" + host + '\'' +
", controllerVersion=" + controllerVersion +
", controllerTimestamp=" + controllerTimestamp +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/PartitionOffsetDTO.java

@@ -0,0 +1,62 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
* Topic Offset
* @author zengqiao
* @date 19/6/2
*/
public class PartitionOffsetDTO {
private Integer partitionId;
private Long offset;
private Long timestamp;
public PartitionOffsetDTO() {
}
public PartitionOffsetDTO(Integer partitionId, Long offset) {
this.partitionId = partitionId;
this.offset = offset;
}
public PartitionOffsetDTO(Integer partitionId, Long offset, Long timestamp) {
this.partitionId = partitionId;
this.offset = offset;
this.timestamp = timestamp;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getOffset() {
return offset;
}
public void setOffset(Long offset) {
this.offset = offset;
}
public Long getTimestamp() {
return timestamp;
}
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}
@Override
public String toString() {
return "TopicOffsetDTO{" +
", partitionId=" + partitionId +
", offset=" + offset +
", timestamp=" + timestamp +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/TopicBasicDTO.java

@@ -0,0 +1,123 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
/**
* @author arthur
* @date 2018/09/03
*/
public class TopicBasicDTO {
private String topicName;
private Integer partitionNum;
private Integer replicaNum;
private Integer brokerNum;
private String remark;
private Long modifyTime;
private Long createTime;
private String region;
private Long retentionTime;
private String principal;
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Integer getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(Integer replicaNum) {
this.replicaNum = replicaNum;
}
public Integer getBrokerNum() {
return brokerNum;
}
public void setBrokerNum(Integer brokerNum) {
this.brokerNum = brokerNum;
}
public String getRemark() {
return remark;
}
public void setRemark(String remark) {
this.remark = remark;
}
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
public Long getRetentionTime() {
return retentionTime;
}
public void setRetentionTime(Long retentionTime) {
this.retentionTime = retentionTime;
}
public Long getModifyTime() {
return modifyTime;
}
public void setModifyTime(Long modifyTime) {
this.modifyTime = modifyTime;
}
public Long getCreateTime() {
return createTime;
}
public void setCreateTime(Long createTime) {
this.createTime = createTime;
}
public String getPrincipal() {
return principal;
}
public void setPrincipal(String principal) {
this.principal = principal;
}
@Override
public String toString() {
return "TopicBasicInfoDTO{" +
"topicName='" + topicName + '\'' +
", partitionNum=" + partitionNum +
", replicaNum=" + replicaNum +
", brokerNum=" + brokerNum +
", remark='" + remark + '\'' +
", modifyTime=" + modifyTime +
", createTime=" + createTime +
", region='" + region + '\'' +
", retentionTime=" + retentionTime +
", principal='" + principal + '\'' +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/TopicOverviewDTO.java

@@ -0,0 +1,86 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
public class TopicOverviewDTO {
private Long clusterId;
private String topicName;
private Integer replicaNum;
private Integer partitionNum;
private Double bytesInPerSec;
private Double produceRequestPerSec;
private Long updateTime;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(Integer replicaNum) {
this.replicaNum = replicaNum;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Double getProduceRequestPerSec() {
return produceRequestPerSec;
}
public void setProduceRequestPerSec(Double produceRequestPerSec) {
this.produceRequestPerSec = produceRequestPerSec;
}
public Long getUpdateTime() {
return updateTime;
}
public void setUpdateTime(Long updateTime) {
this.updateTime = updateTime;
}
@Override
public String toString() {
return "TopicOverviewDTO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", replicaNum=" + replicaNum +
", partitionNum=" + partitionNum +
", bytesInPerSec=" + bytesInPerSec +
", produceRequestPerSec=" + produceRequestPerSec +
", updateTime=" + updateTime +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/TopicPartitionDTO.java

@@ -0,0 +1,105 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import java.io.Serializable;
import java.util.List;
/**
* @author arthur
* @date 2017/6/6.
*/
public class TopicPartitionDTO implements Serializable {
private Integer partitionId;
private Long offset;
private Integer leaderBrokerId;
private Integer preferredBrokerId;
private Integer leaderEpoch;
private List<Integer> replicasBroker;
private List<Integer> isr;
private Boolean underReplicated;
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getOffset() {
return offset;
}
public void setOffset(Long offset) {
this.offset = offset;
}
public Integer getLeaderBrokerId() {
return leaderBrokerId;
}
public void setLeaderBrokerId(Integer leaderBrokerId) {
this.leaderBrokerId = leaderBrokerId;
}
public Integer getPreferredBrokerId() {
return preferredBrokerId;
}
public void setPreferredBrokerId(Integer preferredBrokerId) {
this.preferredBrokerId = preferredBrokerId;
}
public Integer getLeaderEpoch() {
return leaderEpoch;
}
public void setLeaderEpoch(Integer leaderEpoch) {
this.leaderEpoch = leaderEpoch;
}
public List<Integer> getReplicasBroker() {
return replicasBroker;
}
public void setReplicasBroker(List<Integer> replicasBroker) {
this.replicasBroker = replicasBroker;
}
public List<Integer> getIsr() {
return isr;
}
public void setIsr(List<Integer> isr) {
this.isr = isr;
}
public boolean isUnderReplicated() {
// guard against an auto-unboxing NPE when the flag was never set
return Boolean.TRUE.equals(underReplicated);
}
public void setUnderReplicated(boolean underReplicated) {
this.underReplicated = underReplicated;
}
@Override
public String toString() {
return "TopicPartitionDTO{" +
"partitionId=" + partitionId +
", offset=" + offset +
", leaderBrokerId=" + leaderBrokerId +
", preferredBrokerId=" + preferredBrokerId +
", leaderEpoch=" + leaderEpoch +
", replicasBroker=" + replicasBroker +
", isr=" + isr +
", underReplicated=" + underReplicated +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/alarm/AlarmNotifyDTO.java

@@ -0,0 +1,47 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
* Alarm notification
* @author zengqiao
* @date 2020-02-14
*/
public class AlarmNotifyDTO {
private Long alarmRuleId;
private String actionTag;
private String message;
public Long getAlarmRuleId() {
return alarmRuleId;
}
public void setAlarmRuleId(Long alarmRuleId) {
this.alarmRuleId = alarmRuleId;
}
public String getActionTag() {
return actionTag;
}
public void setActionTag(String actionTag) {
this.actionTag = actionTag;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
@Override
public String toString() {
return "AlarmNotifyDTO{" +
"alarmRuleId=" + alarmRuleId +
", actionTag='" + actionTag + '\'' +
", message='" + message + '\'' +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/alarm/AlarmRuleDTO.java

@@ -0,0 +1,127 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
import java.util.Map;
/**
* @author zengqiao
* @date 19/12/16
*/
public class AlarmRuleDTO {
/**
* alarm rule ID
*/
private Long id;
/**
* alarm name
*/
private String name;
/**
* number of consecutive times the condition has already held
*/
private Integer duration;
/**
* cluster ID; always present among the filter conditions, so it is kept as its own field
*/
private Long clusterId;
/**
* alarm strategy expression
*/
private AlarmStrategyExpressionDTO strategyExpression;
/**
* alarm strategy filter conditions
*/
private Map<String, String> strategyFilterMap;
/**
* alarm strategy actions
*/
private Map<String, AlarmStrategyActionDTO> strategyActionMap;
/**
* last modified time
*/
private Long gmtModify;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Integer getDuration() {
return duration;
}
public void setDuration(Integer duration) {
this.duration = duration;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public AlarmStrategyExpressionDTO getStrategyExpression() {
return strategyExpression;
}
public void setStrategyExpression(AlarmStrategyExpressionDTO strategyExpression) {
this.strategyExpression = strategyExpression;
}
public Map<String, String> getStrategyFilterMap() {
return strategyFilterMap;
}
public void setStrategyFilterMap(Map<String, String> strategyFilterMap) {
this.strategyFilterMap = strategyFilterMap;
}
public Map<String, AlarmStrategyActionDTO> getStrategyActionMap() {
return strategyActionMap;
}
public void setStrategyActionMap(Map<String, AlarmStrategyActionDTO> strategyActionMap) {
this.strategyActionMap = strategyActionMap;
}
public Long getGmtModify() {
return gmtModify;
}
public void setGmtModify(Long gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "AlarmRuleDTO{" +
"id=" + id +
", name='" + name + '\'' +
", duration=" + duration +
", clusterId=" + clusterId +
", strategyExpression=" + strategyExpression +
", strategyFilterMap=" + strategyFilterMap +
", strategyActionMap=" + strategyActionMap +
", gmtModify=" + gmtModify +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/alarm/AlarmStrategyActionDTO.java

@@ -0,0 +1,43 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
* @author zengqiao
* @date 19/12/16
*/
public class AlarmStrategyActionDTO {
private String actionWay; // notification channel, e.g. kafka
private String actionTag;
public String getActionWay() {
return actionWay;
}
public void setActionWay(String actionWay) {
this.actionWay = actionWay;
}
public String getActionTag() {
return actionTag;
}
public void setActionTag(String actionTag) {
this.actionTag = actionTag;
}
@Override
public String toString() {
return "AlarmStrategyActionDTO{" +
"actionWay='" + actionWay + '\'' +
", actionTag='" + actionTag + '\'' +
'}';
}
public boolean legal() {
return actionWay != null && actionTag != null;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/alarm/AlarmStrategyExpressionDTO.java

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
* Strategy expression
* @author zengqiao
* @date 19/12/16
*/
public class AlarmStrategyExpressionDTO {
private String metric;
private String opt;
private Long threshold;
private Integer duration;
public String getMetric() {
return metric;
}
public void setMetric(String metric) {
this.metric = metric;
}
public String getOpt() {
return opt;
}
public void setOpt(String opt) {
this.opt = opt;
}
public Long getThreshold() {
return threshold;
}
public void setThreshold(Long threshold) {
this.threshold = threshold;
}
public Integer getDuration() {
return duration;
}
public void setDuration(Integer duration) {
this.duration = duration;
}
@Override
public String toString() {
return "AlarmStrategyExpressionModel{" +
"metric='" + metric + '\'' +
", opt='" + opt + '\'' +
", threshold=" + threshold +
", duration=" + duration +
'}';
}
public boolean legal() {
return metric != null
&& opt != null
&& threshold != null
&& duration != null && duration > 0;
}
}
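
Evaluating an expression against an observed value can reuse MonitorConditionType from earlier in this commit; the wiring below is an illustrative sketch, not the project's actual evaluator:

AlarmStrategyExpressionDTO expression = new AlarmStrategyExpressionDTO();
expression.setMetric("BytesIn");
expression.setOpt(">");
expression.setThreshold(1000L);
expression.setDuration(3);

double observed = 1500.0;
boolean matched = expression.legal()
        && MonitorConditionType.matchCondition(
                observed, expression.getThreshold().doubleValue(), expression.getOpt());
// matched == true, since 1500 > 1000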

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/alarm/AlarmStrategyFilterDTO.java

@@ -0,0 +1,44 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.alarm;
/**
* Alarm filter condition
* @author zengqiao
* @date 19/12/16
*/
public class AlarmStrategyFilterDTO {
private String key;
private String value;
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
@Override
public String toString() {
return "AlarmStrategyFilterModel{" +
"key='" + key + '\'' +
", value='" + value + '\'' +
'}';
}
public boolean legal() {
return key != null && value != null;
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/analysis/AnalysisBrokerDTO.java

@@ -0,0 +1,114 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.analysis;
import java.util.List;
/**
* @author zengqiao
* @date 19/12/29
*/
public class AnalysisBrokerDTO {
private Long clusterId;
private Integer brokerId;
private Long baseTime;
private Double bytesIn;
private Double bytesOut;
private Double messagesIn;
private Double totalFetchRequests;
private Double totalProduceRequests;
private List<AnalysisTopicDTO> topicAnalysisVOList;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public Long getBaseTime() {
return baseTime;
}
public void setBaseTime(Long baseTime) {
this.baseTime = baseTime;
}
public Double getBytesIn() {
return bytesIn;
}
public void setBytesIn(Double bytesIn) {
this.bytesIn = bytesIn;
}
public Double getBytesOut() {
return bytesOut;
}
public void setBytesOut(Double bytesOut) {
this.bytesOut = bytesOut;
}
public Double getMessagesIn() {
return messagesIn;
}
public void setMessagesIn(Double messagesIn) {
this.messagesIn = messagesIn;
}
public Double getTotalFetchRequests() {
return totalFetchRequests;
}
public void setTotalFetchRequests(Double totalFetchRequests) {
this.totalFetchRequests = totalFetchRequests;
}
public Double getTotalProduceRequests() {
return totalProduceRequests;
}
public void setTotalProduceRequests(Double totalProduceRequests) {
this.totalProduceRequests = totalProduceRequests;
}
public List<AnalysisTopicDTO> getTopicAnalysisVOList() {
return topicAnalysisVOList;
}
public void setTopicAnalysisVOList(List<AnalysisTopicDTO> topicAnalysisVOList) {
this.topicAnalysisVOList = topicAnalysisVOList;
}
@Override
public String toString() {
return "AnalysisBrokerDTO{" +
"clusterId=" + clusterId +
", brokerId=" + brokerId +
", baseTime=" + baseTime +
", bytesIn=" + bytesIn +
", bytesOut=" + bytesOut +
", messagesIn=" + messagesIn +
", totalFetchRequests=" + totalFetchRequests +
", totalProduceRequests=" + totalProduceRequests +
", topicAnalysisVOList=" + topicAnalysisVOList +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/analysis/AnalysisTopicDTO.java

@@ -0,0 +1,134 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.analysis;
/**
* @author zengqiao
* @date 19/12/29
*/
public class AnalysisTopicDTO {
private String topicName;
private Double bytesIn;
private Double bytesInRate;
private Double bytesOut;
private Double bytesOutRate;
private Double messagesIn;
private Double messagesInRate;
private Double totalFetchRequests;
private Double totalFetchRequestsRate;
private Double totalProduceRequests;
private Double totalProduceRequestsRate;
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Double getBytesIn() {
return bytesIn;
}
public void setBytesIn(Double bytesIn) {
this.bytesIn = bytesIn;
}
public Double getBytesInRate() {
return bytesInRate;
}
public void setBytesInRate(Double bytesInRate) {
this.bytesInRate = bytesInRate;
}
public Double getBytesOut() {
return bytesOut;
}
public void setBytesOut(Double bytesOut) {
this.bytesOut = bytesOut;
}
public Double getBytesOutRate() {
return bytesOutRate;
}
public void setBytesOutRate(Double bytesOutRate) {
this.bytesOutRate = bytesOutRate;
}
public Double getMessagesIn() {
return messagesIn;
}
public void setMessagesIn(Double messagesIn) {
this.messagesIn = messagesIn;
}
public Double getMessagesInRate() {
return messagesInRate;
}
public void setMessagesInRate(Double messagesInRate) {
this.messagesInRate = messagesInRate;
}
public Double getTotalFetchRequests() {
return totalFetchRequests;
}
public void setTotalFetchRequests(Double totalFetchRequests) {
this.totalFetchRequests = totalFetchRequests;
}
public Double getTotalFetchRequestsRate() {
return totalFetchRequestsRate;
}
public void setTotalFetchRequestsRate(Double totalFetchRequestsRate) {
this.totalFetchRequestsRate = totalFetchRequestsRate;
}
public Double getTotalProduceRequests() {
return totalProduceRequests;
}
public void setTotalProduceRequests(Double totalProduceRequests) {
this.totalProduceRequests = totalProduceRequests;
}
public Double getTotalProduceRequestsRate() {
return totalProduceRequestsRate;
}
public void setTotalProduceRequestsRate(Double totalProduceRequestsRate) {
this.totalProduceRequestsRate = totalProduceRequestsRate;
}
@Override
public String toString() {
return "AnalysisTopicDTO{" +
"topicName='" + topicName + '\'' +
", bytesIn=" + bytesIn +
", bytesInRate=" + bytesInRate +
", bytesOut=" + bytesOut +
", bytesOutRate=" + bytesOutRate +
", messagesIn=" + messagesIn +
", messagesInRate=" + messagesInRate +
", totalFetchRequests=" + totalFetchRequests +
", totalFetchRequestsRate=" + totalFetchRequestsRate +
", totalProduceRequests=" + totalProduceRequests +
", totalProduceRequestsRate=" + totalProduceRequestsRate +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/consumer/ConsumeDetailDTO.java

@@ -0,0 +1,57 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
/**
* @author zengqiao
* @date 20/1/9
*/
public class ConsumeDetailDTO {
private Integer partitionId;
private Long offset;
private Long consumeOffset;
private String consumerId;
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getOffset() {
return offset;
}
public void setOffset(Long offset) {
this.offset = offset;
}
public Long getConsumeOffset() {
return consumeOffset;
}
public void setConsumeOffset(Long consumeOffset) {
this.consumeOffset = consumeOffset;
}
public String getConsumerId() {
return consumerId;
}
public void setConsumerId(String consumerId) {
this.consumerId = consumerId;
}
@Override
public String toString() {
return "ConsumeDetailDTO{" +
"partitionId=" + partitionId +
", offset=" + offset +
", consumeOffset=" + consumeOffset +
", consumerId='" + consumerId + '\'' +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/consumer/ConsumerDTO.java

@@ -0,0 +1,61 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
import com.xiaojukeji.kafka.manager.common.entity.zookeeper.PartitionState;
import java.util.List;
import java.util.Map;
/**
* Consumer entity
* @author tukun
* @date 2015/11/12
*/
public class ConsumerDTO {
/**
* consumer group name
*/
private String consumerGroup;
/**
* consume type; usually "static"
*/
private String location;
/**
* partition state list for each subscribed topic
*/
private Map<String, List<PartitionState>> topicPartitionMap;
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public Map<String, List<PartitionState>> getTopicPartitionMap() {
return topicPartitionMap;
}
public void setTopicPartitionMap(Map<String, List<PartitionState>> topicPartitionMap) {
this.topicPartitionMap = topicPartitionMap;
}
@Override
public String toString() {
return "Consumer{" +
"consumerGroup='" + consumerGroup + '\'' +
", location='" + location + '\'' +
", topicPartitionMap=" + topicPartitionMap +
'}';
}
}

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/consumer/ConsumerGroupDTO.java

@@ -0,0 +1,76 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.consumer;
import com.xiaojukeji.kafka.manager.common.constant.OffsetStoreLocation;
import java.util.Objects;
/**
* Consumer group information
* @author zengqiao
* @date 19/4/18
*/
public class ConsumerGroupDTO {
private Long clusterId;
private String consumerGroup;
private OffsetStoreLocation offsetStoreLocation;
public ConsumerGroupDTO(Long clusterId, String consumerGroup, OffsetStoreLocation offsetStoreLocation) {
this.clusterId = clusterId;
this.consumerGroup = consumerGroup;
this.offsetStoreLocation = offsetStoreLocation;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public OffsetStoreLocation getOffsetStoreLocation() {
return offsetStoreLocation;
}
public void setOffsetStoreLocation(OffsetStoreLocation offsetStoreLocation) {
this.offsetStoreLocation = offsetStoreLocation;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
return clusterId.equals(that.clusterId)
&& consumerGroup.equals(that.consumerGroup)
&& offsetStoreLocation == that.offsetStoreLocation;
}
@Override
public int hashCode() {
return Objects.hash(clusterId, consumerGroup, offsetStoreLocation);
}
@Override
public String toString() {
return "ConsumerGroupDTO{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", offsetStoreLocation=" + offsetStoreLocation +
'}';
}
}
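
Since equals() and hashCode() cover all three fields, the DTO deduplicates cleanly in hash-based collections, which is presumably why they are overridden; a short snippet:

Set<ConsumerGroupDTO> groups = new HashSet<>();
groups.add(new ConsumerGroupDTO(1L, "group-a", OffsetStoreLocation.BROKER));
groups.add(new ConsumerGroupDTO(1L, "group-a", OffsetStoreLocation.BROKER));
// the second add is a no-op: groups.size() == 1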

common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BaseMetrics.java

@@ -0,0 +1,394 @@
package com.xiaojukeji.kafka.manager.common.entity.metrics;
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
import com.xiaojukeji.kafka.manager.common.entity.po.BaseEntryDO;
/**
* @author zengqiao
* @date 19/11/25
*/
public class BaseMetrics extends BaseEntryDO {
/**
* bytes-in per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.BROKER_OVER_VIEW_METRICS,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_FLOW_OVERVIEW,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double bytesInPerSec = 0.0;
protected Double bytesInPerSecMeanRate = 0.0;
protected Double bytesInPerSecFiveMinuteRate = 0.0;
protected Double bytesInPerSecFifteenMinuteRate = 0.0;
/**
* bytes-out per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.BROKER_OVER_VIEW_METRICS,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double bytesOutPerSec = 0.0;
protected Double bytesOutPerSecMeanRate = 0.0;
protected Double bytesOutPerSecFiveMinuteRate = 0.0;
protected Double bytesOutPerSecFifteenMinuteRate = 0.0;
/**
* messages-in per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double messagesInPerSec = 0.0;
protected Double messagesInPerSecMeanRate = 0.0;
protected Double messagesInPerSecFiveMinuteRate = 0.0;
protected Double messagesInPerSecFifteenMinuteRate = 0.0;
/**
* bytes-rejected per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB
})
protected Double bytesRejectedPerSec = 0.0;
protected Double bytesRejectedPerSecMeanRate = 0.0;
protected Double bytesRejectedPerSecFiveMinuteRate = 0.0;
protected Double bytesRejectedPerSecFifteenMinuteRate = 0.0;
/**
* failed produce requests per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.TOPIC_FLOW_DETAIL
})
protected Double failProduceRequestPerSec = 0.0;
protected Double failProduceRequestPerSecMeanRate = 0.0;
protected Double failProduceRequestPerSecFiveMinuteRate = 0.0;
protected Double failProduceRequestPerSecFifteenMinuteRate = 0.0;
/**
* failed fetch requests per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS,
MetricsType.TOPIC_FLOW_DETAIL
})
protected Double failFetchRequestPerSec = 0.0;
protected Double failFetchRequestPerSecMeanRate = 0.0;
protected Double failFetchRequestPerSecFiveMinuteRate = 0.0;
protected Double failFetchRequestPerSecFifteenMinuteRate = 0.0;
/**
* total produce requests per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL,
MetricsType.TOPIC_METRICS_TO_DB,
MetricsType.TOPIC_FLOW_OVERVIEW
})
protected Double totalProduceRequestsPerSec = 0.0;
protected Double totalProduceRequestsPerSecMeanRate = 0.0;
protected Double totalProduceRequestsPerSecFiveMinuteRate = 0.0;
protected Double totalProduceRequestsPerSecFifteenMinuteRate = 0.0;
/**
* total fetch requests per second: one-minute rate, mean rate, five-minute rate, and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_ANALYSIS_METRICS,
MetricsType.BROKER_TOPIC_ANALYSIS_METRICS,
MetricsType.TOPIC_FLOW_DETAIL
})
protected Double totalFetchRequestsPerSec = 0.0;
protected Double totalFetchRequestsPerSecMeanRate = 0.0;
protected Double totalFetchRequestsPerSecFiveMinuteRate = 0.0;
protected Double totalFetchRequestsPerSecFifteenMinuteRate = 0.0;
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Double getBytesInPerSecMeanRate() {
return bytesInPerSecMeanRate;
}
public void setBytesInPerSecMeanRate(Double bytesInPerSecMeanRate) {
this.bytesInPerSecMeanRate = bytesInPerSecMeanRate;
}
public Double getBytesInPerSecFiveMinuteRate() {
return bytesInPerSecFiveMinuteRate;
}
public void setBytesInPerSecFiveMinuteRate(Double bytesInPerSecFiveMinuteRate) {
this.bytesInPerSecFiveMinuteRate = bytesInPerSecFiveMinuteRate;
}
public Double getBytesInPerSecFifteenMinuteRate() {
return bytesInPerSecFifteenMinuteRate;
}
public void setBytesInPerSecFifteenMinuteRate(Double bytesInPerSecFifteenMinuteRate) {
this.bytesInPerSecFifteenMinuteRate = bytesInPerSecFifteenMinuteRate;
}
public Double getBytesOutPerSec() {
return bytesOutPerSec;
}
public void setBytesOutPerSec(Double bytesOutPerSec) {
this.bytesOutPerSec = bytesOutPerSec;
}
public Double getBytesOutPerSecMeanRate() {
return bytesOutPerSecMeanRate;
}
public void setBytesOutPerSecMeanRate(Double bytesOutPerSecMeanRate) {
this.bytesOutPerSecMeanRate = bytesOutPerSecMeanRate;
}
public Double getBytesOutPerSecFiveMinuteRate() {
return bytesOutPerSecFiveMinuteRate;
}
public void setBytesOutPerSecFiveMinuteRate(Double bytesOutPerSecFiveMinuteRate) {
this.bytesOutPerSecFiveMinuteRate = bytesOutPerSecFiveMinuteRate;
}
public Double getBytesOutPerSecFifteenMinuteRate() {
return bytesOutPerSecFifteenMinuteRate;
}
public void setBytesOutPerSecFifteenMinuteRate(Double bytesOutPerSecFifteenMinuteRate) {
this.bytesOutPerSecFifteenMinuteRate = bytesOutPerSecFifteenMinuteRate;
}
public Double getMessagesInPerSec() {
return messagesInPerSec;
}
public void setMessagesInPerSec(Double messagesInPerSec) {
this.messagesInPerSec = messagesInPerSec;
}
public Double getMessagesInPerSecMeanRate() {
return messagesInPerSecMeanRate;
}
public void setMessagesInPerSecMeanRate(Double messagesInPerSecMeanRate) {
this.messagesInPerSecMeanRate = messagesInPerSecMeanRate;
}
public Double getMessagesInPerSecFiveMinuteRate() {
return messagesInPerSecFiveMinuteRate;
}
public void setMessagesInPerSecFiveMinuteRate(Double messagesInPerSecFiveMinuteRate) {
this.messagesInPerSecFiveMinuteRate = messagesInPerSecFiveMinuteRate;
}
public Double getMessagesInPerSecFifteenMinuteRate() {
return messagesInPerSecFifteenMinuteRate;
}
public void setMessagesInPerSecFifteenMinuteRate(Double messagesInPerSecFifteenMinuteRate) {
this.messagesInPerSecFifteenMinuteRate = messagesInPerSecFifteenMinuteRate;
}
public Double getBytesRejectedPerSec() {
return bytesRejectedPerSec;
}
public void setBytesRejectedPerSec(Double bytesRejectedPerSec) {
this.bytesRejectedPerSec = bytesRejectedPerSec;
}
public Double getBytesRejectedPerSecMeanRate() {
return bytesRejectedPerSecMeanRate;
}
public void setBytesRejectedPerSecMeanRate(Double bytesRejectedPerSecMeanRate) {
this.bytesRejectedPerSecMeanRate = bytesRejectedPerSecMeanRate;
}
public Double getBytesRejectedPerSecFiveMinuteRate() {
return bytesRejectedPerSecFiveMinuteRate;
}
public void setBytesRejectedPerSecFiveMinuteRate(Double bytesRejectedPerSecFiveMinuteRate) {
this.bytesRejectedPerSecFiveMinuteRate = bytesRejectedPerSecFiveMinuteRate;
}
public Double getBytesRejectedPerSecFifteenMinuteRate() {
return bytesRejectedPerSecFifteenMinuteRate;
}
public void setBytesRejectedPerSecFifteenMinuteRate(Double bytesRejectedPerSecFifteenMinuteRate) {
this.bytesRejectedPerSecFifteenMinuteRate = bytesRejectedPerSecFifteenMinuteRate;
}
public Double getFailProduceRequestPerSec() {
return failProduceRequestPerSec;
}
public void setFailProduceRequestPerSec(Double failProduceRequestPerSec) {
this.failProduceRequestPerSec = failProduceRequestPerSec;
}
public Double getFailProduceRequestPerSecMeanRate() {
return failProduceRequestPerSecMeanRate;
}
public void setFailProduceRequestPerSecMeanRate(Double failProduceRequestPerSecMeanRate) {
this.failProduceRequestPerSecMeanRate = failProduceRequestPerSecMeanRate;
}
public Double getFailProduceRequestPerSecFiveMinuteRate() {
return failProduceRequestPerSecFiveMinuteRate;
}
public void setFailProduceRequestPerSecFiveMinuteRate(Double failProduceRequestPerSecFiveMinuteRate) {
this.failProduceRequestPerSecFiveMinuteRate = failProduceRequestPerSecFiveMinuteRate;
}
public Double getFailProduceRequestPerSecFifteenMinuteRate() {
return failProduceRequestPerSecFifteenMinuteRate;
}
public void setFailProduceRequestPerSecFifteenMinuteRate(Double failProduceRequestPerSecFifteenMinuteRate) {
this.failProduceRequestPerSecFifteenMinuteRate = failProduceRequestPerSecFifteenMinuteRate;
}
public Double getFailFetchRequestPerSec() {
return failFetchRequestPerSec;
}
public void setFailFetchRequestPerSec(Double failFetchRequestPerSec) {
this.failFetchRequestPerSec = failFetchRequestPerSec;
}
public Double getFailFetchRequestPerSecMeanRate() {
return failFetchRequestPerSecMeanRate;
}
public void setFailFetchRequestPerSecMeanRate(Double failFetchRequestPerSecMeanRate) {
this.failFetchRequestPerSecMeanRate = failFetchRequestPerSecMeanRate;
}
public Double getFailFetchRequestPerSecFiveMinuteRate() {
return failFetchRequestPerSecFiveMinuteRate;
}
public void setFailFetchRequestPerSecFiveMinuteRate(Double failFetchRequestPerSecFiveMinuteRate) {
this.failFetchRequestPerSecFiveMinuteRate = failFetchRequestPerSecFiveMinuteRate;
}
public Double getFailFetchRequestPerSecFifteenMinuteRate() {
return failFetchRequestPerSecFifteenMinuteRate;
}
public void setFailFetchRequestPerSecFifteenMinuteRate(Double failFetchRequestPerSecFifteenMinuteRate) {
this.failFetchRequestPerSecFifteenMinuteRate = failFetchRequestPerSecFifteenMinuteRate;
}
public Double getTotalProduceRequestsPerSec() {
return totalProduceRequestsPerSec;
}
public void setTotalProduceRequestsPerSec(Double totalProduceRequestsPerSec) {
this.totalProduceRequestsPerSec = totalProduceRequestsPerSec;
}
public Double getTotalProduceRequestsPerSecMeanRate() {
return totalProduceRequestsPerSecMeanRate;
}
public void setTotalProduceRequestsPerSecMeanRate(Double totalProduceRequestsPerSecMeanRate) {
this.totalProduceRequestsPerSecMeanRate = totalProduceRequestsPerSecMeanRate;
}
public Double getTotalProduceRequestsPerSecFiveMinuteRate() {
return totalProduceRequestsPerSecFiveMinuteRate;
}
public void setTotalProduceRequestsPerSecFiveMinuteRate(Double totalProduceRequestsPerSecFiveMinuteRate) {
this.totalProduceRequestsPerSecFiveMinuteRate = totalProduceRequestsPerSecFiveMinuteRate;
}
public Double getTotalProduceRequestsPerSecFifteenMinuteRate() {
return totalProduceRequestsPerSecFifteenMinuteRate;
}
public void setTotalProduceRequestsPerSecFifteenMinuteRate(Double totalProduceRequestsPerSecFifteenMinuteRate) {
this.totalProduceRequestsPerSecFifteenMinuteRate = totalProduceRequestsPerSecFifteenMinuteRate;
}
public Double getTotalFetchRequestsPerSec() {
return totalFetchRequestsPerSec;
}
public void setTotalFetchRequestsPerSec(Double totalFetchRequestsPerSec) {
this.totalFetchRequestsPerSec = totalFetchRequestsPerSec;
}
public Double getTotalFetchRequestsPerSecMeanRate() {
return totalFetchRequestsPerSecMeanRate;
}
public void setTotalFetchRequestsPerSecMeanRate(Double totalFetchRequestsPerSecMeanRate) {
this.totalFetchRequestsPerSecMeanRate = totalFetchRequestsPerSecMeanRate;
}
public Double getTotalFetchRequestsPerSecFiveMinuteRate() {
return totalFetchRequestsPerSecFiveMinuteRate;
}
public void setTotalFetchRequestsPerSecFiveMinuteRate(Double totalFetchRequestsPerSecFiveMinuteRate) {
this.totalFetchRequestsPerSecFiveMinuteRate = totalFetchRequestsPerSecFiveMinuteRate;
}
public Double getTotalFetchRequestsPerSecFifteenMinuteRate() {
return totalFetchRequestsPerSecFifteenMinuteRate;
}
public void setTotalFetchRequestsPerSecFifteenMinuteRate(Double totalFetchRequestsPerSecFifteenMinuteRate) {
this.totalFetchRequestsPerSecFifteenMinuteRate = totalFetchRequestsPerSecFifteenMinuteRate;
}
}

View File

@@ -0,0 +1,331 @@
package com.xiaojukeji.kafka.manager.common.entity.metrics;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.MetricsType;
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
/**
* Broker metrics that are pulled on a schedule
* @author tukun
* @date 2015/11/6.
*/
public class BrokerMetrics extends BaseMetrics {
/**
* Cluster ID
*/
private Long clusterId;
/**
* Broker ID
*/
private Integer brokerId;
/**
* Produce requests per second: one-minute rate, mean rate, five-minute rate and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS
})
private Double produceRequestPerSec = 0.0;
private Double produceRequestPerSecMeanRate = 0.0;
private Double produceRequestPerSecFiveMinuteRate = 0.0;
private Double produceRequestPerSecFifteenMinuteRate = 0.0;
/**
* Fetch requests per second: one-minute rate, mean rate, five-minute rate and fifteen-minute rate
*/
@FieldSelector(types = {
MetricsType.BROKER_FLOW_DETAIL,
MetricsType.BROKER_TO_DB_METRICS,
MetricsType.BROKER_REAL_TIME_METRICS
})
private Double fetchConsumerRequestPerSec = 0.0;
private Double fetchConsumerRequestPerSecMeanRate = 0.0;
private Double fetchConsumerRequestPerSecFiveMinuteRate = 0.0;
private Double fetchConsumerRequestPerSecFifteenMinuteRate = 0.0;
/**
* Number of partitions on the broker
*/
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS, MetricsType.BROKER_ANALYSIS_METRICS})
private int partitionCount;
/**
* Number of under-replicated partitions on the broker
*/
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS})
private int underReplicatedPartitions;
/**
* Number of partition leaders on the broker
*/
@FieldSelector(types = {MetricsType.BROKER_OVER_ALL_METRICS, MetricsType.BROKER_ANALYSIS_METRICS})
private int leaderCount;
/**
* Average idle percentage of the broker's request handler threads
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double requestHandlerAvgIdlePercent = 0.0;
/**
* Average idle percentage of the network processor threads
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double networkProcessorAvgIdlePercent = 0.0;
/**
* Request queue size
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Integer requestQueueSize = 0;
/**
* Response queue size
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Integer responseQueueSize = 0;
/**
* Log flush rate and time in ms
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double logFlushRateAndTimeMs = 0.0;
/**
* Total time of produce requests: mean
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeProduceMean = 0.0;
/**
* Total time of produce requests: 99th percentile
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeProduce99Th = 0.0;
/**
* Total time of fetch-consumer requests: mean
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeFetchConsumerMean = 0.0;
/**
* Total time of fetch-consumer requests: 99th percentile
*/
@FieldSelector(types = {MetricsType.BROKER_TO_DB_METRICS})
private Double totalTimeFetchConsumer99Th = 0.0;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public Double getProduceRequestPerSec() {
return produceRequestPerSec;
}
public void setProduceRequestPerSec(Double produceRequestPerSec) {
this.produceRequestPerSec = produceRequestPerSec;
}
public Double getProduceRequestPerSecMeanRate() {
return produceRequestPerSecMeanRate;
}
public void setProduceRequestPerSecMeanRate(Double produceRequestPerSecMeanRate) {
this.produceRequestPerSecMeanRate = produceRequestPerSecMeanRate;
}
public Double getProduceRequestPerSecFiveMinuteRate() {
return produceRequestPerSecFiveMinuteRate;
}
public void setProduceRequestPerSecFiveMinuteRate(Double produceRequestPerSecFiveMinuteRate) {
this.produceRequestPerSecFiveMinuteRate = produceRequestPerSecFiveMinuteRate;
}
public Double getProduceRequestPerSecFifteenMinuteRate() {
return produceRequestPerSecFifteenMinuteRate;
}
public void setProduceRequestPerSecFifteenMinuteRate(Double produceRequestPerSecFifteenMinuteRate) {
this.produceRequestPerSecFifteenMinuteRate = produceRequestPerSecFifteenMinuteRate;
}
public Double getFetchConsumerRequestPerSec() {
return fetchConsumerRequestPerSec;
}
public void setFetchConsumerRequestPerSec(Double fetchConsumerRequestPerSec) {
this.fetchConsumerRequestPerSec = fetchConsumerRequestPerSec;
}
public Double getFetchConsumerRequestPerSecMeanRate() {
return fetchConsumerRequestPerSecMeanRate;
}
public void setFetchConsumerRequestPerSecMeanRate(Double fetchConsumerRequestPerSecMeanRate) {
this.fetchConsumerRequestPerSecMeanRate = fetchConsumerRequestPerSecMeanRate;
}
public Double getFetchConsumerRequestPerSecFiveMinuteRate() {
return fetchConsumerRequestPerSecFiveMinuteRate;
}
public void setFetchConsumerRequestPerSecFiveMinuteRate(Double fetchConsumerRequestPerSecFiveMinuteRate) {
this.fetchConsumerRequestPerSecFiveMinuteRate = fetchConsumerRequestPerSecFiveMinuteRate;
}
public Double getFetchConsumerRequestPerSecFifteenMinuteRate() {
return fetchConsumerRequestPerSecFifteenMinuteRate;
}
public void setFetchConsumerRequestPerSecFifteenMinuteRate(Double fetchConsumerRequestPerSecFifteenMinuteRate) {
this.fetchConsumerRequestPerSecFifteenMinuteRate = fetchConsumerRequestPerSecFifteenMinuteRate;
}
public int getPartitionCount() {
return partitionCount;
}
public void setPartitionCount(int partitionCount) {
this.partitionCount = partitionCount;
}
public int getUnderReplicatedPartitions() {
return underReplicatedPartitions;
}
public void setUnderReplicatedPartitions(int underReplicatedPartitions) {
this.underReplicatedPartitions = underReplicatedPartitions;
}
public int getLeaderCount() {
return leaderCount;
}
public void setLeaderCount(int leaderCount) {
this.leaderCount = leaderCount;
}
public Double getRequestHandlerAvgIdlePercent() {
return requestHandlerAvgIdlePercent;
}
public void setRequestHandlerAvgIdlePercent(Double requestHandlerAvgIdlePercent) {
this.requestHandlerAvgIdlePercent = requestHandlerAvgIdlePercent;
}
public Double getNetworkProcessorAvgIdlePercent() {
return networkProcessorAvgIdlePercent;
}
public void setNetworkProcessorAvgIdlePercent(Double networkProcessorAvgIdlePercent) {
this.networkProcessorAvgIdlePercent = networkProcessorAvgIdlePercent;
}
public Integer getRequestQueueSize() {
return requestQueueSize;
}
public void setRequestQueueSize(Integer requestQueueSize) {
this.requestQueueSize = requestQueueSize;
}
public Integer getResponseQueueSize() {
return responseQueueSize;
}
public void setResponseQueueSize(Integer responseQueueSize) {
this.responseQueueSize = responseQueueSize;
}
public Double getLogFlushRateAndTimeMs() {
return logFlushRateAndTimeMs;
}
public void setLogFlushRateAndTimeMs(Double logFlushRateAndTimeMs) {
this.logFlushRateAndTimeMs = logFlushRateAndTimeMs;
}
public Double getTotalTimeProduceMean() {
return totalTimeProduceMean;
}
public void setTotalTimeProduceMean(Double totalTimeProduceMean) {
this.totalTimeProduceMean = totalTimeProduceMean;
}
public Double getTotalTimeProduce99Th() {
return totalTimeProduce99Th;
}
public void setTotalTimeProduce99Th(Double totalTimeProduce99Th) {
this.totalTimeProduce99Th = totalTimeProduce99Th;
}
public Double getTotalTimeFetchConsumerMean() {
return totalTimeFetchConsumerMean;
}
public void setTotalTimeFetchConsumerMean(Double totalTimeFetchConsumerMean) {
this.totalTimeFetchConsumerMean = totalTimeFetchConsumerMean;
}
public Double getTotalTimeFetchConsumer99Th() {
return totalTimeFetchConsumer99Th;
}
public void setTotalTimeFetchConsumer99Th(Double totalTimeFetchConsumer99Th) {
this.totalTimeFetchConsumer99Th = totalTimeFetchConsumer99Th;
}
private static void initialization(Field[] fields) {
for (Field field : fields) {
FieldSelector annotation = field.getAnnotation(FieldSelector.class);
if (annotation == null) {
continue;
}
String fieldName;
if ("".equals(annotation.name())) {
// Default to the capitalized field name when the annotation carries no explicit name
fieldName = field.getName().substring(0, 1).toUpperCase() + field.getName().substring(1);
} else {
fieldName = annotation.name();
}
for (int type : annotation.types()) {
List<String> list = Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
list.add(fieldName);
Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.put(type, list);
}
}
}
public static List<String> getFieldNameList(int metricsType) {
synchronized (BrokerMetrics.class) {
// Lazily register the @FieldSelector fields on first access
if (Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.isEmpty()) {
initialization(BrokerMetrics.class.getDeclaredFields());
initialization(BaseMetrics.class.getDeclaredFields());
}
}
return Constant.BROKER_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(metricsType, new ArrayList<>());
}
}
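
The @FieldSelector machinery above lets the metrics collector ask, per metrics type, which attribute names it should read. A minimal usage sketch (the printed values are simply the capitalized field names registered by initialization(); no other behavior is implied):

// Sketch: resolve the attribute names registered for one metrics type.
List<String> names = BrokerMetrics.getFieldNameList(MetricsType.BROKER_TO_DB_METRICS);
for (String name : names) {
System.out.println(name); // e.g. "RequestHandlerAvgIdlePercent", "RequestQueueSize"
}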

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.metrics;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.annotations.FieldSelector;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
public class TopicMetrics extends BaseMetrics {
/**
* Cluster ID
*/
private Long clusterId;
/**
* Topic name
*/
private String topicName;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
private static void initialization(Field[] fields) {
for (Field field : fields) {
FieldSelector annotation = field.getAnnotation(FieldSelector.class);
if (annotation == null) {
continue;
}
String fieldName;
if ("".equals(annotation.name())) {
// Default to the capitalized field name when the annotation carries no explicit name
String name = field.getName();
fieldName = name.substring(0, 1).toUpperCase() + name.substring(1);
} else {
fieldName = annotation.name();
}
for (int type : annotation.types()) {
List<String> list = Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
list.add(fieldName);
Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.put(type, list);
}
}
}
public static List<String> getFieldNameList(int type) {
synchronized (TopicMetrics.class) {
// Lazily register the @FieldSelector fields on first access
if (Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.isEmpty()) {
initialization(TopicMetrics.class.getDeclaredFields());
initialization(BaseMetrics.class.getDeclaredFields());
}
}
// getOrDefault avoids returning null for unknown types, mirroring BrokerMetrics
return Constant.TOPIC_METRICS_TYPE_MBEAN_NAME_MAP.getOrDefault(type, new ArrayList<>());
}
}

View File

@@ -0,0 +1,50 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
* @author zengqiao
* @date 19/5/3
*/
public class AccountDO extends BaseDO {
private String username;
private String password;
private Integer role;
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public Integer getRole() {
return role;
}
public void setRole(Integer role) {
this.role = role;
}
@Override
public String toString() {
return "AccountDO{" +
"username='" + username + '\'' +
", password='" + password + '\'' +
", role=" + role +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class AlarmRuleDO extends BaseDO {
private String alarmName;
private String strategyExpressions;
private String strategyFilters;
private String strategyActions;
private String principals;
public String getAlarmName() {
return alarmName;
}
public void setAlarmName(String alarmName) {
this.alarmName = alarmName;
}
public String getStrategyExpressions() {
return strategyExpressions;
}
public void setStrategyExpressions(String strategyExpressions) {
this.strategyExpressions = strategyExpressions;
}
public String getStrategyFilters() {
return strategyFilters;
}
public void setStrategyFilters(String strategyFilters) {
this.strategyFilters = strategyFilters;
}
public String getStrategyActions() {
return strategyActions;
}
public void setStrategyActions(String strategyActions) {
this.strategyActions = strategyActions;
}
public String getPrincipals() {
return principals;
}
public void setPrincipals(String principals) {
this.principals = principals;
}
@Override
public String toString() {
return "AlarmRuleDO{" +
"alarmName='" + alarmName + '\'' +
", strategyExpressions='" + strategyExpressions + '\'' +
", strategyFilters='" + strategyFilters + '\'' +
", strategyActions='" + strategyActions + '\'' +
", principals='" + principals + '\'' +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,59 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import java.util.Date;
/**
* @author arthur
* @date 2017/7/25.
*/
public class BaseDO {
protected Long id;
protected Integer status;
protected Date gmtCreate;
protected Date gmtModify;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "BaseDO{" +
"id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,37 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import java.util.Date;
/**
* @author zengqiao
* @date 19/11/25
*/
public abstract class BaseEntryDO {
protected Long id;
protected Date gmtCreate;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
@Override
public String toString() {
return "BaseEntryDO{" +
"id=" + id +
", gmtCreate=" + gmtCreate +
'}';
}
}

View File

@@ -0,0 +1,72 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
* @author zengqiao
* @date 19/4/3
*/
public class BrokerDO extends BaseDO {
private Long clusterId;
private Integer brokerId;
private String host;
private Integer port;
private Long timestamp;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public Long getTimestamp() {
return timestamp;
}
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}
@Override
public String toString() {
return "BrokerDO{" +
"clusterId=" + clusterId +
", brokerId=" + brokerId +
", host='" + host + '\'' +
", port=" + port +
", timestamp=" + timestamp +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,127 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import java.util.Date;
public class ClusterDO extends BaseDO {
private String clusterName;
private String zookeeper;
private String bootstrapServers;
private String kafkaVersion;
private Integer alarmFlag;
private String securityProtocol;
private String saslMechanism;
private String saslJaasConfig;
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getZookeeper() {
return zookeeper;
}
public void setZookeeper(String zookeeper) {
this.zookeeper = zookeeper;
}
public String getBootstrapServers() {
return bootstrapServers;
}
public void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
public String getKafkaVersion() {
return kafkaVersion;
}
public void setKafkaVersion(String kafkaVersion) {
this.kafkaVersion = kafkaVersion;
}
public Integer getAlarmFlag() {
return alarmFlag;
}
public void setAlarmFlag(Integer alarmFlag) {
this.alarmFlag = alarmFlag;
}
public String getSecurityProtocol() {
return securityProtocol;
}
public void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
public String getSaslMechanism() {
return saslMechanism;
}
public void setSaslMechanism(String saslMechanism) {
this.saslMechanism = saslMechanism;
}
public String getSaslJaasConfig() {
return saslJaasConfig;
}
public void setSaslJaasConfig(String saslJaasConfig) {
this.saslJaasConfig = saslJaasConfig;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "ClusterDO{" +
"clusterName='" + clusterName + '\'' +
", zookeeper='" + zookeeper + '\'' +
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", alarmFlag=" + alarmFlag +
", securityProtocol='" + securityProtocol + '\'' +
", saslMechanism='" + saslMechanism + '\'' +
", saslJaasConfig='" + saslJaasConfig + '\'' +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,110 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
public class ClusterMetricsDO extends BaseEntryDO {
private Long clusterId;
private Integer topicNum = 0;
private Integer partitionNum = 0;
private Integer brokerNum = 0;
private Double bytesInPerSec = 0.0;
private Double bytesOutPerSec = 0.0;
private Double bytesRejectedPerSec = 0.0;
private Double messagesInPerSec = 0.0;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getTopicNum() {
return topicNum;
}
public void setTopicNum(Integer topicNum) {
this.topicNum = topicNum;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Integer getBrokerNum() {
return brokerNum;
}
public void setBrokerNum(Integer brokerNum) {
this.brokerNum = brokerNum;
}
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Double getBytesOutPerSec() {
return bytesOutPerSec;
}
public void setBytesOutPerSec(Double bytesOutPerSec) {
this.bytesOutPerSec = bytesOutPerSec;
}
public Double getBytesRejectedPerSec() {
return bytesRejectedPerSec;
}
public void setBytesRejectedPerSec(Double bytesRejectedPerSec) {
this.bytesRejectedPerSec = bytesRejectedPerSec;
}
public Double getMessagesInPerSec() {
return messagesInPerSec;
}
public void setMessagesInPerSec(Double messagesInPerSec) {
this.messagesInPerSec = messagesInPerSec;
}
public void addBrokerMetrics(BrokerMetrics brokerMetrics) {
this.clusterId = brokerMetrics.getClusterId();
this.brokerNum += 1;
this.bytesInPerSec += brokerMetrics.getBytesInPerSec();
this.bytesOutPerSec += brokerMetrics.getBytesOutPerSec();
this.bytesRejectedPerSec += brokerMetrics.getBytesRejectedPerSec();
this.messagesInPerSec += brokerMetrics.getMessagesInPerSec();
}
@Override
public String toString() {
return "ClusterMetricsDO{" +
"clusterId=" + clusterId +
", topicNum=" + topicNum +
", partitionNum=" + partitionNum +
", brokerNum=" + brokerNum +
", bytesInPerSec=" + bytesInPerSec +
", bytesOutPerSec=" + bytesOutPerSec +
", bytesRejectedPerSec=" + bytesRejectedPerSec +
", messagesInPerSec=" + messagesInPerSec +
", id=" + id +
", gmtCreate=" + gmtCreate +
'}';
}
}
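
addBrokerMetrics folds one broker's gauges into this cluster-level row: it counts brokers and sums the per-second byte and message rates. A hedged aggregation sketch (collectedList, the per-broker metrics just gathered, is an assumed variable):

ClusterMetricsDO clusterRow = new ClusterMetricsDO();
for (BrokerMetrics brokerMetrics : collectedList) {
clusterRow.addBrokerMetrics(brokerMetrics); // increments brokerNum and sums the rates
}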

View File

@@ -0,0 +1,84 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
* @author zengqiao
* @date 20/2/28
*/
public class ControllerDO extends BaseEntryDO {
private Long clusterId;
private Integer brokerId;
private String host;
private Long timestamp;
private Integer version;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Long getTimestamp() {
return timestamp;
}
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}
public Integer getVersion() {
return version;
}
public void setVersion(Integer version) {
this.version = version;
}
@Override
public String toString() {
return "ControllerDO{" +
"id=" + id +
", clusterId=" + clusterId +
", brokerId=" + brokerId +
", host='" + host + '\'' +
", timestamp=" + timestamp +
", version=" + version +
", gmtCreate=" + gmtCreate +
'}';
}
public static ControllerDO newInstance(Long clusterId,
Integer brokerId,
String host,
Long timestamp,
Integer version) {
ControllerDO controllerDO = new ControllerDO();
controllerDO.setClusterId(clusterId);
controllerDO.setBrokerId(brokerId);
controllerDO.setHost(host == null ? "" : host);
controllerDO.setTimestamp(timestamp);
controllerDO.setVersion(version);
return controllerDO;
}
}

View File

@@ -0,0 +1,96 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
/**
* migrate topic task do
* @author zengqiao
* @date 19/4/16
*/
public class MigrationTaskDO extends BaseDO {
private Long clusterId;
private String topicName;
private String reassignmentJson;
private Long throttle;
private String operator;
private String description;
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getReassignmentJson() {
return reassignmentJson;
}
public void setReassignmentJson(String reassignmentJson) {
this.reassignmentJson = reassignmentJson;
}
public Long getThrottle() {
return throttle;
}
public void setThrottle(Long throttle) {
this.throttle = throttle;
}
@Override
public String toString() {
return "MigrationTaskDO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", reassignmentJson='" + reassignmentJson + '\'' +
", throttle=" + throttle +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
public static MigrationTaskDO createInstance(Long clusterId,
String topicName,
String reassignmentJson,
Long throttle,
String description) {
MigrationTaskDO migrationTaskDO = new MigrationTaskDO();
migrationTaskDO.setClusterId(clusterId);
migrationTaskDO.setTopicName(topicName);
migrationTaskDO.setReassignmentJson(reassignmentJson);
migrationTaskDO.setThrottle(throttle);
migrationTaskDO.setDescription(description);
return migrationTaskDO;
}
}
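
createInstance is the write-path entry point for new migration records. A hedged call sketch (all argument values are illustrative; planJson is assumed to be a serialized reassignment plan produced elsewhere):

MigrationTaskDO task = MigrationTaskDO.createInstance(
1L, // clusterId, illustrative
"test-topic", // topicName, illustrative
planJson, // reassignment plan JSON, assumed
10L * 1024 * 1024, // throttle: a 10 MB/s replication limit
"expand partitions" // description
);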

View File

@@ -0,0 +1,64 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class OperationHistoryDO extends BaseEntryDO {
private Long clusterId;
private String topicName;
private String operator;
private String operation;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
public String getOperation() {
return operation;
}
public void setOperation(String operation) {
this.operation = operation;
}
@Override
public String toString() {
return "OperationHistoryDO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", operator='" + operator + '\'' +
", operation='" + operation + '\'' +
", id=" + id +
", gmtCreate=" + gmtCreate +
'}';
}
public static OperationHistoryDO newInstance(Long clusterId, String topicName, String operator, String operation) {
OperationHistoryDO operationHistoryDO = new OperationHistoryDO();
operationHistoryDO.setClusterId(clusterId);
operationHistoryDO.setTopicName(topicName);
operationHistoryDO.setOperator(operator);
operationHistoryDO.setOperation(operation);
return operationHistoryDO;
}
}

View File

@@ -0,0 +1,112 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class OrderPartitionDO extends BaseDO {
private Long clusterId;
private String clusterName;
private String topicName;
private String applicant;
private Long peakBytesIn;
private String description;
private Integer orderStatus;
private String approver;
private String opinion;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getApplicant() {
return applicant;
}
public void setApplicant(String applicant) {
this.applicant = applicant;
}
public Long getPeakBytesIn() {
return peakBytesIn;
}
public void setPeakBytesIn(Long peakBytesIn) {
this.peakBytesIn = peakBytesIn;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Integer getOrderStatus() {
return orderStatus;
}
public void setOrderStatus(Integer orderStatus) {
this.orderStatus = orderStatus;
}
public String getApprover() {
return approver;
}
public void setApprover(String approver) {
this.approver = approver;
}
public String getOpinion() {
return opinion;
}
public void setOpinion(String opinion) {
this.opinion = opinion;
}
@Override
public String toString() {
return "OrderPartitionDO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", topicName='" + topicName + '\'' +
", applicant='" + applicant + '\'' +
", peakBytesIn=" + peakBytesIn +
", description='" + description + '\'' +
", orderStatus=" + orderStatus +
", approver='" + approver + '\'' +
", opinion='" + opinion + '\'' +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,178 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class OrderTopicDO extends BaseDO {
private Long clusterId;
private String clusterName;
private String topicName;
private Long retentionTime;
private Integer partitionNum;
private Integer replicaNum;
private String regions;
private String brokers;
private Long peakBytesIn;
private String applicant;
private String principals;
private String description;
private Integer orderStatus;
private String approver;
private String opinion;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getRetentionTime() {
return retentionTime;
}
public void setRetentionTime(Long retentionTime) {
this.retentionTime = retentionTime;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Integer getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(Integer replicaNum) {
this.replicaNum = replicaNum;
}
public String getRegions() {
return regions;
}
public void setRegions(String regions) {
this.regions = regions;
}
public String getBrokers() {
return brokers;
}
public void setBrokers(String brokers) {
this.brokers = brokers;
}
public Long getPeakBytesIn() {
return peakBytesIn;
}
public void setPeakBytesIn(Long peakBytesIn) {
this.peakBytesIn = peakBytesIn;
}
public String getApplicant() {
return applicant;
}
public void setApplicant(String applicant) {
this.applicant = applicant;
}
public String getPrincipals() {
return principals;
}
public void setPrincipals(String principals) {
this.principals = principals;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Integer getOrderStatus() {
return orderStatus;
}
public void setOrderStatus(Integer orderStatus) {
this.orderStatus = orderStatus;
}
public String getApprover() {
return approver;
}
public void setApprover(String approver) {
this.approver = approver;
}
public String getOpinion() {
return opinion;
}
public void setOpinion(String opinion) {
this.opinion = opinion;
}
@Override
public String toString() {
return "OrderTopicDO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", topicName='" + topicName + '\'' +
", retentionTime=" + retentionTime +
", partitionNum=" + partitionNum +
", replicaNum=" + replicaNum +
", regions='" + regions + '\'' +
", brokers='" + brokers + '\'' +
", peakBytesIn=" + peakBytesIn +
", applicant='" + applicant + '\'' +
", principals='" + principals + '\'' +
", description='" + description + '\'' +
", orderStatus=" + orderStatus +
", approver='" + approver + '\'' +
", opinion='" + opinion + '\'' +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,63 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class RegionDO extends BaseDO {
private String regionName;
private Long clusterId;
private String brokerList;
private Integer level;
private String description;
private String operator;
public String getRegionName() {
return regionName;
}
public void setRegionName(String regionName) {
this.regionName = regionName;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getBrokerList() {
return brokerList;
}
public void setBrokerList(String brokerList) {
this.brokerList = brokerList;
}
public Integer getLevel() {
return level;
}
public void setLevel(Integer level) {
this.level = level;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
}

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class TopicDO extends BaseDO {
private Long clusterId;
private String topicName;
private String applicant;
private String principals;
private String description;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getApplicant() {
return applicant;
}
public void setApplicant(String applicant) {
this.applicant = applicant;
}
public String getPrincipals() {
return principals;
}
public void setPrincipals(String principals) {
this.principals = principals;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "TopicDO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", applicant='" + applicant + '\'' +
", principals='" + principals + '\'' +
", description='" + description + '\'' +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.kafka.manager.common.entity.po;
public class TopicFavoriteDO extends BaseDO {
private String username;
private Long clusterId;
private String topicName;
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
@Override
public String toString() {
return "TopicFavoriteDO{" +
"username='" + username + '\'' +
", clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", id=" + id +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.common.entity.po.query;
/**
* @author zengqiao
* @date 19/12/2
*/
public class AlarmRuleQueryOption extends BaseQueryOption {
private String alarmName;
public String getAlarmName() {
return alarmName;
}
public void setAlarmName(String alarmName) {
this.alarmName = alarmName;
}
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.common.entity.po.query;
/**
* @author zengqiao
* @date 19/12/2
*/
public class BaseQueryOption {
protected Long id;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@Override
public String toString() {
return "BaseQueryOption{" +
"id=" + id +
'}';
}
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.common.entity.po.query;
/**
* @author zengqiao
* @date 19/12/4
*/
public class ClusterQueryOption extends BaseQueryOption {
private String clusterName;
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
}

View File

@@ -0,0 +1,132 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
* @author zengqiao
* @date 19/4/3
*
* Stores broker metadata; the corresponding ZK node is /brokers/ids/{brokerId}
* Node structure:
* {
* "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"},
* "endpoints":["SASL_PLAINTEXT://10.179.162.202:9093"],
* "jmx_port":9999,
* "host":null,
* "timestamp":"1546632983233",
* "port":-1,
* "version":4
* }
*/
public class BrokerMetadata implements Cloneable {
private final static Logger LOGGER = LoggerFactory.getLogger(BrokerMetadata.class);
private long clusterId;
private int brokerId;
private List<String> endpoints;
private String host;
private int port;
// Fields below map one-to-one to the keys of the ZK node
private int jmx_port;
private String version;
private long timestamp;
public long getClusterId() {
return clusterId;
}
public void setClusterId(long clusterId) {
this.clusterId = clusterId;
}
public int getBrokerId() {
return brokerId;
}
public void setBrokerId(int brokerId) {
this.brokerId = brokerId;
}
public List<String> getEndpoints() {
return endpoints;
}
public void setEndpoints(List<String> endpoints) {
this.endpoints = endpoints;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public int getJmxPort() {
return jmx_port;
}
public void setJmxPort(int jmxPort) {
this.jmx_port = jmxPort;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public long getTimestamp() {
return timestamp;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException var3) {
LOGGER.error("clone BrokerMetadata failed.", var3);
}
return null;
}
@Override
public String toString() {
return "BrokerMetadata{" +
"clusterId=" + clusterId +
", brokerId=" + brokerId +
", endpoints=" + endpoints +
", host='" + host + '\'' +
", port=" + port +
", jmxPort=" + jmx_port +
", version='" + version + '\'' +
", timestamp=" + timestamp +
'}';
}
}
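
Because the field names (including jmx_port) mirror the keys of the ZK payload shown in the class javadoc, the node content can be bound directly by a field-name-based JSON mapper. A hedged sketch assuming fastjson; any field-based binder would work similarly:

String json = "{\"jmx_port\":9999,\"host\":null,\"timestamp\":\"1546632983233\",\"port\":-1,\"version\":4}";
BrokerMetadata metadata = com.alibaba.fastjson.JSON.parseObject(json, BrokerMetadata.class);
// metadata.getJmxPort() == 9999; the string timestamp is coerced to the long field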

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
/**
* @author zengqiao
* @date 19/4/22
*/
public class ControllerData {
private Integer brokerid;
private Integer version;
private Long timestamp;
public Integer getBrokerid() {
return brokerid;
}
public void setBrokerid(Integer brokerid) {
this.brokerid = brokerid;
}
public Integer getVersion() {
return version;
}
public void setVersion(Integer version) {
this.version = version;
}
public Long getTimestamp() {
return timestamp;
}
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}
@Override
public String toString() {
return "ControllerData{" +
"brokerid=" + brokerid +
", version=" + version +
", timestamp=" + timestamp +
'}';
}
}

View File

@@ -0,0 +1,44 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
/**
* Defined after the content of the /brokers/topics/{topic} ZK node
* @author tukun
* @date 2015/11/10.
*/
public class PartitionMap implements Serializable {
/**
* Version number
*/
private int version;
/**
* Map from partition id to the brokerId list hosting its replicas
*/
private Map<Integer, List<Integer>> partitions;
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public Map<Integer, List<Integer>> getPartitions() {
return partitions;
}
public void setPartitions(Map<Integer, List<Integer>> partitions) {
this.partitions = partitions;
}
@Override
public String toString() {
return "PartitionMap{" + "version=" + version + ", partitions=" + partitions + '}';
}
}
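
For reference, a hedged sketch of the node payload this class binds to (partition ids map to replica brokerId lists; fastjson is an assumed mapper):

String json = "{\"version\":1,\"partitions\":{\"0\":[1,2],\"1\":[2,3]}}";
PartitionMap partitionMap = com.alibaba.fastjson.JSON.parseObject(json, PartitionMap.class);
// partitionMap.getPartitions().get(0) -> [1, 2], the replica brokerIds of partition 0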

View File

@@ -0,0 +1,177 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.ArrayList;
import java.util.List;
/**
* PartitionState instance
* Mirrors the state node under ZooKeeper plus additional partition information
* @author tukun
* @date 2015/11/10.
*/
public class PartitionState implements Cloneable {
/**
* partition id
*/
private int partitionId;
/**
* Number of controller elections in the Kafka cluster (controller epoch)
*/
private int controller_epoch;
/**
* BrokerId of this partition's leader
*/
private int leader;
/**
* Version number of the partition state
*/
private int version;
/**
* Number of leader elections for this partition (leader epoch)
*/
private int leader_epoch;
/**
* BrokerId list of the in-sync replica set (ISR)
*/
private List<Integer> isr;
/**
* Whether the partition is under-replicated
*/
private boolean isUnderReplicated;
/**
* Offset of the partition
*/
private long offset;
/**
* Consumed offset
*/
private long consumeOffset;
/**
* Consumer group that the consumed offset belongs to
*/
private String consumerGroup;
public int getPartitionId() {
return partitionId;
}
public void setPartitionId(int partitionId) {
this.partitionId = partitionId;
}
public int getControllerEpoch() {
return controller_epoch;
}
public void setControllerEpoch(int controllerEpoch) {
this.controller_epoch = controllerEpoch;
}
public int getLeader() {
return leader;
}
public void setLeader(int leader) {
this.leader = leader;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public int getLeaderEpoch() {
return leader_epoch;
}
public void setLeaderEpoch(int leaderEpoch) {
this.leader_epoch = leaderEpoch;
}
public List<Integer> getIsr() {
return isr;
}
public void setIsr(List<Integer> isr) {
this.isr = isr;
}
public boolean isUnderReplicated() {
return isUnderReplicated;
}
public void setUnderReplicated(boolean underReplicated) {
isUnderReplicated = underReplicated;
}
public long getOffset() {
return offset;
}
public void setOffset(long offset) {
this.offset = offset;
}
public long getConsumeOffset() {
return consumeOffset;
}
public void setConsumeOffset(long consumeOffset) {
this.consumeOffset = consumeOffset;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
@Override
public String toString() {
return "PartitionState{" +
"partitionId=" + partitionId +
", controller_epoch=" + controller_epoch +
", leader=" + leader +
", version=" + version +
", leader_epoch=" + leader_epoch +
", isr=" + isr +
", isUnderReplicated=" + isUnderReplicated +
", offset=" + offset +
", consumeOffset=" + consumeOffset +
", consumerGroup='" + consumerGroup + '\'' +
'}';
}
@Override
public PartitionState clone() {
try {
PartitionState partitionState = (PartitionState) super.clone();
partitionState.setPartitionId(this.partitionId);
partitionState.setControllerEpoch(this.controller_epoch);
partitionState.setLeader(this.leader);
partitionState.setVersion(this.version);
partitionState.setLeaderEpoch(this.leader_epoch);
partitionState.setIsr(new ArrayList<>(this.isr));
partitionState.setOffset(this.offset);
partitionState.setConsumeOffset(this.consumeOffset);
partitionState.setConsumerGroup(this.consumerGroup);
return partitionState;
} catch (CloneNotSupportedException e) {
// Unreachable in practice: PartitionState implements Cloneable
}
return null;
}
}
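
Note that clone() re-wraps isr in a new ArrayList, so the copy owns its own ISR list. A small sketch of that semantic (values illustrative):

PartitionState original = new PartitionState();
original.setIsr(new java.util.ArrayList<>(java.util.Arrays.asList(1, 2, 3)));
PartitionState copy = original.clone();
copy.getIsr().remove(Integer.valueOf(3));
// original.getIsr() is still [1, 2, 3]: the two lists are independent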

View File

@@ -0,0 +1,48 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* @author zengqiao
* @date 20/1/15
*/
public class ReassignmentDTO {
private Integer version;
private List<Map<String, String>> topics;
public ReassignmentDTO(Integer version, String topicName) {
this.version = version;
Map<String, String> topic = new HashMap<>();
topic.put("topic", topicName);
topics = new ArrayList<>();
topics.add(topic);
}
public Integer getVersion() {
return version;
}
public void setVersion(Integer version) {
this.version = version;
}
public List<Map<String, String>> getTopics() {
return topics;
}
public void setTopics(List<Map<String, String>> topics) {
this.topics = topics;
}
@Override
public String toString() {
return "ReassignmentDTO{" +
"version=" + version +
", topics=" + topics +
'}';
}
}

View File

@@ -0,0 +1,48 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.List;
/**
* @author zengqiao
* @date 20/1/15
*/
public class ReassignmentElemDTO {
private String topic;
private Integer partition;
private List<Integer> replicas;
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public Integer getPartition() {
return partition;
}
public void setPartition(Integer partition) {
this.partition = partition;
}
public List<Integer> getReplicas() {
return replicas;
}
public void setReplicas(List<Integer> replicas) {
this.replicas = replicas;
}
@Override
public String toString() {
return "ReassignmentElemDTO{" +
"topic='" + topic + '\'' +
", partition=" + partition +
", replicas=" + replicas +
'}';
}
}

View File

@@ -0,0 +1,37 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.List;
/**
* @author zengqiao
* @date 20/1/15
*/
public class ReassignmentJsonDTO {
private Integer version;
private List<ReassignmentElemDTO> partitions;
public Integer getVersion() {
return version;
}
public void setVersion(Integer version) {
this.version = version;
}
public List<ReassignmentElemDTO> getPartitions() {
return partitions;
}
public void setPartitions(List<ReassignmentElemDTO> partitions) {
this.partitions = partitions;
}
@Override
public String toString() {
return "ReassignmentJsonDTO{" +
"version=" + version +
", partitions=" + partitions +
'}';
}
}
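
Taken together, ReassignmentDTO models the topics-to-move input and ReassignmentJsonDTO the full plan exchanged with Kafka's partition-reassignment tooling. A hedged serialization sketch (topic name illustrative; fastjson assumed as the serializer):

ReassignmentElemDTO elem = new ReassignmentElemDTO();
elem.setTopic("test-topic");
elem.setPartition(0);
elem.setReplicas(java.util.Arrays.asList(1, 2, 3));
ReassignmentJsonDTO plan = new ReassignmentJsonDTO();
plan.setVersion(1);
plan.setPartitions(java.util.Collections.singletonList(elem));
String json = com.alibaba.fastjson.JSON.toJSONString(plan);
// e.g. {"partitions":[{"partition":0,"replicas":[1,2,3],"topic":"test-topic"}],"version":1}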

View File

@@ -0,0 +1,93 @@
package com.xiaojukeji.kafka.manager.common.entity.zookeeper;
import java.util.Set;
/**
* Stores topic metadata; the corresponding ZK node is /brokers/topics/${topicName}
* @author zengqiao
* @date 19/4/3
*/
public class TopicMetadata implements Cloneable {
private String topic; // topic name
private PartitionMap partitionMap; // brokers that host each partition
private Set<Integer> brokerIdSet; // brokers hosting the topic, derived from partitionMap
private int replicaNum; // replica count
private int partitionNum; // partition count
private long modifyTime; // node modification time
private long createTime; // node creation time
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public int getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(int replicaNum) {
this.replicaNum = replicaNum;
}
public PartitionMap getPartitionMap() {
return partitionMap;
}
public void setPartitionMap(PartitionMap partitionMap) {
this.partitionMap = partitionMap;
}
public Set<Integer> getBrokerIdSet() {
return brokerIdSet;
}
public void setBrokerIdSet(Set<Integer> brokerIdSet) {
this.brokerIdSet = brokerIdSet;
}
public int getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(int partitionNum) {
this.partitionNum = partitionNum;
}
public long getModifyTime() {
return modifyTime;
}
public void setModifyTime(long modifyTime) {
this.modifyTime = modifyTime;
}
public long getCreateTime() {
return createTime;
}
public void setCreateTime(long createTime) {
this.createTime = createTime;
}
@Override
public String toString() {
return "TopicMetadata{" +
"topic='" + topic + '\'' +
", partitionMap=" + partitionMap +
", brokerIdSet=" + brokerIdSet +
", replicaNum=" + replicaNum +
", partitionNum=" + partitionNum +
", modifyTime=" + modifyTime +
", createTime=" + createTime +
'}';
}
}

View File

@@ -0,0 +1,25 @@
package com.xiaojukeji.kafka.manager.common.exception;
/**
* @author limeng
* @date 2017/12/22
*/
public class ConfigException extends Exception {
private static final long serialVersionUID = -3670649722021947735L;
public ConfigException(Throwable cause) {
super(cause);
}
public ConfigException(String message, Throwable cause) {
super(message, cause);
}
public ConfigException(String message) {
super(message);
}
public ConfigException() {
}
}

View File

@@ -0,0 +1,25 @@
package com.xiaojukeji.kafka.manager.common.exception;
/**
* @author huangyiminghappy@163.com
* @date 2019/3/15
*/
public class CopyException extends RuntimeException {
private final static long serialVersionUID = 1L;
public CopyException(String message) {
super(message);
}
public CopyException(String message, Throwable cause) {
super(message, cause);
}
public CopyException(Throwable cause) {
super(cause);
}
public CopyException() {
super();
}
}

View File

@@ -0,0 +1,482 @@
package com.xiaojukeji.kafka.manager.common.utils;
import com.xiaojukeji.kafka.manager.common.exception.CopyException;
import org.apache.commons.beanutils.PropertyUtils;
import java.beans.PropertyDescriptor;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* Utility for copying an object into a new type and deep-cloning objects of the same type
* @author huangyiminghappy@163.com
* @date 2019/3/15
*/
public class CopyUtils {
@SuppressWarnings({"unchecked", "rawtypes"})
public static <T> T deepCopy(T obj) {
if (obj == null) {
return null;
} else if (obj instanceof String) {
return (T)(String) obj;
} else if (obj instanceof Integer) {
return (T)(Integer) obj;
} else if (obj instanceof Double) {
return (T)(Double) obj;
} else if (obj instanceof Byte) {
return (T)(Byte) obj;
} else if (obj instanceof Short) {
return (T)(Short) obj;
} else if (obj instanceof Long) {
return (T)(Long) obj;
} else if (obj instanceof Float) {
return (T)(Float) obj;
} else if (obj instanceof Character) {
return (T)(Character) obj;
} else if (obj instanceof Boolean) {
return (T)(Boolean) obj;
} else if (obj instanceof ArrayList<?>) {
return (T) arrayListHandler((ArrayList<?>) obj);
} else if (obj instanceof HashMap<?, ?>) {
return (T) mapHandler((Map<?, ?>) obj);
} else if (obj instanceof ConcurrentHashMap<?, ?>) {
return (T) concurrentMapHandler((Map<?, ?>) obj);
} else if (obj instanceof TreeMap<?, ?>) {
return (T) treeMapHandler((Map<?, ?>) obj);
} else if (obj instanceof LinkedList<?>) {
return (T) linkedListHandler((LinkedList<?>) obj);
} else if (obj instanceof HashSet<?>) {
return (T) hashSetHandler((HashSet<?>) obj);
} else if (isPrimitiveArray(obj)) {
return getPrimitiveArray(obj);
}
T finObj = null;
Class rezClass = obj.getClass();
try {
Constructor<T> constructor = getCompleteConstructor(rezClass);
finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass));
copyFields(rezClass, obj, finObj);
} catch (Exception e) {
e.printStackTrace();
}
return finObj;
}
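// Hedged usage sketch for the public entry point above (caller code assumed):
//   Map<String, List<Integer>> src = new HashMap<>();
//   src.put("isr", new ArrayList<>(Arrays.asList(1, 2, 3)));
//   Map<String, List<Integer>> copy = CopyUtils.deepCopy(src);
//   copy.get("isr").clear(); // src.get("isr") still holds [1, 2, 3]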
@SuppressWarnings({"unchecked", "rawtypes"})
private static <T> T deepCopy(T obj, Object parent) {
if (obj == null) {
return null;
} else if (obj instanceof String) {
return (T)String.valueOf((String) obj);
} else if (obj instanceof Integer) {
return (T)Integer.valueOf((Integer) obj);
} else if (obj instanceof Double) {
return (T)Double.valueOf((Double) obj);
} else if (obj instanceof Byte) {
return (T)Byte.valueOf((Byte) obj);
} else if (obj instanceof Short) {
return (T)Short.valueOf((Short) obj);
} else if (obj instanceof Long) {
return (T)Long.valueOf((Long) obj);
} else if (obj instanceof Float) {
return (T)Float.valueOf((Float) obj);
} else if (obj instanceof Character) {
return (T)Character.valueOf((Character) obj);
} else if (obj instanceof Boolean) {
return (T)Boolean.valueOf((Boolean) obj);
} else if (obj instanceof ArrayList<?>) {
return (T) arrayListHandler((ArrayList<?>) obj);
} else if (obj instanceof HashMap<?, ?>) {
return (T) mapHandler((Map<?, ?>) obj);
} else if (obj instanceof ConcurrentHashMap<?, ?>) {
return (T) concurrentMapHandler((Map<?, ?>) obj);
} else if (obj instanceof TreeMap<?, ?>) {
return (T) treeMapHandler((Map<?, ?>) obj);
} else if (obj instanceof LinkedList<?>) {
return (T) linkedListHandler((LinkedList<?>) obj);
} else if (obj instanceof HashSet<?>) {
return (T) hashSetHandler((HashSet<?>) obj);
} else if (isPrimitiveArray(obj)) {
return getPrimitiveArray(obj);
}
T finObj = null;
Class rezClass = obj.getClass();
try {
Constructor<T> constructor = getCompleteConstructor(rezClass);
finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass));
copyFields(rezClass, obj, finObj, parent);
} catch (Exception e) {
e.printStackTrace();
}
return finObj;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private static ArrayList<?> arrayListHandler(ArrayList<?> obj) {
ArrayList srcList = obj;
ArrayList finList = new ArrayList();
for (int i = 0; i < srcList.size(); i++) {
finList.add(CopyUtils.deepCopy(srcList.get(i)));
}
return finList;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <K, V> Map<K, V> mapHandler(Map<K, V> obj) {
Map<K, V> src = obj;
Map<K, V> fin = new HashMap<K, V>();
for (Map.Entry entry : src.entrySet()) {
K key = (K) CopyUtils.deepCopy(entry.getKey());
V value = (V) CopyUtils.deepCopy(entry.getValue());
fin.put(key, value);
}
return fin;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <K, V> Map<K, V> concurrentMapHandler(Map<K, V> obj) {
Map<K, V> src = obj;
Map<K, V> fin = new ConcurrentHashMap<K, V>();
for (Map.Entry entry : src.entrySet()) {
K key = (K) CopyUtils.deepCopy(entry.getKey());
V value = (V) CopyUtils.deepCopy(entry.getValue());
fin.put(key, value);
}
return fin;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <K, V> Map<K, V> treeMapHandler(Map<K, V> obj) {
Map<K, V> src = obj;
Map<K, V> fin = new TreeMap<K, V>();
for (Map.Entry entry : src.entrySet()) {
K key = (K) CopyUtils.deepCopy(entry.getKey());
V value = (V) CopyUtils.deepCopy(entry.getValue());
fin.put(key, value);
}
return fin;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private static LinkedList<?> linkedListHandler(LinkedList<?> obj) {
LinkedList srcList = obj;
LinkedList finList = new LinkedList<>();
for (int i = 0; i < srcList.size(); i++) {
finList.add(CopyUtils.deepCopy(srcList.get(i)));
}
return finList;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private static HashSet<?> hashSetHandler(HashSet<?> obj) {
HashSet srcList = obj;
HashSet finList = new HashSet<>();
for (Object o : srcList) {
finList.add(CopyUtils.deepCopy(o));
}
return finList;
}
private static boolean isPrimitiveArray(Object obj) {
return obj instanceof byte[] ||
obj instanceof short[] ||
obj instanceof int[] ||
obj instanceof long[] ||
obj instanceof float[] ||
obj instanceof double[] ||
obj instanceof char[] ||
obj instanceof boolean[];
}
private static boolean isPrimitiveArray(String type) {
return "byte[]".equals(type) ||
"short[]".equals(type) ||
"int[]".equals(type) ||
"long[]".equals(type) ||
"float[]".equals(type) ||
"double[]".equals(type) ||
"char[]".equals(type) ||
"boolean[]".equals(type);
}
@SuppressWarnings("unchecked")
private static <T> T getPrimitiveArray(T obj) {
if (obj instanceof int[]) {
int[] arr = new int[((int[]) obj).length];
for (int i = 0; i < ((int[]) obj).length; i++) {
arr[i] = ((int[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof byte[]) {
byte[] arr = new byte[((byte[]) obj).length];
for (int i = 0; i < ((byte[]) obj).length; i++) {
arr[i] = ((byte[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof short[]) {
short[] arr = new short[((short[]) obj).length];
for (int i = 0; i < ((short[]) obj).length; i++) {
arr[i] = ((short[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof long[]) {
long[] arr = new long[((long[]) obj).length];
for (int i = 0; i < ((long[]) obj).length; i++) {
arr[i] = ((long[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof float[]) {
float[] arr = new float[((float[]) obj).length];
for (int i = 0; i < ((float[]) obj).length; i++) {
arr[i] = ((float[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof double[]) {
double[] arr = new double[((double[]) obj).length];
for (int i = 0; i < ((double[]) obj).length; i++) {
arr[i] = ((double[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof char[]) {
char[] arr = new char[((char[]) obj).length];
for (int i = 0; i < ((char[]) obj).length; i++) {
arr[i] = ((char[]) obj)[i];
}
return (T) arr;
} else if (obj instanceof boolean[]) {
boolean[] arr = new boolean[((boolean[]) obj).length];
for (int i = 0; i < ((boolean[]) obj).length; i++) {
arr[i] = ((boolean[]) obj)[i];
}
return (T) arr;
}
return null;
}
@SuppressWarnings("unchecked")
private static <T> T getPrimitiveArray(T obj, String type) {
if ("int[]".equals(type)) {
int[] arr = new int[1];
arr[0] = 0;
return (T) arr;
} else if ("byte[]".equals(type)) {
byte[] arr = new byte[1];
arr[0] = 0;
return (T) arr;
} else if ("short[]".equals(type)) {
short[] arr = new short[1];
arr[0] = 0;
return (T) arr;
} else if ("long[]".equals(type)) {
long[] arr = new long[1];
arr[0] = 0;
return (T) arr;
} else if ("float[]".equals(type)) {
float[] arr = new float[1];
arr[0] = 0;
return (T) arr;
} else if ("double[]".equals(type)) {
double[] arr = new double[1];
arr[0] = 0;
return (T) arr;
} else if ("char[]".equals(type)) {
char[] arr = new char[1];
arr[0] = 0;
return (T) arr;
} else if ("boolean[]".equals(type)) {
boolean[] arr = new boolean[1];
arr[0] = false;
return (T) arr;
}
return null;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static Constructor getCompleteConstructor(Class ourClass)
throws NoSuchMethodException, SecurityException {
Constructor constructor = null;
Class[] params = new Class[ourClass.getDeclaredConstructors()[0].getParameterTypes().length];
for (int i = 0; i < ourClass.getDeclaredConstructors()[0].getParameterTypes().length; i++) {
params[i] = ourClass.getDeclaredConstructors()[0].getParameterTypes()[i];
}
constructor = ourClass.getConstructor(params);
constructor.setAccessible(true);
return constructor;
}
@SuppressWarnings("rawtypes")
private static Object[] getParamsObjForConstructor(Class ourClass)
throws NoSuchMethodException, SecurityException {
Constructor constuctor = null;
constuctor = ourClass.getDeclaredConstructors()[0];
constuctor.setAccessible(true);
Object[] objParams = new Object[constuctor.getParameterTypes().length];
for (int i = 0; i < constuctor.getParameterTypes().length; i++) {
String fieldType = constuctor.getParameterTypes()[i].toString();
if ("int".equalsIgnoreCase(fieldType) ||
"double".toString().equalsIgnoreCase(fieldType) ||
"float".equalsIgnoreCase(fieldType) ||
"byte".toString().equalsIgnoreCase(fieldType) ||
"char".equalsIgnoreCase(fieldType) ||
"long".equalsIgnoreCase(fieldType)) {
objParams[i] = 0;
} else if ("boolean".equalsIgnoreCase(fieldType)) {
objParams[i] = false;
} else if (isPrimitiveArray(constuctor.getParameterTypes()[i].getCanonicalName())) {
objParams[i] = getPrimitiveArray(constuctor.getParameterTypes()[i],
constuctor.getParameterTypes()[i].getCanonicalName()
);
} else {
objParams[i] = null;
}
}
return objParams;
}
@SuppressWarnings("rawtypes")
private static <T> void copyFields(Class ourClass, T srcObj, T finObj)
throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
Field[] fields = ourClass.getDeclaredFields();
for (int i = 0; i < fields.length; i++) {
fields[i].setAccessible(true);
Field modField = Field.class.getDeclaredField("modifiers");
modField.setAccessible(true);
modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL);
String fieldType = fields[i].getType().toString();
if ("int".equalsIgnoreCase(fieldType) ||
"double".equalsIgnoreCase(fieldType) ||
"float".equalsIgnoreCase(fieldType) ||
"byte".equalsIgnoreCase(fieldType) ||
"char".equalsIgnoreCase(fieldType) ||
"boolean".equalsIgnoreCase(fieldType) ||
"short".equalsIgnoreCase(fieldType) ||
"long".equalsIgnoreCase(fieldType)) {
fields[i].set(finObj, fields[i].get(srcObj));
} else {
fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj));
}
}
}
@SuppressWarnings("rawtypes")
private static <T> void copyFields(Class ourClass, T srcObj, T finObj, Object parent)
throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
Field[] fields = ourClass.getDeclaredFields();
for (int i = 0; i < fields.length; i++) {
fields[i].setAccessible(true);
Field modField = Field.class.getDeclaredField("modifiers");
modField.setAccessible(true);
modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL);
String fieldType = fields[i].getType().toString();
if ("int".equalsIgnoreCase(fieldType) ||
"double".equalsIgnoreCase(fieldType) ||
"float".equalsIgnoreCase(fieldType) ||
"byte".equalsIgnoreCase(fieldType) ||
"char".equalsIgnoreCase(fieldType) ||
"boolean".equalsIgnoreCase(fieldType) ||
"short".equalsIgnoreCase(fieldType) ||
"long".equalsIgnoreCase(fieldType)) {
fields[i].set(finObj, fields[i].get(srcObj));
} else {
Object srcValue = fields[i].get(srcObj);
// guard against null field values, and keep the back-reference to the
// parent instead of recursing into it (cyclic reference protection)
if (srcValue != null && srcValue.toString().equals(parent.toString())) {
fields[i].set(finObj, srcValue);
} else {
fields[i].set(finObj, CopyUtils.deepCopy(srcValue, finObj));
}
}
}
}
static void setFinalStaticField(Field field, Object newValue) throws Exception {
field.setAccessible(true);
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
field.set(null, newValue);
}
public static Object copyProperties(Object target, Object orig) {
if (target == null || orig == null) {
return target;
}
PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(target);
try {
for (int i = 0; i < destDesc.length; i++) {
Class destType = destDesc[i].getPropertyType();
Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName());
if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) {
if (!Collection.class.isAssignableFrom(origType)) {
try {
Object value = PropertyUtils.getProperty(orig, destDesc[i].getName());
PropertyUtils.setProperty(target, destDesc[i].getName(), value);
} catch (Exception ex) {
// ignore properties that cannot be read or written
}
}
}
}
return target;
} catch (Exception ex) {
throw new CopyException(ex);
}
}
public static Object copyProperties(Object dest, Object orig, String[] ignores) {
if (dest == null || orig == null) {
return dest;
}
PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(dest);
try {
for (int i = 0; i < destDesc.length; i++) {
if (contains(ignores, destDesc[i].getName())) {
continue;
}
Class destType = destDesc[i].getPropertyType();
Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName());
if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) {
if (!Collection.class.isAssignableFrom(origType)) {
Object value = PropertyUtils.getProperty(orig, destDesc[i].getName());
PropertyUtils.setProperty(dest, destDesc[i].getName(), value);
}
}
}
return dest;
} catch (Exception ex) {
throw new CopyException(ex);
}
}
static boolean contains(String[] ignores, String name) {
if (ignores == null) {
return false;
}
for (String ignore : ignores) {
if (ignore.equals(name)) {
return true;
}
}
return false;
}
}
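// Usage sketch (illustrative only, not part of this commit): copy the readable
// bean properties of a source object onto a fresh target while skipping a
// hypothetical "id" property. Any bean with standard getters/setters works.
class CopyUtilsUsageSketch {
static Object cloneWithoutId(Object target, Object source) {
// copyProperties silently skips Collection-typed and type-mismatched properties
return CopyUtils.copyProperties(target, source, new String[]{"id"});
}
}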

View File

@@ -0,0 +1,15 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.util.Date;
/**
 * Date utilities
 * @author huangyiminghappy@163.com
 * @date 2019-03-20
 */
public class DateUtils {
public static Date long2Date(Long time) {
if (time == null) {
return null;
}
return new Date(time);
}
}
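// Usage sketch (illustrative only): convert a millisecond timestamp, e.g. a
// gmtCreate column value, into a java.util.Date.
class DateUtilsUsageSketch {
static java.util.Date now() {
return DateUtils.long2Date(System.currentTimeMillis());
}
}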

View File

@@ -0,0 +1,57 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Created by limeng on 2017/12/22
*/
public class DefaultThreadFactory implements ThreadFactory {
private static final AtomicInteger POOL_ID = new AtomicInteger();
private final AtomicInteger nextId;
private final String prefix;
private final boolean daemon;
private final int priority;
public DefaultThreadFactory(String poolName) {
this(poolName, false, Thread.NORM_PRIORITY);
}
public DefaultThreadFactory(String poolName, boolean daemon, int priority) {
this.nextId = new AtomicInteger();
if (poolName == null) {
throw new NullPointerException("poolName");
} else if (priority >= 1 && priority <= 10) {
this.prefix = poolName + '-' + POOL_ID.incrementAndGet() + '-';
this.daemon = daemon;
this.priority = priority;
} else {
throw new IllegalArgumentException(
"priority: " + priority
+ " (expected: Thread.MIN_PRIORITY <= priority <= Thread.MAX_PRIORITY)");
}
}
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r, this.prefix + this.nextId.incrementAndGet());
try {
if (t.isDaemon()) {
if (!this.daemon) {
t.setDaemon(false);
}
} else if (this.daemon) {
t.setDaemon(true);
}
if (t.getPriority() != this.priority) {
t.setPriority(this.priority);
}
} catch (Exception e) {
// ignore: a SecurityManager may reject changing daemon status or priority
}
return t;
}
}
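// Usage sketch (illustrative only): build a fixed pool whose daemon threads are
// named "metrics-collector-<poolId>-<threadId>", which makes thread dumps and
// logs easier to read. The pool name here is a hypothetical example.
class DefaultThreadFactoryUsageSketch {
static java.util.concurrent.ExecutorService newCollectorPool() {
return java.util.concurrent.Executors.newFixedThreadPool(
4, new DefaultThreadFactory("metrics-collector", true, Thread.NORM_PRIORITY));
}
}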

View File

@@ -0,0 +1,36 @@
package com.xiaojukeji.kafka.manager.common.utils;
import java.security.MessageDigest;
/**
* @author zengqiao
* @date 20/3/17
*/
public class EncryptUtil {
private static final char[] HEX_DIGITS = {
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
};
public static String md5(String key) {
try {
byte[] btInput = key.getBytes();
MessageDigest mdInst = MessageDigest.getInstance("MD5");
// update the digest with the input bytes
mdInst.update(btInput);
// compute the MD5 digest
byte[] md = mdInst.digest();
// convert the digest bytes to a lowercase hex string
char[] str = new char[md.length * 2];
for (int i = 0, k = 0; i < md.length; i++) {
str[k++] = HEX_DIGITS[md[i] >>> 4 & 0xf];
str[k++] = HEX_DIGITS[md[i] & 0xf];
}
return new String(str);
} catch (Exception e) {
return null;
}
}
}
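// Usage sketch (illustrative only): compare a raw password against a stored
// MD5 digest. md5(...) returns 32 lowercase hex characters, or null on failure.
class EncryptUtilUsageSketch {
static boolean matches(String rawPassword, String storedDigest) {
String digest = EncryptUtil.md5(rawPassword);
return digest != null && digest.equals(storedDigest);
}
}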

View File

@@ -0,0 +1,72 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import java.io.IOException;
import java.net.MalformedURLException;
/**
* Wrapper around JMXConnector that creates the connection lazily
* @author tukun
* @date 2015/11/9.
*/
public class JmxConnectorWrap {
private final static Logger logger = LoggerFactory.getLogger(JmxConnectorWrap.class);
private JMXConnector jmxConnector;
/**
* host of the JMX endpoint
*/
private String host;
/**
* port of the JMX endpoint
*/
private int port;
public JmxConnectorWrap(String host, int port) {
this.host = host;
this.port = port;
}
public JMXConnector getJmxConnector() {
// lazily create the connection if absent; a port of -1 marks JMX as unavailable
if (jmxConnector == null && port != -1) {
createJMXConnector();
}
return jmxConnector;
}
private synchronized void createJMXConnector() {
if (jmxConnector != null) {
return;
}
String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
try {
JMXServiceURL url = new JMXServiceURL(jmxUrl);
jmxConnector = JMXConnectorFactory.connect(url, null);
// log success only after the connection is actually established
// (the original logged success even when connect() had just failed)
logger.info("JMX connect success, host:{} port:{}.", host, port);
} catch (MalformedURLException e) {
logger.error("JMX url exception, host:{} port:{} jmxUrl:{}", host, port, jmxUrl, e);
} catch (IOException e) {
logger.error("JMX connect exception, host:{} port:{}.", host, port, e);
}
}
public void close() {
if (jmxConnector == null) {
return;
}
try {
jmxConnector.close();
} catch (IOException e) {
logger.warn("close JmxConnector exception, host:{} port:{}.", host, port, e);
}
}
}
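// Usage sketch (illustrative only): read one attribute through the wrapper.
// The object name/attribute pair below mirrors the broker metrics defined in
// MbeanNameUtil; host and port are placeholders for a reachable JMX endpoint.
class JmxConnectorWrapUsageSketch {
static Object readBytesInRate(String host, int port) throws Exception {
JmxConnectorWrap wrap = new JmxConnectorWrap(host, port);
JMXConnector connector = wrap.getJmxConnector();
if (connector == null) {
return null; // connection could not be established
}
try {
return connector.getMBeanServerConnection().getAttribute(
new javax.management.ObjectName(
"kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec"),
"OneMinuteRate");
} finally {
wrap.close();
}
}
}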

View File

@@ -0,0 +1,62 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
/**
* Value object describing a monitored MBean attribute
* @author tukun
* @date 2015/11/9.
*/
public class Mbean {
/**
* JMX object name of the MBean
*/
private String objectName;
/**
* name of the monitored attribute on the MBean
*/
private String property;
/**
* Java type of the monitored attribute value
*/
private Class propertyClass;
public Mbean(String objectName, String property, Class propertyClass) {
this.objectName = objectName;
this.property = property;
this.propertyClass = propertyClass;
}
public String getObjectName() {
return objectName;
}
public void setObjectName(String objectName) {
this.objectName = objectName;
}
public String getProperty() {
return property;
}
public void setProperty(String property) {
this.property = property;
}
public Class getPropertyClass() {
return propertyClass;
}
public void setPropertyClass(Class propertyClass) {
this.propertyClass = propertyClass;
}
@Override
public String toString() {
return "Mbean{" +
"objectName='" + objectName + '\'' +
", property='" + property + '\'' +
", propertyClass=" + propertyClass +
'}';
}
}

View File

@@ -0,0 +1,93 @@
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import java.util.HashMap;
import java.util.Map;
/**
* Registry of the MBean object names used to monitor a Kafka cluster
* @author tukun, zengqiao
* @date 2015/11/5.
*/
public class MbeanNameUtil {
// broker-side metrics
private static final String MESSAGE_IN_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec";
private static final String BYTES_IN_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec";
private static final String BYTES_OUT_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec";
private static final String BYTES_REJECTED_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec";
private static final String FAILED_FETCH_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=FailedFetchRequestsPerSec";
private static final String FAILED_PRODUCE_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=FailedProduceRequestsPerSec";
private static final String PRODUCE_REQUEST_PER_SEC = "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce";
private static final String CONSUMER_REQUEST_PER_SEC = "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=FetchConsumer";
private static final String TOTAL_PRODUCE_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=TotalProduceRequestsPerSec";
private static final String TOTAL_FETCH_REQUEST_PER_SEC = "kafka.server:type=BrokerTopicMetrics,name=TotalFetchRequestsPerSec";
private static final String REQUEST_HANDLER_AVG_IDLE_PERCENT = "kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent";
private static final String NETWORK_PROCESSOR_AVG_IDLE_PERCENT = "kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent";
private static final String REQUEST_QUEUE_SIZE = "kafka.network:type=RequestChannel,name=RequestQueueSize";
private static final String RESPONSE_QUEUE_SIZE = "kafka.network:type=RequestChannel,name=ResponseQueueSize";
private static final String LOG_FLUSH_RATE_AND_TIME_MS = "kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs";
private static final String TOTAL_TIME_PRODUCE = "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce";
private static final String TOTAL_TIME_FETCH_CONSUMER = "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer";
private static final String PART_COUNT = "kafka.server:type=ReplicaManager,name=PartitionCount";
private static final String PARTITION_OFFSET_PULL = "kafka.log:type=Log,name=LogEndOffset,topic=${topic},partition=${partition}";
private static final String UNDER_REPLICATED_PARTITIONS = "kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions";
private static final String LEADER_COUNT = "kafka.server:type=ReplicaManager,name=LeaderCount";
// private static final String PRODUCE_REQUEST_TIME = "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=Produce";
// private static final String FETCH_REQUEST_TIME = "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=FetchConsumer";
// maps a metric name to the Mbean (object name + attribute) used to read it
private static Map<String, Mbean> mbeanNameMap = new HashMap<String, Mbean>();
static {
// register each metric's object name and the attribute to sample
mbeanNameMap.put("MessagesInPerSec", new Mbean(MESSAGE_IN_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("BytesInPerSec", new Mbean(BYTES_IN_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("BytesOutPerSec", new Mbean(BYTES_OUT_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("BytesRejectedPerSec", new Mbean(BYTES_REJECTED_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("FailFetchRequestPerSec", new Mbean(FAILED_FETCH_REQUEST_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("FailProduceRequestPerSec", new Mbean(FAILED_PRODUCE_REQUEST_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("ProduceRequestPerSec", new Mbean(PRODUCE_REQUEST_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("FetchConsumerRequestPerSec", new Mbean(CONSUMER_REQUEST_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("TotalProduceRequestsPerSec", new Mbean(TOTAL_PRODUCE_REQUEST_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("TotalFetchRequestsPerSec", new Mbean(TOTAL_FETCH_REQUEST_PER_SEC, "OneMinuteRate", Double.class));
mbeanNameMap.put("PartitionOffset", new Mbean(PARTITION_OFFSET_PULL, "Value", int.class));
mbeanNameMap.put("PartitionCount", new Mbean(PART_COUNT, "Value", int.class));
mbeanNameMap.put("UnderReplicatedPartitions", new Mbean(UNDER_REPLICATED_PARTITIONS, "Value", int.class));
mbeanNameMap.put("LeaderCount", new Mbean(LEADER_COUNT, "Value", int.class));
mbeanNameMap.put("RequestHandlerAvgIdlePercent", new Mbean(REQUEST_HANDLER_AVG_IDLE_PERCENT, "OneMinuteRate", Double.class));
mbeanNameMap.put("NetworkProcessorAvgIdlePercent", new Mbean(NETWORK_PROCESSOR_AVG_IDLE_PERCENT, "Value", Double.class));
mbeanNameMap.put("RequestQueueSize", new Mbean(REQUEST_QUEUE_SIZE, "Value", int.class));
mbeanNameMap.put("ResponseQueueSize", new Mbean(RESPONSE_QUEUE_SIZE, "Value", int.class));
mbeanNameMap.put("LogFlushRateAndTimeMs", new Mbean(LOG_FLUSH_RATE_AND_TIME_MS, "OneMinuteRate", Double.class));
mbeanNameMap.put("TotalTimeProduceMean", new Mbean(TOTAL_TIME_PRODUCE, "Mean", Double.class));
mbeanNameMap.put("TotalTimeProduce99Th", new Mbean(TOTAL_TIME_PRODUCE, "99thPercentile", Double.class));
mbeanNameMap.put("TotalTimeFetchConsumerMean", new Mbean(TOTAL_TIME_FETCH_CONSUMER, "Mean", Double.class));
mbeanNameMap.put("TotalTimeFetchConsumer99Th", new Mbean(TOTAL_TIME_FETCH_CONSUMER, "99thPercentile", Double.class));
// mbeanNameMap.put("ProduceRequestTime", new Mbean(PRODUCE_REQUEST_TIME,"Value"));
// mbeanNameMap.put("FetchRequestTime", new Mbean(FETCH_REQUEST_TIME,"Value"));
}
/**
* Resolve the Mbean for the given metric name; when a topic is supplied, the object name is scoped to that topic
*/
public static Mbean getMbean(String name, String topic) {
Mbean mbean = mbeanNameMap.get(name);
if (mbean == null) {
return null;
}
if (topic != null && !topic.isEmpty()) {
return new Mbean(mbean.getObjectName() + ",topic=" + topic, mbean.getProperty(), mbean.getPropertyClass());
}
return mbean;
}
}
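// Usage sketch (illustrative only): resolve the per-topic variant of a broker
// metric. For topic "test-topic" (a hypothetical name) this yields object name
// "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=test-topic"
// with attribute "OneMinuteRate".
class MbeanNameUtilUsageSketch {
static Mbean topicBytesIn(String topic) {
return MbeanNameUtil.getMbean("BytesInPerSec", topic);
}
}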

View File

@@ -0,0 +1,172 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import org.apache.zookeeper.data.Stat;
import java.util.List;
/**
* Created by limeng on 2017/12/22
*/
public interface ConfigClient {
/**
* Register a connection state listener
*
* @param listener
*/
void addStateChangeListener(StateChangeListener listener);
/**
* Check whether the node at the given path exists
*
* @param path
* @return
* @throws ConfigException
*/
boolean checkPathExists(String path) throws ConfigException;
/**
* Get the Stat of the node at the given path
*
* @param path
* @return
* @throws ConfigException
*/
Stat getNodeStat(String path) throws ConfigException;
/**
* Overwrite the data stored at the given path
*
* @param path
* @param data
* @throws ConfigException
*/
Stat setNodeStat(String path, String data) throws ConfigException;
Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException;
String createPersistentSequential(String path, String data) throws ConfigException;
/**
* Create a node holding data; the node is persistent and survives disconnects.
* <p/>
* save is for persistent storage; for ephemeral data, use register instead
*
* @param path
* @param data
* @param <T>
* @throws ConfigException
*/
// <T> void save(String path, T data) throws ConfigException;
/**
* Create a node holding data if it does not exist yet; the node is persistent.
* <p/>
* save is for persistent storage; for ephemeral data, use register instead
*
* @param path
* @param data
* @param <T>
* @throws ConfigException
*/
// <T> void saveIfNotExisted(String path, T data) throws ConfigException;
// /**
//  * Register ephemeral data: deleted when the connection drops, re-registered on reconnect
// *
// * @param path
// * @param data
// * @param <T>
// * @throws ConfigException
// */
// <T> void register(String path, T data) throws ConfigException;
/**
* Read the data at the given path and deserialize it into the given class
*
* @param path
* @param clazz
* @param <T>
* @return
* @throws ConfigException
*/
<T> T get(String path, Class<T> clazz) throws ConfigException;
/**
* Delete the node; any children are deleted as well
*
* @param path
* @throws ConfigException
*/
void delete(String path) throws ConfigException;
/**
* Read the raw string data at the given path
* @param path
* @return
* @throws ConfigException
*/
String get(String path) throws ConfigException;
/**
* Watch for data changes on the node
*
* @param path
* @param listener
*/
void watch(String path, StateChangeListener listener) throws ConfigException;
/**
* List the children of the given path
*
* @param path
* @return
* @throws ConfigException
*/
List<String> getChildren(String path) throws ConfigException;
/**
* Watch for child changes under the given path and notify the listener
*
* @param path
* @param listener
* @return
* @throws ConfigException
*/
void watchChildren(String path, StateChangeListener listener) throws ConfigException;
/**
* Stop watching child changes under the given path
*
* @param path
* @return
*/
void cancelWatchChildren(String path);
/**
* Acquire a lock on the given node
*
* @param path
* @param timeoutMS
* @param data
* @param <T>
* @return
* @throws ConfigException
*/
<T> void lock(String path, long timeoutMS, T data) throws ConfigException;
/**
* Release the lock held on the given node
*
* @param path
*/
void unLock(String path);
/**
* Release all resources
*/
void close();
// void setConfigClientTracer(ConfigClientTracer configClientTracer);
}
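// Usage sketch (illustrative only): serialize a cluster-wide task with the
// lock/unLock contract above. The lock path is a hypothetical example.
class ConfigClientLockUsageSketch {
static void runExclusively(ConfigClient client, Runnable task) throws ConfigException {
String path = "/kafka-manager/lock/collect-task";
client.lock(path, 3000L, null);
try {
task.run();
} finally {
client.unLock(path);
}
}
}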

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
/**
* Created by limeng on 2017/12/22
*/
public interface StateChangeListener {
enum State {
CONNECTION_RECONNECT,
CONNECTION_DISCONNECT,
NODE_DATA_CHANGED,
CHILD_UPDATED,
CHILD_ADDED,
CHILD_DELETED;
}
void onChange(State state, String path);
}

View File

@@ -0,0 +1,532 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.exception.ConfigException;
import com.google.common.base.Preconditions;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.*;
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.ThreadUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* @author limeng
* @date 2017/12/22
*/
public class ZkConfigImpl implements ConfigClient, ConnectionStateListener {
private static final int DEFAULT_SESSION_TIMEOUT_MS = 12000;
private static final int DEFAULT_CONNECTION_TIMEOUT_MS = 3000;
private static final int DEFAULT_THREAD_POOL_SIZE = Math.max(Runtime.getRuntime().availableProcessors(), 16);
private final static Logger logger = LoggerFactory.getLogger(ZkConfigImpl.class);
final byte[] EMPTY = new byte[0];
/**
* connection state listeners
*/
private final Map<String, java.util.concurrent.locks.Lock> registerLocks = new ConcurrentHashMap<>();
private Map<String, StateChangeListener> connectionListenerMap = new ConcurrentHashMap<>();
private Set<StateChangeListener> connectionStateListeners = new HashSet<>();
/**
* caches for per-node data watchers
*/
private final Map<String, java.util.concurrent.locks.Lock> dataPathLocks = new ConcurrentHashMap<>();
private final Map<String, NodeCache> dataWatchers = new ConcurrentHashMap<>();
private final Map<String, List<StateChangeListener>> dataListeners = new ConcurrentHashMap<>();
/**
* caches for child watchers
*/
private final Map<String, java.util.concurrent.locks.Lock> childrenPathLocks = new ConcurrentHashMap<>();
private final Map<String, PathChildrenCache> childrenWatcher = new ConcurrentHashMap<>();
private final Map<String, List<StateChangeListener>> childrenListeners = new ConcurrentHashMap<>();
/**
* all locks currently held by this client
*/
private final Map<String, Lock> lockMap = new ConcurrentHashMap<>();
private final CuratorFramework curator;
private final ExecutorService executor;
public ZkConfigImpl(String zkAddress) {
this(zkAddress, DEFAULT_SESSION_TIMEOUT_MS, DEFAULT_CONNECTION_TIMEOUT_MS);
}
public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs) {
this(zkAddress, sessionTimeoutMs, connectionTimeoutMs, DEFAULT_THREAD_POOL_SIZE);
}
public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs, int threadPoolSize) {
ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().connectString(zkAddress);
builder.retryPolicy(retryPolicy);
builder.sessionTimeoutMs(sessionTimeoutMs).connectionTimeoutMs(connectionTimeoutMs);
curator = builder.build();
curator.getConnectionStateListenable().addListener(this);
curator.start();
executor = Executors.newFixedThreadPool(threadPoolSize, ThreadUtils.newThreadFactory("PathChildrenCache"));
}
private synchronized java.util.concurrent.locks.Lock getRegisterLock(String registerPath) {
registerLocks.putIfAbsent(registerPath, new ReentrantLock());
return registerLocks.get(registerPath);
}
private synchronized java.util.concurrent.locks.Lock getDataPathLock(String dataPath) {
dataPathLocks.putIfAbsent(dataPath, new ReentrantLock());
return dataPathLocks.get(dataPath);
}
private synchronized java.util.concurrent.locks.Lock getChildrenPathLock(String childrenPath) {
childrenPathLocks.putIfAbsent(childrenPath, new ReentrantLock());
return childrenPathLocks.get(childrenPath);
}
@Override
public void stateChanged(CuratorFramework client, ConnectionState newState) {
StateChangeListener.State state;
switch (newState) {
case LOST:
logger.error("[zk] current connection status is {}", newState);
releaseLocks();
state = StateChangeListener.State.CONNECTION_DISCONNECT;
break;
case CONNECTED:
case RECONNECTED:
logger.warn("[zk] current connection status is {}", newState);
state = StateChangeListener.State.CONNECTION_RECONNECT;
break;
default:
logger.info("[zk] current connection status is {}", newState);
return;
}
for (StateChangeListener listener : connectionListenerMap.values()) {
listener.onChange(state, null);
}
for (StateChangeListener listener : connectionStateListeners) {
listener.onChange(state, null);
}
}
@Override
public void addStateChangeListener(StateChangeListener listener) {
connectionStateListeners.add(listener);
}
@Override
public boolean checkPathExists(String path) throws ConfigException {
try {
return curator.checkExists().forPath(path) != null;
} catch (Exception e) {
String info = String.format("[zk] Failed to check EXIST for path [%s]", path);
logger.warn(info, e);
throw new ConfigException(e);
}
}
@Override
public Stat getNodeStat(String path) throws ConfigException {
try {
return curator.checkExists().forPath(path);
} catch (Exception e) {
String info = String.format("[zk] Failed to get node stat for path [%s]", path);
logger.warn(info, e);
throw new ConfigException(e);
}
}
@Override
public Stat setNodeStat(String path, String data) throws ConfigException {
try {
return curator.setData().forPath(path, data.getBytes());
} catch (Exception e) {
throw new ConfigException(e);
}
}
@Override
public Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException {
try {
return curator.setData().forPath(path, data.getBytes());
} catch (KeeperException.NoNodeException e) {
try {
curator.create().withMode(CreateMode.PERSISTENT).forPath(path);
return setNodeStat(path, data);
} catch (KeeperException.NodeExistsException nee) {
return setNodeStat(path, data);
} catch (Exception e2) {
throw new ConfigException(e2);
}
} catch (Exception e) {
throw new ConfigException(e);
}
}
@Override
public String createPersistentSequential(String path, String data) throws ConfigException {
try {
return curator.create().withMode(CreateMode.PERSISTENT_SEQUENTIAL).forPath(path, data.getBytes());
} catch (Exception e) {
throw new ConfigException(e);
}
}
//
// @Override
// public <T> void save(String path, T data) throws ConfigException {
// try {
// byte[] bytes = EMPTY;
// if (data != null) {
// bytes = JSON.toJSONBytes(data);
// }
// Stat stat = curator.checkExists().forPath(path);
// if (stat == null) {
// curator.create().creatingParentsIfNeeded().forPath(path, bytes);
// } else {
// curator.setData().forPath(path, bytes);
// }
// } catch (Exception e) {
// logger.warn("create {} failed", path);
// throw new ConfigException(e);
// }
// }
//
// @Override
// public <T> void saveIfNotExisted(String path, T data) throws ConfigException {
// try {
// byte[] bytes = EMPTY;
// if (data != null) {
// bytes = JSON.toJSONBytes(data);
// }
// Stat stat = curator.checkExists().forPath(path);
// if (stat == null) {
// curator.create().creatingParentsIfNeeded().forPath(path, bytes);
// }
// } catch (Exception e) {
// logger.warn("create {} failed", path, e);
// throw new ConfigException(e);
// }
// }
// @Override
// public <T> void register(final String path, final T data) throws ConfigException {
// java.util.concurrent.locks.Lock registerLock = getRegisterLock(path);
// registerLock.lock();
// try {
// byte[] bytes = EMPTY;
// if (data != null) {
// bytes = JSON.toJSONBytes(data);
// }
// if (!connectionListenerMap.containsKey(path)) {
// connectionListenerMap.put(path, new StateChangeListener() {
// @Override
// public void onChange(State state, String stateChangePath) {
// logger.warn("on state change " + state);
// switch (state) {
// case CONNECTION_RECONNECT:
// try {
// register(path, data);
// } catch (ConfigException e) {
// logger.warn("register {} failed", path);
// }
// break;
// default:
// break;
// }
// }
// });
// }
// try {
// deletePath(path);
// logger.warn("register reconnect delete {} succeed", path);
// } catch (ConfigException e) {
// logger.warn("register reconnect delete {} failed", path);
// }
// curator.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(path, bytes);
// logger.info("register reconnect create {} succeed", path);
// } catch (Exception e) {
// logger.warn("register reconnect create {} failed", path);
// throw new ConfigException(e);
// } finally {
// registerLock.unlock();
// }
// }
@Override
public <T> T get(String path, Class<T> clazz) throws ConfigException {
try {
byte[] bytes = curator.getData().forPath(path);
return JSON.parseObject(bytes, clazz);
} catch (Exception e) {
throw new ConfigException(e);
}
}
@Override
public String get(String path) throws ConfigException {
try {
byte[] bytes = curator.getData().forPath(path);
return new String(bytes);
} catch (Exception e) {
throw new ConfigException(e);
}
}
@Override
public void delete(String path) throws ConfigException {
try {
connectionListenerMap.remove(path);
if (curator.checkExists().forPath(path) != null) {
curator.delete().deletingChildrenIfNeeded().forPath(path);
}
} catch (Exception e) {
throw new ConfigException(e);
}
}
// private void deletePath(String path) throws ConfigException {
// try {
// if (curator.checkExists().forPath(path) != null) {
// curator.delete().deletingChildrenIfNeeded().forPath(path);
// }
// } catch (Exception e) {
// throw new ConfigException(e);
// }
// }
@SuppressWarnings("all")
@Override
public void watch(final String path, final StateChangeListener listener) throws ConfigException {
java.util.concurrent.locks.Lock dataLock = getDataPathLock(path);
dataLock.lock();
try {
NodeCache nodeCache = dataWatchers.get(path);
if (nodeCache == null) {
nodeCache = new NodeCache(curator, path);
nodeCache.start();
dataWatchers.put(path, nodeCache);
nodeCache.getListenable().addListener(new NodeCacheListener() {
@Override
public void nodeChanged() throws Exception {
listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path);
}
});
List<StateChangeListener> listeners = new ArrayList<>();
listeners.add(listener);
dataListeners.put(path, listeners);
} else {
List<StateChangeListener> listeners = dataListeners.get(path);
Preconditions.checkState(listeners != null);
if (!listeners.contains(listener)) {
listeners.add(listener);
nodeCache.getListenable().addListener(new NodeCacheListener() {
@Override
public void nodeChanged() throws Exception {
listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path);
}
});
}
}
} catch (Exception e) {
throw new ConfigException(e);
} finally {
dataLock.unlock();
}
}
@Override
public List<String> getChildren(String path) throws ConfigException{
try {
return curator.getChildren().forPath(path);
} catch (Exception e) {
throw new ConfigException(e);
}
}
@Override
public void watchChildren(final String path, final StateChangeListener listener) throws ConfigException {
java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path);
childrenLock.lock();
try {
PathChildrenCache pathChildrenCache = childrenWatcher.get(path);
if (pathChildrenCache == null) {
pathChildrenCache = new PathChildrenCache(curator, path, false, false, executor);
pathChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
childrenWatcher.put(path, pathChildrenCache);
pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener));
List<StateChangeListener> listeners = new ArrayList<>();
listeners.add(listener);
childrenListeners.put(path, listeners);
} else {
List<StateChangeListener> listeners = childrenListeners.get(path);
Preconditions.checkState(listeners != null);
if (!listeners.contains(listener)) {
listeners.add(listener);
pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener));
}
}
} catch (Exception e) {
throw new ConfigException(e);
} finally {
childrenLock.unlock();
}
}
@Override
public void cancelWatchChildren(String path) {
java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path);
childrenLock.lock();
try {
PathChildrenCache pathChildrenCache = childrenWatcher.get(path);
if (pathChildrenCache != null) {
try {
pathChildrenCache.close();
} catch (IOException e) {
logger.warn("close node cache for path {} error", path, e);
}
}
childrenWatcher.remove(path);
childrenListeners.remove(path);
} finally {
childrenLock.unlock();
}
}
private static class PathChildrenCacheListenerImpl implements PathChildrenCacheListener {
StateChangeListener listener;
public PathChildrenCacheListenerImpl(StateChangeListener listener) {
this.listener = listener;
}
@Override
public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
String path = event.getData() == null ? null : event.getData().getPath();
switch (event.getType()) {
case CHILD_ADDED:
listener.onChange(StateChangeListener.State.CHILD_ADDED, path);
break;
case CHILD_UPDATED:
listener.onChange(StateChangeListener.State.CHILD_UPDATED, path);
break;
case CHILD_REMOVED:
listener.onChange(StateChangeListener.State.CHILD_DELETED, path);
break;
default:
break;
}
}
}
@Override
public <T> void lock(String path, long timeoutMS, T t) throws ConfigException {
try {
Lock lock = lockMap.get(path);
if (lock != null) {
if (lock.isAcquiredInThisProcess()) {
return;
}
lock.release();
lockMap.remove(path);
}
InterProcessSemaphoreMutex mutex = new InterProcessSemaphoreMutex(curator, path);
boolean locked = mutex.acquire(timeoutMS, TimeUnit.MILLISECONDS);
if (!locked) {
throw new ConfigException("lock " + path + " failed " + timeoutMS);
}
if (t != null) {
curator.setData().forPath(path, JSON.toJSONBytes(t));
}
lock = new Lock(mutex, path);
lockMap.put(path, lock);
} catch (Exception e) {
logger.warn("lock {} failed", path, e);
throw new ConfigException(e);
}
}
@Override
public void unLock(String path) {
Lock lock = lockMap.remove(path);
if (lock != null) {
lock.release();
}
}
public class Lock {
InterProcessSemaphoreMutex mutex;
String path;
public Lock(InterProcessSemaphoreMutex mutex, String path) {
this.mutex = mutex;
this.path = path;
}
public void release() {
lockMap.remove(path);
try {
mutex.release();
} catch (Exception e) {
logger.warn("release path {} lock error {}", path, e.getMessage());
}
}
public boolean isAcquiredInThisProcess() {
return mutex.isAcquiredInThisProcess();
}
}
@Override
public void close() {
connectionListenerMap.clear();
connectionStateListeners.clear();
for (NodeCache nodeCache : dataWatchers.values()) {
try {
nodeCache.close();
} catch (Exception e) {
logger.warn("close node cache error", e);
}
}
dataWatchers.clear();
for (PathChildrenCache pathChildrenCache : childrenWatcher.values()) {
try {
pathChildrenCache.close();
} catch (IOException e) {
logger.warn("close children cache error", e);
}
}
childrenWatcher.clear();
releaseLocks();
curator.close();
executor.shutdown();
}
private void releaseLocks() {
for (Lock lock : lockMap.values()) {
lock.release();
}
lockMap.clear();
}
}
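// Usage sketch (illustrative only): watch broker membership under /brokers/ids
// and react to joins/leaves. The zkAddress value is a placeholder.
class ZkConfigImplUsageSketch {
private static final Logger LOG = LoggerFactory.getLogger(ZkConfigImplUsageSketch.class);
static ConfigClient watchBrokerIds(String zkAddress) throws ConfigException {
ConfigClient client = new ZkConfigImpl(zkAddress);
client.watchChildren("/brokers/ids", new StateChangeListener() {
@Override
public void onChange(State state, String path) {
LOG.info("broker membership change: {} {}", state, path);
}
});
return client;
}
}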

View File

@@ -0,0 +1,165 @@
package com.xiaojukeji.kafka.manager.common.utils.zk;
import java.util.HashMap;
import java.util.Map;
/**
* Zookeeper storage layout:
*
* <pre>
* /consumers
*   consumer-group
*     ids
*       consumerId
*     offsets
*       topic-0
*         0   (partition id; node content is the committed offset)
*         1
*         2
*       topic-1
*     owners
* /brokers
*   topics
*     topic-0   (node content like ("0", [0,1,2]))
*       partitions
*         0
*           state   (node content: leader brokerId, in-sync replicas, etc.)
*         1
*         2
*     topic-x
*   ids
*     1   (ephemeral node per broker id; data holds broker info such as host, port and JMX port)
*     2
*     n
* </pre>
*
* @author tukun @ 2015-11-5
* @version 1.0.0
*/
public class ZkPathUtil {
public static final String ZOOKEEPER_SEPARATOR = "/";
public static final String BROKER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "brokers";
public static final String CONTROLLER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "controller";
public static final String BROKER_IDS_ROOT = BROKER_ROOT_NODE
+ ZOOKEEPER_SEPARATOR + "ids";
public static final String BROKER_TOPICS_ROOT = BROKER_ROOT_NODE
+ ZOOKEEPER_SEPARATOR + "topics";
public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";
public static final String CONFIG_ROOT_NODE = ZOOKEEPER_SEPARATOR + "config";
public static final String CONFIG_TOPICS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "topics";
// maps a config name to its zookeeper path template
private static Map<String, String> zkPathMap = new HashMap<String, String>();
static {
zkPathMap.put("ConusmerPartitionOffset", CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR
+ "${consumerGroup}" + ZOOKEEPER_SEPARATOR
+ "offsets" + ZOOKEEPER_SEPARATOR + "${topic}"
+ ZOOKEEPER_SEPARATOR + "${partition}");
}
// broker paths
public static String getBrokerIdNodePath(long brokerId) {
return String.format(BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + "%d", brokerId);
}
public static String getBrokerTopicRoot(String topic) {
return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topic;
}
public static String getBrokerTopicPartitionRoot(String topic) {
return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topic + ZOOKEEPER_SEPARATOR
+ "partitions";
}
public static String getBrokerTopicPartitionStatePath(String topic, int partitionId) {
return String.format(getBrokerTopicPartitionRoot(topic) + ZOOKEEPER_SEPARATOR + "%d"
+ ZOOKEEPER_SEPARATOR + "state", partitionId);
}
// consumer paths
public static String getConsumerTopicPartitionOffsetNodePath(String consumerGroup,
String topic, int partitionId) {
// path shape per the layout above: /consumers/<group>/offsets/<topic>/<partition>
// (the original format string dropped the separators and the "offsets" segment)
return String.format(CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + "%s" + ZOOKEEPER_SEPARATOR
+ "offsets" + ZOOKEEPER_SEPARATOR + "%s" + ZOOKEEPER_SEPARATOR + "%d",
consumerGroup, topic, partitionId);
}
public static String getConsumerGroupRoot(String consumerGroup) {
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup;
}
public static String getConsumerGroupIdsRoot(String consumerGroup) {
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
+ "ids";
}
public static String getConsumerGroupOffsetRoot(String consumerGroup) {
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
+ "offsets";
}
public static String getConsumerGroupOwnersRoot(String consumerGroup) {
return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR
+ "owners";
}
public static String getConsumerGroupConsumerIdsNodePath(String consumerGroup, String consumerId) {
return getConsumerGroupIdsRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + consumerId;
}
public static String getConsumerGroupOffsetTopicNode(String consumerGroup, String topic) {
return getConsumerGroupOffsetRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic;
}
public static String getConsumerGroupOffsetTopicPartitionNode(String consumerGroup,
String topic, int partitionId) {
return getConsumerGroupOffsetTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR
+ partitionId;
}
public static String getConsumerGroupOwnersTopicNode(String consumerGroup, String topic) {
return getConsumerGroupOwnersRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic;
}
public static String getConsumerGroupOwnersTopicPartitionNode(String consumerGroup,
String topic, int partitionId) {
return getConsumerGroupOwnersTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR
+ partitionId;
}
public static String getConfigTopicNode(String topicName) {
return CONFIG_TOPICS_ROOT_NODE + ZOOKEEPER_SEPARATOR + topicName;
}
public static String parseLastPartFromZkPath(String zkPath) {
return zkPath.substring(zkPath.lastIndexOf("/") + 1);
}
public static Map<String, String> getZkPathMap() {
return zkPathMap;
}
public static void setZkPathMap(Map<String, String> zkPathMap) {
ZkPathUtil.zkPathMap = zkPathMap;
}
public static String getControllerRootNode() {
return CONTROLLER_ROOT_NODE;
}
public static String getEntityConfigPath(String entityType, String entity) {
return getEntityConfigRootPath(entityType) + "/" + entity;
}
public static String getEntityConfigRootPath(String entityType) {
return CONFIG_ROOT_NODE + "/" + entityType;
}
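/*
 * Usage sketch (illustrative only): resolving concrete paths from the layout
 * documented above, with hypothetical topic/group names:
 *
 *   ZkPathUtil.getBrokerTopicPartitionStatePath("test-topic", 0)
 *       -> "/brokers/topics/test-topic/partitions/0/state"
 *   ZkPathUtil.getConsumerGroupOffsetTopicPartitionNode("test-group", "test-topic", 0)
 *       -> "/consumers/test-group/offsets/test-topic/0"
 */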
}