kafka-manager 2.0

zengqiao
2020-09-28 15:46:34 +08:00
parent 28d985aaf1
commit c6e4b60424
1253 changed files with 82183 additions and 37179 deletions


@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.common.annotations;
import com.xiaojukeji.kafka.manager.common.constant.ApiLevelContent;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* Tiered API rate limiting
* @author zengqiao
* @date 2020-07-20
*/
@Target(ElementType.METHOD)
@Retention(RUNTIME)
@Documented
public @interface ApiLevel {
int level() default ApiLevelContent.LEVEL_DEFAULT_4;
int rateLimit() default Integer.MAX_VALUE;
}
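
As a quick, hedged illustration of how this annotation is meant to be applied (the example below is not part of the commit; the class, method, and enforcement side are assumptions):

package com.xiaojukeji.kafka.manager.common.annotations;
import com.xiaojukeji.kafka.manager.common.constant.ApiLevelContent;
// Hypothetical usage sketch: the interceptor that reads the annotation and enforces the
// limit is assumed to live elsewhere in the project.
class ApiLevelUsageSketch {
    // Mark this method as an "important" API; rateLimit is the per-window call budget the
    // enforcing side is assumed to apply (exact window semantics are not defined here).
    @ApiLevel(level = ApiLevelContent.LEVEL_IMPORTANT_2, rateLimit = 100)
    public String listClusters() {
        return "ok";
    }
}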


@@ -0,0 +1,50 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* User roles
* @author zengqiao_cn@163.com
* @date 19/4/15
*/
public enum AccountRoleEnum {
UNKNOWN(-1, "unknown"),
NORMAL(0, "normal"),
RD(1, "rd"),
OP(2, "op");
private Integer role;
private String message;
AccountRoleEnum(Integer role, String message) {
this.role = role;
this.message = message;
}
public Integer getRole() {
return role;
}
public String getMessage() {
return message;
}
@Override
public String toString() {
return "AccountRoleEnum{" +
"role=" + role +
", message='" + message + '\'' +
'}';
}
public static AccountRoleEnum getUserRoleEnum(Integer role) {
for (AccountRoleEnum elem: AccountRoleEnum.values()) {
if (elem.role.equals(role)) {
return elem;
}
}
return AccountRoleEnum.UNKNOWN;
}
}


@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/7/27
*/
public enum ApiLevelEnum {
LEVEL_0(0),
LEVEL_1(1),
LEVEL_2(2),
LEVEL_3(3)
;
private int level;
ApiLevelEnum(int level) {
this.level = level;
}
}


@@ -0,0 +1,37 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/4/21
*/
public enum ClusterComboEnum {
BYTES_IN_200(200*1024*1024, "200MB/s"),
BYTES_IN_400(400*1024*1024, "400MB/s"),
BYTES_IN_600(600*1024*1024, "600MB/s"),
;
private Integer code;
private String message;
ClusterComboEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
@Override
public String toString() {
return "ClusterComboEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}


@@ -0,0 +1,48 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Cluster modes
* @author zengqiao
* @date 20/4/1
*/
public enum ClusterModeEnum {
/**
* Shared mode
*/
SHARED_MODE(0, "共享集群"),
/**
* Exclusive mode
*/
EXCLUSIVE_MODE(1, "独享集群"),
/**
* Independent mode
*/
INDEPENDENT_MODE(2, "独立集群");
private Integer code;
private String message;
ClusterModeEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
@Override
public String toString() {
return "ClusterModeEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}


@@ -0,0 +1,25 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/6/4
*/
public enum DBStatusEnum {
DEAD(-1),
ALIVE(0)
;
private int status;
DBStatusEnum(int status) {
this.status = status;
}
public int getStatus() {
return status;
}
public void setStatus(int status) {
this.status = status;
}
}


@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/5/26
*/
public enum IDCEnum {
CN("cn", "国内"),
US("us", "美东"),
RU("ru", "俄罗斯"),
;
private String idc;
private String name;
IDCEnum(String idc, String name) {
this.idc = idc;
this.name = name;
}
public String getIdc() {
return idc;
}
public void setIdc(String idc) {
this.idc = idc;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return "IDCEnum{" +
"idc='" + idc + '\'' +
", name='" + name + '\'' +
'}';
}
}


@@ -0,0 +1,34 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/5/20
*/
public enum KafkaBrokerRoleEnum {
NORMAL("NormalBroker"),
COORDINATOR("Coordinator"),
CONTROLLER("Controller"),
;
private String role;
KafkaBrokerRoleEnum(String role) {
this.role = role;
}
public String getRole() {
return role;
}
public void setRole(String role) {
this.role = role;
}
@Override
public String toString() {
return "KafkaBrokerRoleEnum{" +
"role='" + role + '\'' +
'}';
}
}


@@ -0,0 +1,46 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/5/29
*/
public enum KafkaClientEnum {
PRODUCE_CLIENT(0, "Produce"),
FETCH_CLIENT(1, "Fetch"),
;
private Integer code;
private String name;
KafkaClientEnum(Integer code, String name) {
this.code = code;
this.name = name;
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return "KafkaClientEnum{" +
"code=" + code +
", name='" + name + '\'' +
'}';
}
}


@@ -0,0 +1,54 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/4/26
*/
public enum KafkaFileEnum {
PACKAGE(0, "Kafka压缩包", ".tgz"),
SERVER_CONFIG(1, "KafkaServer配置", ".properties"),
;
private Integer code;
private String message;
private String suffix;
KafkaFileEnum(Integer code, String message, String suffix) {
this.code = code;
this.message = message;
this.suffix = suffix;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
public String getSuffix() {
return suffix;
}
@Override
public String toString() {
return "KafkaFileEnum{" +
"code=" + code +
", message='" + message + '\'' +
", suffix=" + suffix +
'}';
}
public static KafkaFileEnum getByCode(Integer code) {
for (KafkaFileEnum elem: KafkaFileEnum.values()) {
if (elem.getCode().equals(code)) {
return elem;
}
}
return null;
}
}


@@ -0,0 +1,76 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
import com.google.common.collect.Maps;
import java.util.Map;
/**
* @author zhongyuankai_i
* @date 20/09/03
*/
public enum ModuleEnum {
TOPIC(0, "Topic"),
APP(1, "应用"),
QUOTA(2, "配额"),
AUTHORITY(3, "权限"),
CLUSTER(4, "集群"),
PARTITION(5, "分区"),
UNKNOWN(-1, "未知")
;
ModuleEnum(int code, String message) {
this.code = code;
this.message = message;
}
private int code;
private String message;
public int getCode() {
return code;
}
public String getMessage() {
return message;
}
public Map<String, Object> toMap() {
Map<String, Object> map = Maps.newHashMap();
map.put("code", code);
map.put("message", message);
return map;
}
public static ModuleEnum valueOf(Integer code) {
if (code == null) {
return ModuleEnum.UNKNOWN;
}
for (ModuleEnum state : ModuleEnum.values()) {
if (state.getCode() == code) {
return state;
}
}
return ModuleEnum.UNKNOWN;
}
public static boolean validate(Integer code) {
if (code == null) {
return false;
}
for (ModuleEnum state : ModuleEnum.values()) {
if (state.getCode() == code) {
return true;
}
}
return false;
}
}


@@ -0,0 +1,36 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author limeng
* @date 2017/11/21
*/
public enum OffsetLocationEnum {
/**
* Stored in ZooKeeper
*/
ZOOKEEPER("zookeeper"),
/**
* Stored in the broker
*/
BROKER("broker");
public final String location;
OffsetLocationEnum(String location) {
this.location = location;
}
public static OffsetLocationEnum getOffsetStoreLocation(String location) {
if (location == null) {
return null;
}
for (OffsetLocationEnum offsetStoreLocation: OffsetLocationEnum.values()) {
if (offsetStoreLocation.location.equals(location)) {
return offsetStoreLocation;
}
}
return null;
}
}


@@ -0,0 +1,42 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Where the offset is fetched from
* @author zengqiao
* @date 19/5/29
*/
public enum OffsetPosEnum {
NONE(0),
BEGINNING(1),
END(2),
BOTH(3);
public final Integer code;
OffsetPosEnum(Integer code) {
this.code = code;
}
public Integer getCode() {
return code;
}
public static OffsetPosEnum getOffsetPosEnum(Integer code) {
for (OffsetPosEnum offsetPosEnum : values()) {
if (offsetPosEnum.getCode().equals(code)) {
return offsetPosEnum;
}
}
return NONE;
}
@Override
public String toString() {
return "OffsetPosEnum{" +
"code=" + code +
'}';
}
}


@@ -0,0 +1,59 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zhongyuankai
* @date 20/09/03
*/
public enum OperateEnum {
ADD(0, "新增"),
DELETE(1, "删除"),
EDIT(2, "修改"),
UNKNOWN(-1, "unknown"),
;
OperateEnum(int code, String message) {
this.code = code;
this.message = message;
}
private int code;
private String message;
public int getCode() {
return code;
}
public String getMessage() {
return message;
}
public static OperateEnum valueOf(Integer code) {
if (code == null) {
return OperateEnum.UNKNOWN;
}
for (OperateEnum state : OperateEnum.values()) {
if (state.getCode() == code) {
return state;
}
}
return OperateEnum.UNKNOWN;
}
public static boolean validate(Integer code) {
if (code == null) {
return false;
}
for (OperateEnum state : OperateEnum.values()) {
if (state.getCode() == code) {
return true;
}
}
return false;
}
}


@@ -0,0 +1,30 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Operation status types
* @author zengqiao
* @date 19/11/21
*/
public enum OperationStatusEnum {
CREATE(0, "创建"),
UPDATE(1, "更新"),
DELETE(2, "删除"),
;
private Integer code;
private String message;
OperationStatusEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
}


@@ -0,0 +1,50 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Peak traffic status enum
* @author zengqiao
* @date 20/5/11
*/
public enum PeakFlowStatusEnum {
BETWEEN_ALL(0, "全部"),
BETWEEN_00_60(1, "使用率0%-60%"),
BETWEEN_60_80(2, "使用率60%-80%"),
BETWEEN_80_100(3, "使用率80%-100%"),
BETWEEN_100_PLUS(4, "使用率大于100%"),
BETWEEN_EXCEPTION(5, "数据获取失败"),
;
public Integer code;
public String message;
PeakFlowStatusEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
@Override
public String toString() {
return "PeakFlowStatusEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}


@@ -0,0 +1,31 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Preferred replica election dimensions
* @author zengqiao
* @date 20/4/23
*/
public enum RebalanceDimensionEnum {
CLUSTER(0, "Cluster维度"),
REGION(1, "Region维度"),
BROKER(2, "Broker维度"),
TOPIC(3, "Topic维度"),
;
private Integer code;
private String message;
RebalanceDimensionEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
}


@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Whether to report to the monitoring system
* @author zengqiao
* @date 20/9/25
*/
public enum SinkMonitorSystemEnum {
SINK_MONITOR_SYSTEM(0, "上报监控系统"),
NOT_SINK_MONITOR_SYSTEM(1, "不上报监控系统"),
;
private Integer code;
private String message;
SinkMonitorSystemEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
@Override
public String toString() {
return "SinkMonitorSystemEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}


@@ -0,0 +1,72 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Task status
* @author zengqiao
* @date 2017/6/29.
*/
public enum TaskStatusEnum {
UNKNOWN( -1, "未知"),
NEW( 0, "新建"),
RUNNABLE( 20, "就绪"),
WAITING( 21, "等待"),
RUNNING( 30, "运行中"),
KILLING( 31, "杀死中"),
BLOCKED( 40, "暂停"),
UNFINISHED( 99, "未完成"),
FINISHED( 100, "完成"),
SUCCEED( 101, "成功"),
FAILED( 102, "失败"),
CANCELED( 103, "取消"),
IGNORED( 104, "忽略"),
TIMEOUT( 105, "超时"),
KILL_FAILED(106, "杀死失败"),
;
private Integer code;
private String message;
TaskStatusEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
@Override
public String toString() {
return "TaskStatusEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
public static Boolean isFinished(Integer code) {
if (code >= FINISHED.getCode()) {
return true;
}
return false;
}
}


@@ -0,0 +1,55 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/6/11
*/
public enum TaskStatusReassignEnum {
UNKNOWN(TaskStatusEnum.UNKNOWN),
NEW(TaskStatusEnum.NEW),
RUNNABLE(TaskStatusEnum.RUNNABLE),
RUNNING(TaskStatusEnum.RUNNING),
// FINISHED(TaskStatusEnum.FINISHED),
SUCCEED(TaskStatusEnum.SUCCEED),
FAILED(TaskStatusEnum.FAILED),
CANCELED(TaskStatusEnum.CANCELED),
;
private Integer code;
private String message;
TaskStatusReassignEnum(TaskStatusEnum taskStatusEnum) {
this.code = taskStatusEnum.getCode();
this.message = taskStatusEnum.getMessage();
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
@Override
public String toString() {
return "TaskStatusReassignEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
public static Boolean isFinished(Integer code) {
if (SUCCEED.getCode().equals(code)
|| FAILED.getCode().equals(code)
|| CANCELED.getCode().equals(code)) {
return true;
}
return false;
}
}


@@ -0,0 +1,36 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Topic permissions
* @author zhongyuankai
* @date 20/4/29
*/
public enum TopicAuthorityEnum {
DENY(0, ""),
READ(1, "只读"),
WRITE(2, "只写"),
READ_WRITE(3, "可读可写"),
OWNER(4, "可管理"),
;
private Integer code;
private String message;
TopicAuthorityEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public String getMessage() {
return message;
}
}


@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* @author zengqiao
* @date 20/8/24
*/
public enum TopicOffsetChangedEnum {
UNKNOWN(-1, "unknown"),
NO(0, "no"),
YES(1, "yes"),
;
private Integer code;
private String message;
TopicOffsetChangedEnum(Integer code, String message) {
this.code = code;
this.message = message;
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
@Override
public String toString() {
return "TopicOffsetChangedEnum{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}


@@ -0,0 +1,39 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Topic reassignment actions
* @author zengqiao
* @date 20/4/16
*/
public enum TopicReassignActionEnum {
START("start"),
MODIFY("modify"),
CANCEL("cancel"),
;
private String action;
TopicReassignActionEnum(String action) {
this.action = action;
}
public String getAction() {
return action;
}
@Override
public String toString() {
return "TopicReassignActionEnum{" +
"action='" + action + '\'' +
'}';
}
public static TopicReassignActionEnum getByAction(String action) {
for (TopicReassignActionEnum elem: TopicReassignActionEnum.values()) {
if (elem.action.equals(action)) {
return elem;
}
}
return null;
}
}


@@ -0,0 +1,48 @@
package com.xiaojukeji.kafka.manager.common.bizenum.gateway;
/**
* @author zengqiao
* @date 20/7/28
*/
public enum GatewayConfigKeyEnum {
SD_CLUSTER_ID("SERVICE_DISCOVERY_CLUSTER_ID", "SERVICE_DISCOVERY_CLUSTER_ID"),
SD_QUEUE_SIZE("SERVICE_DISCOVERY_QUEUE_SIZE", "SERVICE_DISCOVERY_QUEUE_SIZE"),
SD_APP_ID_RATE("SERVICE_DISCOVERY_APPID_RATE", "SERVICE_DISCOVERY_APPID_RATE"),
SD_IP_RATE("SERVICE_DISCOVERY_IP_RATE", "SERVICE_DISCOVERY_IP_RATE"),
SD_SP_RATE("SERVICE_DISCOVERY_SP_RATE", "SERVICE_DISCOVERY_SP_RATE"),
;
private String configType;
private String configName;
GatewayConfigKeyEnum(String configType, String configName) {
this.configType = configType;
this.configName = configName;
}
public String getConfigType() {
return configType;
}
public void setConfigType(String configType) {
this.configType = configType;
}
public String getConfigName() {
return configName;
}
public void setConfigName(String configName) {
this.configName = configName;
}
@Override
public String toString() {
return "GatewayConfigKeyEnum{" +
"configType='" + configType + '\'' +
", configName='" + configName + '\'' +
'}';
}
}


@@ -0,0 +1,15 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* @author zengqiao
* @date 20/7/28
*/
public class ApiLevelContent {
public static final int LEVEL_VIP_1 = 1;
public static final int LEVEL_IMPORTANT_2 = 2;
public static final int LEVEL_NORMAL_3 = 3;
public static final int LEVEL_DEFAULT_4 = 4;
}


@@ -0,0 +1,26 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* API prefixes
* @author zengqiao
* @date 20/4/16
*/
public class ApiPrefix {
public static final String API_V1_SSO_PREFIX = "/api/v1/sso/";
public static final String API_V1_NORMAL_PREFIX = "/api/v1/normal/";
public static final String API_V1_RD_PREFIX = "/api/v1/rd/";
public static final String API_V1_OP_PREFIX = "/api/v1/op/";
public static final String API_V1_THIRD_PART_PREFIX = "/api/v1/third-part/";
public static final String API_V2_THIRD_PART_PREFIX = "/api/v2/third-part/";
public static final String API_V1_OBSOLETE_PREFIX = "/api/v1/";
public static final String API_V2_OBSOLETE_PREFIX = "/api/v2/";
public static final String GATEWAY_API_V1_PREFIX = "/gateway/api/v1/";
}


@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* Configuration constant keys
* @author zengqiao
* @date 20/7/1
*/
public class ConfigConstant {
/**
* Expert service
*/
public static final String REGION_HOT_TOPIC_CONFIG_KEY = "REGION_HOT_TOPIC_CONFIG";
public static final String TOPIC_INSUFFICIENT_PARTITION_CONFIG_KEY = "TOPIC_INSUFFICIENT_PARTITION_CONFIG";
public static final String EXPIRED_TOPIC_CONFIG_KEY = "EXPIRED_TOPIC_CONFIG";
/**
*
*/
public static final String PRODUCE_CONSUMER_METRICS_CONFIG_KEY = "PRODUCE_CONSUMER_METRICS_CONFIG_KEY";
public static final String PRODUCE_TOPIC_METRICS_CONFIG_KEY = "PRODUCE_TOPIC_METRICS_CONFIG_KEY";
public static final long MAX_LIMIT_NUM = 200L;
/**
* Default broker peak traffic capacity: 100 MB/s
*/
public static final Long DEFAULT_BROKER_CAPACITY_LIMIT = 100 * 1024 * 1024L;
public static final String BROKER_CAPACITY_LIMIT_CONFIG_KEY = "BROKER_CAPACITY_LIMIT_CONFIG";
public static final String KAFKA_CLUSTER_DO_CONFIG_KEY = "KAFKA_CLUSTER_DO_CONFIG";
}


@@ -0,0 +1,48 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* @author zengqiao
* @date 20/2/28
*/
public class Constant {
public static final Integer SUCCESS = 0;
public static final Integer MAX_AVG_BYTES_DURATION = 10;
public static final Integer BATCH_INSERT_SIZE = 50;
public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000;
public static final Integer MAX_TOPIC_OPERATION_SIZE_PER_REQUEST = 10;
/**
* BrokerId value meaning "do not filter by broker"
*/
public static final Integer NOT_FILTER_BROKER_ID = -1;
/**
* By default, return connection info from the last 20 minutes
*/
public static final Long TOPIC_CONNECTION_LATEST_TIME_MS = 20 * 60 * 1000L;
/**
* Order (ticket) related
*/
public static final String HANDLE_APP_APPLY_MAX_NUM = "handle_app_apply_order_num";
public static final Integer HANDLE_APP_APPLY_MAX_NUM_DEFAULT = 10;
public static final String AUTO_HANDLE_USER_NAME = "auto_handle";
public static final String AUTO_HANDLE_CHINESE_NAME = "自动审批";
public static final String UNKNOWN_VERSION = "unknownVersion";
public static final String UNKNOWN_USER = "UNKNOWN_USER";
public static final String DEFAULT_USER_NAME = "kafka-admin";
public static final Integer DEFAULT_MAX_CAL_TOPIC_EXPIRED_DAY = 90;
public static final Integer INVALID_CODE = -1;
}


@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* @author zengqiao
* @date 20/5/20
*/
public class KafkaConstant {
public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets";
public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com";
public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1";
public static final String CLIENT_VERSION_NAME_UNKNOWN = "unknown";
public static final String RETENTION_MS_KEY = "retention.ms";
}


@@ -0,0 +1,42 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
*
* @author zengqiao
* @date 20/4/22
*/
public class KafkaMetricsCollections {
public static final int COMMON_DETAIL_METRICS = 0;
/**
* Broker traffic details
*/
public static final int BROKER_TO_DB_METRICS = 101; // Broker metrics persisted to the DB
public static final int BROKER_OVERVIEW_PAGE_METRICS = 103; // Metrics for the broker overview page
public static final int BROKER_ANALYSIS_METRICS = 105; // Metrics for broker analysis
public static final int BROKER_TOPIC_ANALYSIS_METRICS = 106; // Metrics for broker-topic analysis
public static final int BROKER_BASIC_PAGE_METRICS = 107; // Metrics for the broker basic-info page
public static final int BROKER_STATUS_PAGE_METRICS = 108; // Broker status metrics
public static final int BROKER_HEALTH_SCORE_METRICS = 109; // Broker health score metrics
/**
* Topic traffic details
*/
public static final int TOPIC_FLOW_OVERVIEW = 201;
public static final int TOPIC_METRICS_TO_DB = 202;
public static final int TOPIC_REQUEST_TIME_METRICS_TO_DB = 203;
public static final int TOPIC_BASIC_PAGE_METRICS = 204;
public static final int TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS = 205;
public static final int TOPIC_THROTTLED_METRICS_TO_DB = 206;
/**
* App + Topic traffic details
*/
public static final int APP_TOPIC_METRICS_TO_DB = 300;
/**
* Broker info
*/
public static final int BROKER_VERSION = 400;
}


@@ -0,0 +1,13 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* @author zengqiao
* @date 20/8/10
*/
public class LogConstant {
public static final String COLLECTOR_METRICS_LOGGER = "COLLECTOR_METRICS_LOGGER";
public static final String API_METRICS_LOGGER = "API_METRICS_LOGGER";
public static final String SCHEDULED_TASK_LOGGER = "SCHEDULED_TASK_LOGGER";
}


@@ -0,0 +1,14 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* Login constants
* @author zengqiao
* @date 20/5/8
*/
public class LoginConstant {
public static final String SESSION_USERNAME_KEY = "username";
public static final String COOKIE_CHINESE_USERNAME_KEY = "chineseName";
public static final Integer COOKIE_OR_SESSION_MAX_AGE_UNIT_MS = 24 * 60 * 60 * 1000;
}


@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* @author zengqiao
* @date 20/7/28
*/
public class SystemCodeConstant {
public static final String LOG_X = "LogX";
public static final String LEO = "leo";
public static final String DATA_DREAM = "datadream";
public static final String KAFKA_MANAGER = "kafka-manager";
public static final String CHORUS = "chorus"; // Governance platform - service governance
}


@@ -0,0 +1,49 @@
package com.xiaojukeji.kafka.manager.common.constant;
import java.util.Properties;
/**
* @author zengqiao
* @date 20/7/28
*/
public class TopicCreationConstant {
/**
* Config key for topics created via LogX
*/
public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG";
/**
* Config key for topics created via the governance platform
*/
public static final String CHORUS_CREATE_TOPIC_CONFIG_KEY_NAME = "CHORUS_CREATE_TOPIC_CONFIG";
/**
* Config key for internally created topics
*/
public static final String INNER_CREATE_TOPIC_CONFIG_KEY = "INNER_CREATE_TOPIC_CONFIG_KEY";
public static final Integer DEFAULT_REPLICA = 3;
public static final Integer DEFAULT_PARTITION_NUM = 1;
public static final Integer DEFAULT_RETENTION_TIME_UNIT_HOUR = 24;
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
public static Properties createNewProperties(Long retentionTime) {
Properties properties = new Properties();
properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime));
return properties;
}
public static final Long AUTO_EXEC_MAX_BYTES_IN_UNIT_B = 30 * 1024 * 1024L;
/**
* Topic name prefixes
*/
public static final String TOPIC_NAME_PREFIX_US = "us01_";
public static final String TOPIC_NAME_PREFIX_RU = "ru01_";
public static final Integer TOPIC_NAME_MAX_LENGTH = 255;
}
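
For clarity, a small hypothetical call site for createNewProperties above (the example class and printed output are illustrative, not part of the commit):

package com.xiaojukeji.kafka.manager.common.constant;
import java.util.Properties;
// Hypothetical example: build topic-creation properties with a 7-day retention.
// 7 * 24 * 3600 * 1000 ms = 604800000 ms.
class TopicCreationConstantExample {
    public static void main(String[] args) {
        Properties props = TopicCreationConstant.createNewProperties(7 * 24 * 3600 * 1000L);
        System.out.println(props); // {retention.ms=604800000}
    }
}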


@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.common.constant;
/**
* Sampling-related configuration
* @author zengqiao
* @date 20/5/8
*/
public class TopicSampleConstant {
/**
* MAX_MSG_NUM: maximum number of sampled messages
* MAX_TIMEOUT_UNIT_MS: overall sampling timeout
* POLL_TIME_OUT_UNIT_MS: timeout of a single poll while sampling
* MAX_DATA_LENGTH_UNIT_BYTE: maximum length of sampled data when truncation applies
*/
public static final Integer MAX_MSG_NUM = 100;
public static final Integer MAX_TIMEOUT_UNIT_MS = 10000;
public static final Integer POLL_TIME_OUT_UNIT_MS = 2000;
public static final Integer MAX_DATA_LENGTH_UNIT_BYTE = 2048;
}


@@ -0,0 +1,47 @@
package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
* @date 19/5/14
*/
public class ConsumerMetadata {
private Set<String> consumerGroupSet = new HashSet<>();
private Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
private Map<String, List<String>> consumerGroupAppMap = new ConcurrentHashMap<>();
public ConsumerMetadata(Set<String> consumerGroupSet,
Map<String, Set<String>> topicNameConsumerGroupMap,
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap,
Map<String, List<String>> consumerGroupAppMap) {
this.consumerGroupSet = consumerGroupSet;
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
this.consumerGroupAppMap = consumerGroupAppMap;
}
public Set<String> getConsumerGroupSet() {
return consumerGroupSet;
}
public Map<String, Set<String>> getTopicNameConsumerGroupMap() {
return topicNameConsumerGroupMap;
}
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
return consumerGroupSummaryMap;
}
public Map<String, List<String>> getConsumerGroupAppMap() {
return consumerGroupAppMap;
}
}


@@ -0,0 +1,83 @@
package com.xiaojukeji.kafka.manager.common.entity;
/**
* @author zengqiao
* @date 20/7/27
*/
public class DeprecatedResponseResult<T> {
public static final String SUCCESS_STATUS = "success";
public static final String FAILED_STATUS = "failure";
public static final String SUCCESS_MESSAGE = "process succeeded!";
public static final String FAILED_MESSAGE = "process failed!";
private String status;
private String message;
private T data;
public static <T> DeprecatedResponseResult<T> success(T data) {
DeprecatedResponseResult<T> responseCommonResult = new DeprecatedResponseResult<T>();
responseCommonResult.setMessage(SUCCESS_MESSAGE);
responseCommonResult.setStatus(SUCCESS_STATUS);
responseCommonResult.setData(data);
return responseCommonResult;
}
public static <T> DeprecatedResponseResult<T> success() {
DeprecatedResponseResult<T> responseCommonResult = new DeprecatedResponseResult<T>();
responseCommonResult.setStatus(SUCCESS_STATUS);
responseCommonResult.setMessage(SUCCESS_MESSAGE);
return responseCommonResult;
}
public static <T> DeprecatedResponseResult<T> failure() {
DeprecatedResponseResult<T> responseCommonResult = new DeprecatedResponseResult<T>();
responseCommonResult.setMessage(FAILED_MESSAGE);
responseCommonResult.setStatus(FAILED_STATUS);
return responseCommonResult;
}
public static <T> DeprecatedResponseResult<T> failure(String message) {
DeprecatedResponseResult<T> responseCommonResult = new DeprecatedResponseResult<T>();
responseCommonResult.setMessage(message);
responseCommonResult.setStatus(FAILED_STATUS);
return responseCommonResult;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
public T getData() {
return data;
}
public void setData(T data) {
this.data = data;
}
@Override
public String toString() {
return "DeprecatedResponseResult{" +
"status='" + status + '\'' +
", message='" + message + '\'' +
", data=" + data +
'}';
}
}


@@ -0,0 +1,82 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
/**
* @author zengqiao
* @date 20/6/15
*/
public class KafkaVersion {
private static final String DIDI_VERSION_EXTEND = "d";
public static final Long VERSION_0_10_3 = 10030000L; // 0.10.3
public static final Long VERSION_MAX = Long.MAX_VALUE;
private volatile String version = null;
private volatile long versionNum = Long.MAX_VALUE;
public boolean initialized() {
if (ValidateUtils.isNull(version)) {
return false;
}
return true;
}
public String getVersion() {
return version;
}
public long getVersionNum() {
return versionNum;
}
@Override
public String toString() {
return "KafkaVersion{" +
"version='" + version + '\'' +
", versionNum=" + versionNum +
'}';
}
public long init(String version) {
version = version.toLowerCase();
String[] splitElems = version.split("-");
int splitElemLength = splitElems.length;
if (splitElemLength <= 0) {
versionNum = Long.MAX_VALUE;
return versionNum;
}
try {
// the Kafka version part
String[] kafkaVersion = splitElems[0].split("\\.");
int kafkaVersionLength = kafkaVersion.length;
versionNum = kafkaVersionLength > 0? Integer.valueOf(kafkaVersion[0]): 0;
versionNum = versionNum * 100 + (kafkaVersionLength > 1? Integer.valueOf(kafkaVersion[1]): 0);
versionNum = versionNum * 100 + (kafkaVersionLength > 2? Integer.valueOf(kafkaVersion[2]): 0);
} catch (Exception e) {
// If the Kafka version cannot be parsed, fall back to Long.MAX_VALUE
this.versionNum = Long.MAX_VALUE;
return versionNum;
}
// Version number parsed successfully
versionNum = versionNum * 10000;
this.version = version;
// Append the extended ("d") version info
try {
for (int idx = 0; idx < splitElemLength; ++idx) {
if (splitElems[idx].equals(DIDI_VERSION_EXTEND) && idx < splitElemLength - 1) {
versionNum = versionNum + (Integer.valueOf(splitElems[idx + 1]));
return versionNum;
}
}
} catch (Exception e) {
// If the extended version info cannot be parsed, ignore it
}
return versionNum;
}
}
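
To make the version-number encoding concrete, here is a hedged worked example; the input strings are hypothetical and the expected values follow directly from the arithmetic in init():

package com.xiaojukeji.kafka.manager.common.entity;
// Illustration only (not part of the commit): how init() maps version strings to numbers.
//   "2.5.0"        -> ((2 * 100 + 5) * 100 + 0) * 10000        = 205000000
//   "0.10.2-d-100" -> ((0 * 100 + 10) * 100 + 2) * 10000 + 100 = 10020100
class KafkaVersionParseExample {
    public static void main(String[] args) {
        KafkaVersion kv = new KafkaVersion();
        System.out.println(kv.init("0.10.2-d-100")); // 10020100
        System.out.println(kv.getVersion());         // 0.10.2-d-100 (stored lower-cased)
    }
}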


@@ -0,0 +1,105 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import java.io.Serializable;
/**
* @author huangyiminghappy@163.com
* @date 2019-07-08
*/
public class Result<T> implements Serializable {
private static final long serialVersionUID = -2772975319944108658L;
private T data;
private String message;
private String tips;
private int code;
public Result(T data) {
this.data = data;
this.code = ResultStatus.SUCCESS.getCode();
this.message = ResultStatus.SUCCESS.getMessage();
}
public Result() {
this(null);
}
public Result(Integer code, String message) {
this.message = message;
this.code = code;
}
public Result(Integer code, T data, String message) {
this.data = data;
this.message = message;
this.code = code;
}
public T getData() {
return this.data;
}
public void setData(T data) {
this.data = data;
}
public String getMessage() {
return this.message;
}
public void setMessage(String message) {
this.message = message;
}
public String getTips() {
return tips;
}
public void setTips(String tips) {
this.tips = tips;
}
public int getCode() {
return this.code;
}
public void setCode(int code) {
this.code = code;
}
@Override
public String toString() {
return JSON.toJSONString(this);
}
public static Result buildSuc() {
Result result = new Result();
result.setCode(ResultStatus.SUCCESS.getCode());
result.setMessage(ResultStatus.SUCCESS.getMessage());
return result;
}
public static Result buildFrom(ResultStatus resultStatus) {
Result result = new Result();
result.setCode(resultStatus.getCode());
result.setMessage(resultStatus.getMessage());
return result;
}
public static Result buildFrom(ResultStatus resultStatus, Object data) {
Result result = new Result();
result.setCode(resultStatus.getCode());
result.setMessage(resultStatus.getMessage());
result.setData(data);
return result;
}
}
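
A minimal, hypothetical sketch of how these factories are typically combined with the ResultStatus enum defined in the next file (illustration only; the class and method are not part of the commit):

package com.xiaojukeji.kafka.manager.common.entity;
// Hypothetical call site for the static factories above.
class ResultUsageSketch {
    static Result checkClusterId(Long clusterId) {
        if (clusterId == null || clusterId < 0) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL); // code 1400, "param illegal"
        }
        return Result.buildSuc(); // code 0, "success"
    }
}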


@@ -0,0 +1,154 @@
package com.xiaojukeji.kafka.manager.common.entity;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
/**
* Response statuses
* @author zengqiao
* @date 20/4/16
*/
public enum ResultStatus {
SUCCESS(Constant.SUCCESS, "success"),
LOGIN_FAILED(1, "login failed, please check username and password"),
/**
* Internal dependency errors, [1000, 1200)
* ------------------------------------------------------------------------------------------
*/
MYSQL_ERROR(1000, "operate database failed"),
CONNECT_ZOOKEEPER_FAILED(1000, "connect zookeeper failed"),
READ_ZOOKEEPER_FAILED(1000, "read zookeeper failed"),
READ_JMX_FAILED(1000, "read jmx failed"),
// Internal dependency errors - Kafka-specific errors, [1000, 1100)
BROKER_NUM_NOT_ENOUGH(1000, "broker not enough"),
CONTROLLER_NOT_ALIVE(1000, "controller not alive"),
CLUSTER_METADATA_ERROR(1000, "cluster metadata error"),
TOPIC_CONFIG_ERROR(1000, "topic config error"),
/**
* External dependency errors, [1200, 1400)
* ------------------------------------------------------------------------------------------
*/
CALL_CLUSTER_TASK_AGENT_FAILED(1000, " call cluster task agent failed"),
CALL_MONITOR_SYSTEM_ERROR(1000, " call monitor-system failed"),
/**
* External user-operation errors, [1400, 1600)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(1400, "param illegal"),
OPERATION_FAILED(1401, "operation failed"),
OPERATION_FORBIDDEN(1402, "operation forbidden"),
API_CALL_EXCEED_LIMIT(1403, "api call exceed limit"),
// Resource does not exist
CLUSTER_NOT_EXIST(10000, "cluster not exist"),
BROKER_NOT_EXIST(10000, "broker not exist"),
TOPIC_NOT_EXIST(10000, "topic not exist"),
PARTITION_NOT_EXIST(10000, "partition not exist"),
ACCOUNT_NOT_EXIST(10000, "account not exist"),
APP_NOT_EXIST(1000, "app not exist"),
ORDER_NOT_EXIST(1000, "order not exist"),
CONFIG_NOT_EXIST(1000, "config not exist"),
IDC_NOT_EXIST(1000, "idc not exist"),
TASK_NOT_EXIST(1110, "task not exist"),
AUTHORITY_NOT_EXIST(1000, "authority not exist"),
MONITOR_NOT_EXIST(1110, "monitor not exist"),
QUOTA_NOT_EXIST(1000, "quota not exist, please check clusterId, topicName and appId"),
// Resource does not exist / already exists / already in use
RESOURCE_NOT_EXIST(1200, "资源不存在"),
RESOURCE_ALREADY_EXISTED(1200, "资源已经存在"),
RESOURCE_NAME_DUPLICATED(1200, "资源名称重复"),
RESOURCE_ALREADY_USED(1000, "资源早已被使用"),
/**
* Resource parameter errors
*/
CG_LOCATION_ILLEGAL(10000, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(1000, "order already handled"),
APP_ID_OR_PASSWORD_ILLEGAL(1000, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(1000, "system code illegal"),
///////////////////////////////////////////////////////////////
USER_WITHOUT_AUTHORITY(1000, "user without authority"),
JSON_PARSER_ERROR(1000, "json parser error"),
TOPIC_OPERATION_PARAM_NULL_POINTER(2, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(3, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(6, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(9, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(10, "未知错误"),
TOPIC_EXIST_CONNECT_CANNOT_DELETE(10, "topic exist connect cannot delete"),
EXIST_TOPIC_CANNOT_DELETE(10, "exist topic cannot delete"),
/**
* Orders (tickets)
*/
CHANGE_ZOOKEEPER_FORBIDEN(100, "change zookeeper forbidden"),
// APP_EXIST_TOPIC_AUTHORITY_CANNOT_DELETE(1000, "app exist topic authority cannot delete"),
UPLOAD_FILE_FAIL(1000, "upload file fail"),
FILE_TYPE_NOT_SUPPORT(1000, "File type not support"),
DOWNLOAD_FILE_FAIL(1000, "download file fail"),
TOPIC_ALREADY_EXIST(17400, "topic already existed"),
CONSUMER_GROUP_NOT_EXIST(17411, "consumerGroup not exist"),
;
private int code;
private String message;
ResultStatus(int code, String message) {
this.code = code;
this.message = message;
}
public int getCode() {
return code;
}
public void setCode(int code) {
this.code = code;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
}


@@ -0,0 +1,83 @@
package com.xiaojukeji.kafka.manager.common.entity;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/4/2
*/
public class TopicOperationResult {
@ApiModelProperty(value = "集群ID")
private Long clusterId;
@ApiModelProperty(value = "Topic名称")
private String topicName;
@ApiModelProperty(value = "状态码, 0:成功, 其他失败")
private Integer code;
@ApiModelProperty(value = "信息")
private String message;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
@Override
public String toString() {
return "TopicOperationResult{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", code=" + code +
", message='" + message + '\'' +
'}';
}
public static TopicOperationResult buildFrom(Long clusterId, String topicName, Result rs) {
return buildFrom(clusterId, topicName, rs.getCode(), rs.getMessage());
}
public static TopicOperationResult buildFrom(Long clusterId, String topicName, ResultStatus rs) {
return buildFrom(clusterId, topicName, rs.getCode(), rs.getMessage());
}
private static TopicOperationResult buildFrom(Long clusterId,
String topicName,
Integer code,
String message) {
TopicOperationResult result = new TopicOperationResult();
result.setClusterId(clusterId);
result.setTopicName(topicName);
result.setCode(code);
result.setMessage(message);
return result;
}
}


@@ -0,0 +1,91 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
/**
* App-Topic information
* @author zengqiao
* @date 20/5/11
*/
public class AppTopicDTO {
private Long logicalClusterId;
private String logicalClusterName;
private Long physicalClusterId;
private String topicName;
private Integer access;
private String operator;
private Long gmtCreate;
public Long getLogicalClusterId() {
return logicalClusterId;
}
public void setLogicalClusterId(Long logicalClusterId) {
this.logicalClusterId = logicalClusterId;
}
public String getLogicalClusterName() {
return logicalClusterName;
}
public void setLogicalClusterName(String logicalClusterName) {
this.logicalClusterName = logicalClusterName;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public void setPhysicalClusterId(Long physicalClusterId) {
this.physicalClusterId = physicalClusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getAccess() {
return access;
}
public void setAccess(Integer access) {
this.access = access;
}
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
public Long getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Long gmtCreate) {
this.gmtCreate = gmtCreate;
}
@Override
public String toString() {
return "AppTopicDTO{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", physicalClusterId=" + physicalClusterId +
", topicName='" + topicName + '\'' +
", access=" + access +
", operator='" + operator + '\'' +
", gmtCreate=" + gmtCreate +
'}';
}
}


@@ -0,0 +1,91 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
/**
* Basic broker information
* @author zengqiao_cn@163.com
* @date 19/4/8
*/
public class BrokerBasicDTO {
private String host;
private Integer port;
private Integer jmxPort;
private Integer topicNum;
private Integer partitionCount;
private Long startTime;
private Integer leaderCount;
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public Integer getJmxPort() {
return jmxPort;
}
public void setJmxPort(Integer jmxPort) {
this.jmxPort = jmxPort;
}
public Integer getTopicNum() {
return topicNum;
}
public void setTopicNum(Integer topicNum) {
this.topicNum = topicNum;
}
public Integer getPartitionCount() {
return partitionCount;
}
public void setPartitionCount(Integer partitionCount) {
this.partitionCount = partitionCount;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Integer getLeaderCount() {
return leaderCount;
}
public void setLeaderCount(Integer leaderCount) {
this.leaderCount = leaderCount;
}
@Override
public String toString() {
return "BrokerBasicInfoDTO{" +
"host='" + host + '\'' +
", port=" + port +
", jmxPort=" + jmxPort +
", topicNum=" + topicNum +
", partitionCount=" + partitionCount +
", startTime=" + startTime +
", leaderCount=" + leaderCount +
'}';
}
}


@@ -0,0 +1,189 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
/**
* @author zengqiao_cn@163.com
* @date 19/4/21
*/
public class BrokerOverviewDTO {
private Integer brokerId;
private String host;
private Integer port;
private Integer jmxPort;
private Long startTime;
private Object byteIn;
private Object byteOut;
private Integer partitionCount;
private Integer underReplicatedPartitions;
private Boolean underReplicated;
private Integer status;
private Integer peakFlowStatus;
private String kafkaVersion;
private Integer leaderCount;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public Integer getJmxPort() {
return jmxPort;
}
public void setJmxPort(Integer jmxPort) {
this.jmxPort = jmxPort;
}
public Long getStartTime() {
return startTime;
}
public void setStartTime(Long startTime) {
this.startTime = startTime;
}
public Object getByteIn() {
return byteIn;
}
public void setByteIn(Object byteIn) {
this.byteIn = byteIn;
}
public Object getByteOut() {
return byteOut;
}
public void setByteOut(Object byteOut) {
this.byteOut = byteOut;
}
public Integer getPartitionCount() {
return partitionCount;
}
public void setPartitionCount(Integer partitionCount) {
this.partitionCount = partitionCount;
}
public Integer getUnderReplicatedPartitions() {
return underReplicatedPartitions;
}
public void setUnderReplicatedPartitions(Integer underReplicatedPartitions) {
this.underReplicatedPartitions = underReplicatedPartitions;
}
public Boolean getUnderReplicated() {
return underReplicated;
}
public void setUnderReplicated(Boolean underReplicated) {
this.underReplicated = underReplicated;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Integer getPeakFlowStatus() {
return peakFlowStatus;
}
public void setPeakFlowStatus(Integer peakFlowStatus) {
this.peakFlowStatus = peakFlowStatus;
}
public String getKafkaVersion() {
return kafkaVersion;
}
public void setKafkaVersion(String kafkaVersion) {
this.kafkaVersion = kafkaVersion;
}
public Integer getLeaderCount() {
return leaderCount;
}
public void setLeaderCount(Integer leaderCount) {
this.leaderCount = leaderCount;
}
public static BrokerOverviewDTO newInstance(BrokerMetadata brokerMetadata,
BrokerMetrics brokerMetrics,
String kafkaVersion) {
BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO();
brokerOverviewDTO.setBrokerId(brokerMetadata.getBrokerId());
brokerOverviewDTO.setHost(brokerMetadata.getHost());
brokerOverviewDTO.setPort(brokerMetadata.getPort());
brokerOverviewDTO.setJmxPort(brokerMetadata.getJmxPort());
brokerOverviewDTO.setStartTime(brokerMetadata.getTimestamp());
brokerOverviewDTO.setStatus(0);
if (brokerMetrics == null) {
return brokerOverviewDTO;
}
brokerOverviewDTO.setByteIn(
brokerMetrics.getSpecifiedMetrics("BytesInPerSecOneMinuteRate")
);
brokerOverviewDTO.setByteOut(
brokerMetrics.getSpecifiedMetrics("BytesOutPerSecOneMinuteRate")
);
brokerOverviewDTO.setPartitionCount(
brokerMetrics.getSpecifiedMetrics("PartitionCountValue", Integer.class)
);
brokerOverviewDTO.setUnderReplicatedPartitions(
brokerMetrics.getSpecifiedMetrics("UnderReplicatedPartitionsValue", Integer.class)
);
if (!ValidateUtils.isNull(brokerOverviewDTO.getUnderReplicatedPartitions())) {
brokerOverviewDTO.setUnderReplicated(brokerOverviewDTO.getUnderReplicatedPartitions() > 0);
}
brokerOverviewDTO.setLeaderCount(
brokerMetrics.getSpecifiedMetrics("LeaderCountValue", Integer.class)
);
brokerOverviewDTO.setKafkaVersion(kafkaVersion);
return brokerOverviewDTO;
}
}


@@ -0,0 +1,191 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
import java.util.Date;
/**
* @author zengqiao
* @date 20/4/23
*/
public class ClusterDetailDTO {
private Long clusterId;
private String clusterName;
private String zookeeper;
private String bootstrapServers;
private String kafkaVersion;
private String idc;
private Integer mode;
private String securityProperties;
private Integer status;
private Date gmtCreate;
private Date gmtModify;
private Integer brokerNum;
private Integer topicNum;
private Integer consumerGroupNum;
private Integer controllerId;
private Integer regionNum;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getZookeeper() {
return zookeeper;
}
public void setZookeeper(String zookeeper) {
this.zookeeper = zookeeper;
}
public String getBootstrapServers() {
return bootstrapServers;
}
public void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
public String getKafkaVersion() {
return kafkaVersion;
}
public void setKafkaVersion(String kafkaVersion) {
this.kafkaVersion = kafkaVersion;
}
public String getIdc() {
return idc;
}
public void setIdc(String idc) {
this.idc = idc;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public String getSecurityProperties() {
return securityProperties;
}
public void setSecurityProperties(String securityProperties) {
this.securityProperties = securityProperties;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
public Integer getBrokerNum() {
return brokerNum;
}
public void setBrokerNum(Integer brokerNum) {
this.brokerNum = brokerNum;
}
public Integer getTopicNum() {
return topicNum;
}
public void setTopicNum(Integer topicNum) {
this.topicNum = topicNum;
}
public Integer getConsumerGroupNum() {
return consumerGroupNum;
}
public void setConsumerGroupNum(Integer consumerGroupNum) {
this.consumerGroupNum = consumerGroupNum;
}
public Integer getControllerId() {
return controllerId;
}
public void setControllerId(Integer controllerId) {
this.controllerId = controllerId;
}
public Integer getRegionNum() {
return regionNum;
}
public void setRegionNum(Integer regionNum) {
this.regionNum = regionNum;
}
@Override
public String toString() {
return "ClusterDetailDTO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", zookeeper='" + zookeeper + '\'' +
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", securityProperties='" + securityProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
", brokerNum=" + brokerNum +
", topicNum=" + topicNum +
", consumerGroupNum=" + consumerGroupNum +
", controllerId=" + controllerId +
", regionNum=" + regionNum +
'}';
}
}


@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
/**
* @author zhongyuankai
* @date 2020/5/26
*/
public class PartitionAttributeDTO {
private Long logSize;
public Long getLogSize() {
return logSize;
}
public void setLogSize(Long logSize) {
this.logSize = logSize;
}
@Override
public String toString() {
return "PartitionAttributeDTO{" +
"logSize=" + logSize +
'}';
}
}


@@ -0,0 +1,62 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
/**
* Topic Offset
* @author zengqiao
* @date 19/6/2
*/
public class PartitionOffsetDTO {
private Integer partitionId;
private Long offset;
private Long timestamp;
public PartitionOffsetDTO() {
}
public PartitionOffsetDTO(Integer partitionId, Long offset) {
this.partitionId = partitionId;
this.offset = offset;
}
public PartitionOffsetDTO(Integer partitionId, Long offset, Long timestamp) {
this.partitionId = partitionId;
this.offset = offset;
this.timestamp = timestamp;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getOffset() {
return offset;
}
public void setOffset(Long offset) {
this.offset = offset;
}
public Long getTimestamp() {
return timestamp;
}
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}
@Override
public String toString() {
return "TopicOffsetDTO{" +
", partitionId=" + partitionId +
", offset=" + offset +
", timestamp=" + timestamp +
'}';
}
}


@@ -0,0 +1,103 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
import java.util.Properties;
/**
* @author zengqiao
* @date 20/6/10
*/
public class RdTopicBasic {
private Long clusterId;
private String clusterName;
private String topicName;
private Long retentionTime;
private String appId;
private String appName;
private Properties properties;
private String description;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getRetentionTime() {
return retentionTime;
}
public void setRetentionTime(Long retentionTime) {
this.retentionTime = retentionTime;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public Properties getProperties() {
return properties;
}
public void setProperties(Properties properties) {
this.properties = properties;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "RdTopicBasic{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", topicName='" + topicName + '\'' +
", retentionTime=" + retentionTime +
", appId='" + appId + '\'' +
", appName='" + appName + '\'' +
", properties=" + properties +
", description='" + description + '\'' +
'}';
}
}


@@ -0,0 +1,103 @@
package com.xiaojukeji.kafka.manager.common.entity.ao;
import java.util.List;
/**
* @author zengqiao
* @date 20/7/8
*/
public class TopicDiskLocation {
private Long clusterId;
private String topicName;
private Integer brokerId;
private String diskName;
private List<Integer> leaderPartitions;
private List<Integer> followerPartitions;
private Boolean isUnderReplicated;
private List<Integer> underReplicatedPartitions;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getDiskName() {
return diskName;
}
public void setDiskName(String diskName) {
this.diskName = diskName;
}
public List<Integer> getLeaderPartitions() {
return leaderPartitions;
}
public void setLeaderPartitions(List<Integer> leaderPartitions) {
this.leaderPartitions = leaderPartitions;
}
public List<Integer> getFollowerPartitions() {
return followerPartitions;
}
public void setFollowerPartitions(List<Integer> followerPartitions) {
this.followerPartitions = followerPartitions;
}
public Boolean getUnderReplicated() {
return isUnderReplicated;
}
public void setUnderReplicated(Boolean underReplicated) {
isUnderReplicated = underReplicated;
}
public List<Integer> getUnderReplicatedPartitions() {
return underReplicatedPartitions;
}
public void setUnderReplicatedPartitions(List<Integer> underReplicatedPartitions) {
this.underReplicatedPartitions = underReplicatedPartitions;
}
@Override
public String toString() {
return "TopicDiskLocation{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", brokerId=" + brokerId +
", diskName='" + diskName + '\'' +
", leaderPartitions=" + leaderPartitions +
", followerPartitions=" + followerPartitions +
", isUnderReplicated=" + isUnderReplicated +
", underReplicatedPartitions=" + underReplicatedPartitions +
'}';
}
}


@@ -0,0 +1,71 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.account;
import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum;
/**
* Account (user) information
* @author zengqiao
* @date 20/6/10
*/
public class Account {
private String username;
private String chineseName;
private String department;
private AccountRoleEnum accountRoleEnum;
public Account(String username, String chineseName, String department, AccountRoleEnum accountRoleEnum) {
this.username = username;
this.chineseName = chineseName;
this.department = department;
this.accountRoleEnum = accountRoleEnum;
}
public Account() {
super();
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getChineseName() {
return chineseName;
}
public void setChineseName(String chineseName) {
this.chineseName = chineseName;
}
public String getDepartment() {
return department;
}
public void setDepartment(String department) {
this.department = department;
}
public AccountRoleEnum getAccountRoleEnum() {
return accountRoleEnum;
}
public void setAccountRoleEnum(AccountRoleEnum accountRoleEnum) {
this.accountRoleEnum = accountRoleEnum;
}
@Override
public String toString() {
return "Account{" +
"username='" + username + '\'' +
", chineseName='" + chineseName + '\'' +
", department='" + department + '\'' +
", accountRoleEnum=" + accountRoleEnum +
'}';
}
}


@@ -0,0 +1,114 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.analysis;
import java.util.List;
/**
* @author zengqiao
* @date 19/12/29
*/
public class AnalysisBrokerDTO {
private Long clusterId;
private Integer brokerId;
private Long baseTime;
private Double bytesIn;
private Double bytesOut;
private Double messagesIn;
private Double totalFetchRequests;
private Double totalProduceRequests;
List<AnalysisTopicDTO> topicAnalysisVOList;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public Long getBaseTime() {
return baseTime;
}
public void setBaseTime(Long baseTime) {
this.baseTime = baseTime;
}
public Double getBytesIn() {
return bytesIn;
}
public void setBytesIn(Double bytesIn) {
this.bytesIn = bytesIn;
}
public Double getBytesOut() {
return bytesOut;
}
public void setBytesOut(Double bytesOut) {
this.bytesOut = bytesOut;
}
public Double getMessagesIn() {
return messagesIn;
}
public void setMessagesIn(Double messagesIn) {
this.messagesIn = messagesIn;
}
public Double getTotalFetchRequests() {
return totalFetchRequests;
}
public void setTotalFetchRequests(Double totalFetchRequests) {
this.totalFetchRequests = totalFetchRequests;
}
public Double getTotalProduceRequests() {
return totalProduceRequests;
}
public void setTotalProduceRequests(Double totalProduceRequests) {
this.totalProduceRequests = totalProduceRequests;
}
public List<AnalysisTopicDTO> getTopicAnalysisVOList() {
return topicAnalysisVOList;
}
public void setTopicAnalysisVOList(List<AnalysisTopicDTO> topicAnalysisVOList) {
this.topicAnalysisVOList = topicAnalysisVOList;
}
@Override
public String toString() {
return "AnalysisBrokerDTO{" +
"clusterId=" + clusterId +
", brokerId=" + brokerId +
", baseTime=" + baseTime +
", bytesIn=" + bytesIn +
", bytesOut=" + bytesOut +
", messagesIn=" + messagesIn +
", totalFetchRequests=" + totalFetchRequests +
", totalProduceRequests=" + totalProduceRequests +
", topicAnalysisVOList=" + topicAnalysisVOList +
'}';
}
}

View File

@@ -0,0 +1,134 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.analysis;
/**
* @author zengqiao
* @date 19/12/29
*/
public class AnalysisTopicDTO {
private String topicName;
private Double bytesIn;
private Double bytesInRate;
private Double bytesOut;
private Double bytesOutRate;
private Double messagesIn;
private Double messagesInRate;
private Double totalFetchRequests;
private Double totalFetchRequestsRate;
private Double totalProduceRequests;
private Double totalProduceRequestsRate;
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Double getBytesIn() {
return bytesIn;
}
public void setBytesIn(Double bytesIn) {
this.bytesIn = bytesIn;
}
public Double getBytesInRate() {
return bytesInRate;
}
public void setBytesInRate(Double bytesInRate) {
this.bytesInRate = bytesInRate;
}
public Double getBytesOut() {
return bytesOut;
}
public void setBytesOut(Double bytesOut) {
this.bytesOut = bytesOut;
}
public Double getBytesOutRate() {
return bytesOutRate;
}
public void setBytesOutRate(Double bytesOutRate) {
this.bytesOutRate = bytesOutRate;
}
public Double getMessagesIn() {
return messagesIn;
}
public void setMessagesIn(Double messagesIn) {
this.messagesIn = messagesIn;
}
public Double getMessagesInRate() {
return messagesInRate;
}
public void setMessagesInRate(Double messagesInRate) {
this.messagesInRate = messagesInRate;
}
public Double getTotalFetchRequests() {
return totalFetchRequests;
}
public void setTotalFetchRequests(Double totalFetchRequests) {
this.totalFetchRequests = totalFetchRequests;
}
public Double getTotalFetchRequestsRate() {
return totalFetchRequestsRate;
}
public void setTotalFetchRequestsRate(Double totalFetchRequestsRate) {
this.totalFetchRequestsRate = totalFetchRequestsRate;
}
public Double getTotalProduceRequests() {
return totalProduceRequests;
}
public void setTotalProduceRequests(Double totalProduceRequests) {
this.totalProduceRequests = totalProduceRequests;
}
public Double getTotalProduceRequestsRate() {
return totalProduceRequestsRate;
}
public void setTotalProduceRequestsRate(Double totalProduceRequestsRate) {
this.totalProduceRequestsRate = totalProduceRequestsRate;
}
@Override
public String toString() {
return "AnalysisTopicDTO{" +
"topicName='" + topicName + '\'' +
", bytesIn=" + bytesIn +
", bytesInRate=" + bytesInRate +
", bytesOut=" + bytesOut +
", bytesOutRate=" + bytesOutRate +
", messagesIn=" + messagesIn +
", messagesInRate=" + messagesInRate +
", totalFetchRequests=" + totalFetchRequests +
", totalFetchRequestsRate=" + totalFetchRequestsRate +
", totalProduceRequests=" + totalProduceRequests +
", totalProduceRequestsRate=" + totalProduceRequestsRate +
'}';
}
}

View File

@@ -0,0 +1,50 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.api;
import java.util.concurrent.atomic.AtomicInteger;
/**
* @author zengqiao
* @date 20/7/27
*/
public class ApiCount {
private int apiLevel;
private Integer maxNum;
private AtomicInteger currentNum;
public ApiCount(int apiLevel, Integer maxNum, AtomicInteger currentNum) {
this.apiLevel = apiLevel;
this.maxNum = maxNum;
this.currentNum = currentNum;
}
public int getApiLevel() {
return apiLevel;
}
public Integer getMaxNum() {
return maxNum;
}
public AtomicInteger getCurrentNum() {
return currentNum;
}
public Boolean incAndCheckIsOverFlow() {
return maxNum < currentNum.incrementAndGet();
}
public int decPresentNum() {
return currentNum.decrementAndGet();
}
@Override
public String toString() {
return "ApiCount{" +
"apiLevel=" + apiLevel +
", maxNum=" + maxNum +
", currentNum=" + currentNum +
'}';
}
}
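/*
 * Illustrative usage sketch, not part of the original commit: it shows the intended
 * acquire/release pattern around incAndCheckIsOverFlow()/decPresentNum(). Only ApiCount
 * itself is real; the level, limit, and handler wiring below are assumptions.
 */
class ApiCountUsageSketch {
    private static final ApiCount LEVEL_4_COUNTER = new ApiCount(4, 100, new AtomicInteger(0));

    static boolean tryHandle(Runnable handler) {
        // Reject the request when the in-flight counter would exceed maxNum.
        if (LEVEL_4_COUNTER.incAndCheckIsOverFlow()) {
            LEVEL_4_COUNTER.decPresentNum();   // roll back the speculative increment
            return false;
        }
        try {
            handler.run();                     // hypothetical business logic
            return true;
        } finally {
            LEVEL_4_COUNTER.decPresentNum();   // always release the slot
        }
    }
}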

View File

@@ -0,0 +1,37 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
import java.util.List;
/**
* @author zengqiao
* @date 20/7/14
*/
public class ClusterBrokerStatus {
private List<Integer> brokerReplicaStatusList;
private List<Integer> brokerBytesInStatusList;
public List<Integer> getBrokerReplicaStatusList() {
return brokerReplicaStatusList;
}
public void setBrokerReplicaStatusList(List<Integer> brokerReplicaStatusList) {
this.brokerReplicaStatusList = brokerReplicaStatusList;
}
public List<Integer> getBrokerBytesInStatusList() {
return brokerBytesInStatusList;
}
public void setBrokerBytesInStatusList(List<Integer> brokerBytesInStatusList) {
this.brokerBytesInStatusList = brokerBytesInStatusList;
}
@Override
public String toString() {
return "ClusterBrokerStatus{" +
"brokerReplicaStatusList=" + brokerReplicaStatusList +
", brokerBytesInStatusList=" + brokerBytesInStatusList +
'}';
}
}

View File

@@ -0,0 +1,123 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
/**
* @author zengqiao
* @date 20/4/1
*/
public class LogicalCluster {
private Long logicalClusterId;
private String logicalClusterName;
private Integer mode;
private Integer topicNum;
private String clusterVersion;
private Long physicalClusterId;
private String bootstrapServers;
private String description;
private Long gmtCreate;
private Long gmtModify;
public Long getLogicalClusterId() {
return logicalClusterId;
}
public void setLogicalClusterId(Long logicalClusterId) {
this.logicalClusterId = logicalClusterId;
}
public String getLogicalClusterName() {
return logicalClusterName;
}
public void setLogicalClusterName(String logicalClusterName) {
this.logicalClusterName = logicalClusterName;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public Integer getTopicNum() {
return topicNum;
}
public void setTopicNum(Integer topicNum) {
this.topicNum = topicNum;
}
public String getClusterVersion() {
return clusterVersion;
}
public void setClusterVersion(String clusterVersion) {
this.clusterVersion = clusterVersion;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public void setPhysicalClusterId(Long physicalClusterId) {
this.physicalClusterId = physicalClusterId;
}
public String getBootstrapServers() {
return bootstrapServers;
}
public void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
public Long getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Long gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Long getGmtModify() {
return gmtModify;
}
public void setGmtModify(Long gmtModify) {
this.gmtModify = gmtModify;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicalCluster{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +
", physicalClusterId=" + physicalClusterId +
", bootstrapServers='" + bootstrapServers + '\'' +
", description='" + description + '\'' +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}

View File

@@ -0,0 +1,80 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
/**
* @author zengqiao
* @date 20/6/29
*/
public class LogicalClusterMetrics {
private Double totalProduceRequestsPerSec = 0.0;
private Double bytesInPerSec = 0.0;
private Double bytesOutPerSec = 0.0;
private Double bytesRejectedPerSec = 0.0;
private Double messagesInPerSec = 0.0;
private Long gmtCreate;
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Double getBytesOutPerSec() {
return bytesOutPerSec;
}
public void setBytesOutPerSec(Double bytesOutPerSec) {
this.bytesOutPerSec = bytesOutPerSec;
}
public Double getBytesRejectedPerSec() {
return bytesRejectedPerSec;
}
public void setBytesRejectedPerSec(Double bytesRejectedPerSec) {
this.bytesRejectedPerSec = bytesRejectedPerSec;
}
public Double getMessagesInPerSec() {
return messagesInPerSec;
}
public void setMessagesInPerSec(Double messagesInPerSec) {
this.messagesInPerSec = messagesInPerSec;
}
public Long getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Long gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Double getTotalProduceRequestsPerSec() {
return totalProduceRequestsPerSec;
}
public void setTotalProduceRequestsPerSec(Double totalProduceRequestsPerSec) {
this.totalProduceRequestsPerSec = totalProduceRequestsPerSec;
}
@Override
public String toString() {
return "LogicalClusterMetrics{" +
"totalProduceRequestsPerSec=" + totalProduceRequestsPerSec +
", bytesInPerSec=" + bytesInPerSec +
", bytesOutPerSec=" + bytesOutPerSec +
", bytesRejectedPerSec=" + bytesRejectedPerSec +
", messagesInPerSec=" + messagesInPerSec +
", gmtCreate=" + gmtCreate +
'}';
}
}

View File

@@ -0,0 +1,26 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
import java.util.List;
/**
* @author zengqiao
* @date 20/7/24
*/
public class CreateTopicConfig {
private List<CreateTopicElemConfig> configList;
public List<CreateTopicElemConfig> getConfigList() {
return configList;
}
public void setConfigList(List<CreateTopicElemConfig> configList) {
this.configList = configList;
}
@Override
public String toString() {
return "CreateTopicConfig{" +
"configList=" + configList +
'}';
}
}

View File

@@ -0,0 +1,92 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
import java.util.List;
/**
* @author zengqiao
* @date 20/7/24
*/
public class CreateTopicElemConfig {
private Long clusterId;
private List<Integer> brokerIdList;
private List<Long> regionIdList;
private Integer partitionNum;
private Integer replicaNum;
private Integer retentionTimeUnitHour;
private Long autoExecMaxPeakBytesInUnitB;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public List<Integer> getBrokerIdList() {
return brokerIdList;
}
public void setBrokerIdList(List<Integer> brokerIdList) {
this.brokerIdList = brokerIdList;
}
public List<Long> getRegionIdList() {
return regionIdList;
}
public void setRegionIdList(List<Long> regionIdList) {
this.regionIdList = regionIdList;
}
public Integer getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(Integer replicaNum) {
this.replicaNum = replicaNum;
}
public Integer getRetentionTimeUnitHour() {
return retentionTimeUnitHour;
}
public void setRetentionTimeUnitHour(Integer retentionTimeUnitHour) {
this.retentionTimeUnitHour = retentionTimeUnitHour;
}
public Long getAutoExecMaxPeakBytesInUnitB() {
return autoExecMaxPeakBytesInUnitB;
}
public void setAutoExecMaxPeakBytesInUnitB(Long autoExecMaxPeakBytesInUnitB) {
this.autoExecMaxPeakBytesInUnitB = autoExecMaxPeakBytesInUnitB;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
@Override
public String toString() {
return "CreateTopicElemConfig{" +
"clusterId=" + clusterId +
", brokerIdList=" + brokerIdList +
", regionIdList=" + regionIdList +
", partitionNum=" + partitionNum +
", replicaNum=" + replicaNum +
", retentionTimeUnitHour=" + retentionTimeUnitHour +
", autoExecMaxPeakBytesInUnitB=" + autoExecMaxPeakBytesInUnitB +
'}';
}
}

View File

@@ -0,0 +1,25 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
/**
* Peak/average bytes-in traffic configuration
* @author zengqiao
* @date 20/6/9
*/
public class MaxAvgBytesInConfig {
private Integer duration;
public Integer getDuration() {
return duration;
}
public void setDuration(Integer duration) {
this.duration = duration;
}
@Override
public String toString() {
return "MaxAvgBytesInConfig{" +
"duration=" + duration +
'}';
}
}

View File

@@ -0,0 +1,57 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
/**
* @author zengqiao
* @date 20/9/7
*/
public class SinkTopicRequestTimeMetricsConfig {
private Long clusterId;
private String topicName;
private Long startId;
private Long step;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getStartId() {
return startId;
}
public void setStartId(Long startId) {
this.startId = startId;
}
public Long getStep() {
return step;
}
public void setStep(Long step) {
this.step = step;
}
@Override
public String toString() {
return "SinkTopicRequestTimeMetricsConfig{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", startId=" + startId +
", step=" + step +
'}';
}
}

View File

@@ -0,0 +1,57 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
/**
* @author zengqiao
* @date 20/8/23
*/
public class TopicAnomalyFlowConfig {
private Long minTopicBytesInUnitB;
private Double bytesInIncUnitB;
private Long minTopicProduceQps;
private Double produceQpsInc;
public Long getMinTopicBytesInUnitB() {
return minTopicBytesInUnitB;
}
public void setMinTopicBytesInUnitB(Long minTopicBytesInUnitB) {
this.minTopicBytesInUnitB = minTopicBytesInUnitB;
}
public Double getBytesInIncUnitB() {
return bytesInIncUnitB;
}
public void setBytesInIncUnitB(Double bytesInIncUnitB) {
this.bytesInIncUnitB = bytesInIncUnitB;
}
public Long getMinTopicProduceQps() {
return minTopicProduceQps;
}
public void setMinTopicProduceQps(Long minTopicProduceQps) {
this.minTopicProduceQps = minTopicProduceQps;
}
public Double getProduceQpsInc() {
return produceQpsInc;
}
public void setProduceQpsInc(Double produceQpsInc) {
this.produceQpsInc = produceQpsInc;
}
@Override
public String toString() {
return "TopicAnomalyFlowConfig{" +
"minTopicBytesInUnitB=" + minTopicBytesInUnitB +
", bytesInIncUnitB=" + bytesInIncUnitB +
", minTopicProduceQps=" + minTopicProduceQps +
", produceQpsInc=" + produceQpsInc +
'}';
}
}

View File

@@ -0,0 +1,44 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
/**
* @author zengqiao
* @date 20/8/31
*/
public class TopicNameConfig {
private Long clusterId;
private String topicName;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
@Override
public String toString() {
return "TopicNameConfig{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
'}';
}
public boolean legal() {
return !ValidateUtils.isNull(clusterId) && !ValidateUtils.isBlank(topicName);
}
}

View File

@@ -0,0 +1,58 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config.expert;
import java.util.ArrayList;
import java.util.List;
/**
* @author zengqiao
* @date 20/8/23
*/
public class RegionTopicHotConfig {
private Long minTopicBytesInUnitB;
private Integer maxDisPartitionNum;
private List<Long> ignoreClusterIdList;
public Long getMinTopicBytesInUnitB() {
if (minTopicBytesInUnitB == null) {
return 3 * 1024 * 1024L;
}
return minTopicBytesInUnitB;
}
public void setMinTopicBytesInUnitB(Long minTopicBytesInUnitB) {
this.minTopicBytesInUnitB = minTopicBytesInUnitB;
}
public Integer getMaxDisPartitionNum() {
if (maxDisPartitionNum == null) {
return 3;
}
return maxDisPartitionNum;
}
public void setMaxDisPartitionNum(Integer maxDisPartitionNum) {
this.maxDisPartitionNum = maxDisPartitionNum;
}
public List<Long> getIgnoreClusterIdList() {
if (ignoreClusterIdList == null) {
return new ArrayList<>();
}
return ignoreClusterIdList;
}
public void setIgnoreClusterIdList(List<Long> ignoreClusterIdList) {
this.ignoreClusterIdList = ignoreClusterIdList;
}
@Override
public String toString() {
return "RegionTopicHotConfig{" +
"minTopicBytesInUnitB=" + minTopicBytesInUnitB +
", maxDisPartitionNum=" + maxDisPartitionNum +
", ignoreClusterIdList=" + ignoreClusterIdList +
'}';
}
}
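/*
 * Illustrative sketch, not part of the original commit: the getters above fall back to
 * built-in defaults (3MB/s minimum bytes-in, at most 3 partitions per broker, empty
 * ignore list) when the corresponding field was never populated, e.g. when the config
 * entry is deserialized from partial JSON. The class and method names below are assumptions.
 */
class RegionTopicHotConfigDefaultsSketch {
    static void printDefaults() {
        RegionTopicHotConfig config = new RegionTopicHotConfig();   // no fields populated
        System.out.println(config.getMinTopicBytesInUnitB());       // 3145728 (3 * 1024 * 1024)
        System.out.println(config.getMaxDisPartitionNum());         // 3
        System.out.println(config.getIgnoreClusterIdList());        // []
    }
}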

View File

@@ -0,0 +1,38 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config.expert;
import java.util.ArrayList;
import java.util.List;
/**
* @author zengqiao
* @date 20/9/17
*/
public class TopicExpiredConfig {
private Integer minExpiredDay = 30;
private List<Long> ignoreClusterIdList = new ArrayList<>();
public Integer getMinExpiredDay() {
return minExpiredDay;
}
public void setMinExpiredDay(Integer minExpiredDay) {
this.minExpiredDay = minExpiredDay;
}
public List<Long> getIgnoreClusterIdList() {
return ignoreClusterIdList;
}
public void setIgnoreClusterIdList(List<Long> ignoreClusterIdList) {
this.ignoreClusterIdList = ignoreClusterIdList;
}
@Override
public String toString() {
return "TopicExpiredConfig{" +
"minExpiredDay=" + minExpiredDay +
", ignoreClusterIdList=" + ignoreClusterIdList +
'}';
}
}

View File

@@ -0,0 +1,50 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config.expert;
import java.util.ArrayList;
import java.util.List;
/**
* Expert service - insufficient Topic partition configuration
* @author zengqiao
* @date 20/8/23
*/
public class TopicInsufficientPartitionConfig {
private Long maxBytesInPerPartitionUnitB = 3 * 1024 * 1024L;
private Long minTopicBytesInUnitB = 3 * 1024 * 1024L;
private List<Long> ignoreClusterIdList = new ArrayList<>();
public Long getMaxBytesInPerPartitionUnitB() {
return maxBytesInPerPartitionUnitB;
}
public void setMaxBytesInPerPartitionUnitB(Long maxBytesInPerPartitionUnitB) {
this.maxBytesInPerPartitionUnitB = maxBytesInPerPartitionUnitB;
}
public Long getMinTopicBytesInUnitB() {
return minTopicBytesInUnitB;
}
public void setMinTopicBytesInUnitB(Long minTopicBytesInUnitB) {
this.minTopicBytesInUnitB = minTopicBytesInUnitB;
}
public List<Long> getIgnoreClusterIdList() {
return ignoreClusterIdList;
}
public void setIgnoreClusterIdList(List<Long> ignoreClusterIdList) {
this.ignoreClusterIdList = ignoreClusterIdList;
}
@Override
public String toString() {
return "TopicInsufficientPartitionConfig{" +
"maxBytesInPerPartitionUnitB=" + maxBytesInPerPartitionUnitB +
", minTopicBytesInUnitB=" + minTopicBytesInUnitB +
", ignoreClusterIdList=" + ignoreClusterIdList +
'}';
}
}

View File

@@ -0,0 +1,57 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
/**
* @author zengqiao
* @date 20/1/9
*/
public class ConsumeDetailDTO {
private Integer partitionId;
private Long offset;
private Long consumeOffset;
private String consumerId;
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getOffset() {
return offset;
}
public void setOffset(Long offset) {
this.offset = offset;
}
public Long getConsumeOffset() {
return consumeOffset;
}
public void setConsumeOffset(Long consumeOffset) {
this.consumeOffset = consumeOffset;
}
public String getConsumerId() {
return consumerId;
}
public void setConsumerId(String consumerId) {
this.consumerId = consumerId;
}
@Override
public String toString() {
return "ConsumeDetailDTO{" +
"partitionId=" + partitionId +
", offset=" + offset +
", consumeOffset=" + consumeOffset +
", consumerId='" + consumerId + '\'' +
'}';
}
}

View File

@@ -0,0 +1,92 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
import java.util.Objects;
/**
* Consumer group information
* @author zengqiao
* @date 19/4/18
*/
public class ConsumerGroupDTO {
private Long clusterId;
private String consumerGroup;
private List<String> appIdList;
private OffsetLocationEnum offsetStoreLocation;
public ConsumerGroupDTO(Long clusterId,
String consumerGroup,
List<String> appIdList,
OffsetLocationEnum offsetStoreLocation) {
this.clusterId = clusterId;
this.consumerGroup = consumerGroup;
this.appIdList = appIdList;
this.offsetStoreLocation = offsetStoreLocation;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) {
this.offsetStoreLocation = offsetStoreLocation;
}
@Override
public String toString() {
return "ConsumerGroupDTO{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", appIdList=" + appIdList +
", offsetStoreLocation=" + offsetStoreLocation +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
return clusterId.equals(that.clusterId)
&& consumerGroup.equals(that.consumerGroup)
&& offsetStoreLocation == that.offsetStoreLocation;
}
@Override
public int hashCode() {
return Objects.hash(clusterId, consumerGroup, offsetStoreLocation);
}
}
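/*
 * Illustrative sketch, not part of the original commit: equals()/hashCode() above are keyed
 * on (clusterId, consumerGroup, offsetStoreLocation) and deliberately exclude appIdList, so
 * consumer groups collected from multiple offset stores collapse when dropped into a Set.
 * The helper class and parameter names below are assumptions.
 */
class ConsumerGroupDedupSketch {
    static java.util.Set<ConsumerGroupDTO> dedup(List<ConsumerGroupDTO> fromBroker,
                                                 List<ConsumerGroupDTO> fromZookeeper) {
        java.util.Set<ConsumerGroupDTO> merged = new java.util.HashSet<>(fromBroker);
        merged.addAll(fromZookeeper);   // duplicates collapse because appIdList is excluded from equals
        return merged;
    }
}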

View File

@@ -0,0 +1,90 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.expert;
/**
* @author zengqiao
* @date 20/3/30
*/
public class TopicAnomalyFlow {
private Long clusterId;
private String clusterName;
private String topicName;
private Double bytesIn;
private Double bytesInIncr;
private Double iops;
private Double iopsIncr;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Double getBytesIn() {
return bytesIn;
}
public void setBytesIn(Double bytesIn) {
this.bytesIn = bytesIn;
}
public Double getBytesInIncr() {
return bytesInIncr;
}
public void setBytesInIncr(Double bytesInIncr) {
this.bytesInIncr = bytesInIncr;
}
public Double getIops() {
return iops;
}
public void setIops(Double iops) {
this.iops = iops;
}
public Double getIopsIncr() {
return iopsIncr;
}
public void setIopsIncr(Double iopsIncr) {
this.iopsIncr = iopsIncr;
}
@Override
public String toString() {
return "AnomalyFlowTopicDTO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", topicName='" + topicName + '\'' +
", bytesIn=" + bytesIn +
", bytesInIncr=" + bytesInIncr +
", iops=" + iops +
", iopsIncr=" + iopsIncr +
'}';
}
}

View File

@@ -0,0 +1,111 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.expert;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import java.util.List;
/**
* @author zengqiao
* @date 20/3/30
*/
public class TopicInsufficientPartition {
private ClusterDO clusterDO;
private String topicName;
private Integer presentPartitionNum;
private Integer suggestedPartitionNum;
private List<Double> maxAvgBytesInList;
private Double bytesInPerPartition;
private List<Integer> brokerIdList;
public TopicInsufficientPartition(
ClusterDO clusterDO,
String topicName,
Integer presentPartitionNum,
Integer suggestedPartitionNum,
List<Double> maxAvgBytesInList,
Double bytesInPerPartition,
List<Integer> brokerIdList) {
this.clusterDO = clusterDO;
this.topicName = topicName;
this.presentPartitionNum = presentPartitionNum;
this.suggestedPartitionNum = suggestedPartitionNum;
this.maxAvgBytesInList = maxAvgBytesInList;
this.bytesInPerPartition = bytesInPerPartition;
this.brokerIdList = brokerIdList;
}
public ClusterDO getClusterDO() {
return clusterDO;
}
public void setClusterDO(ClusterDO clusterDO) {
this.clusterDO = clusterDO;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getPresentPartitionNum() {
return presentPartitionNum;
}
public void setPresentPartitionNum(Integer presentPartitionNum) {
this.presentPartitionNum = presentPartitionNum;
}
public Integer getSuggestedPartitionNum() {
return suggestedPartitionNum;
}
public void setSuggestedPartitionNum(Integer suggestedPartitionNum) {
this.suggestedPartitionNum = suggestedPartitionNum;
}
public List<Double> getMaxAvgBytesInList() {
return maxAvgBytesInList;
}
public void setMaxAvgBytesInList(List<Double> maxAvgBytesInList) {
this.maxAvgBytesInList = maxAvgBytesInList;
}
public Double getBytesInPerPartition() {
return bytesInPerPartition;
}
public void setBytesInPerPartition(Double bytesInPerPartition) {
this.bytesInPerPartition = bytesInPerPartition;
}
public List<Integer> getBrokerIdList() {
return brokerIdList;
}
public void setBrokerIdList(List<Integer> brokerIdList) {
this.brokerIdList = brokerIdList;
}
@Override
public String toString() {
return "TopicInsufficientPartition{" +
"clusterDO=" + clusterDO +
", topicName='" + topicName + '\'' +
", presentPartitionNum=" + presentPartitionNum +
", suggestedPartitionNum=" + suggestedPartitionNum +
", maxAvgBytesInList=" + maxAvgBytesInList +
", bytesInPerPartition=" + bytesInPerPartition +
", brokerIdList=" + brokerIdList +
'}';
}
}

View File

@@ -0,0 +1,70 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.expert;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import java.util.Map;
/**
* Hot Topic within a Region
* @author zengqiao
* @date 20/3/27
*/
public class TopicRegionHot {
private ClusterDO clusterDO;
private String topicName;
private Long retentionTime;
private Map<Integer, Integer> brokerIdPartitionNumMap;
public TopicRegionHot(ClusterDO clusterDO, String topicName, Long retentionTime, Map<Integer, Integer> brokerIdPartitionNumMap) {
this.clusterDO = clusterDO;
this.topicName = topicName;
this.retentionTime = retentionTime;
this.brokerIdPartitionNumMap = brokerIdPartitionNumMap;
}
public ClusterDO getClusterDO() {
return clusterDO;
}
public void setClusterDO(ClusterDO clusterDO) {
this.clusterDO = clusterDO;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getRetentionTime() {
return retentionTime;
}
public void setRetentionTime(Long retentionTime) {
this.retentionTime = retentionTime;
}
public Map<Integer, Integer> getBrokerIdPartitionNumMap() {
return brokerIdPartitionNumMap;
}
public void setBrokerIdPartitionNumMap(Map<Integer, Integer> brokerIdPartitionNumMap) {
this.brokerIdPartitionNumMap = brokerIdPartitionNumMap;
}
@Override
public String toString() {
return "ExpertRegionTopicHot{" +
"clusterDO=" + clusterDO +
", topicName='" + topicName + '\'' +
", retentionTime=" + retentionTime +
", brokerIdPartitionNumMap=" + brokerIdPartitionNumMap +
'}';
}
}

View File

@@ -0,0 +1,30 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
/**
* @author zengqiao
* @date 20/7/29
*/
public class AppRateConfig extends BaseGatewayConfig {
private Long appRateLimit;
public AppRateConfig(Long version, Long appRateLimit) {
this.version = version;
this.appRateLimit = appRateLimit;
}
public Long getAppRateLimit() {
return appRateLimit;
}
public void setAppRateLimit(Long appRateLimit) {
this.appRateLimit = appRateLimit;
}
@Override
public String toString() {
return "AppRateConfig{" +
"appRateLimit=" + appRateLimit +
", version=" + version +
'}';
}
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
/**
* @author zengqiao
* @date 20/7/29
*/
public class BaseGatewayConfig {
protected Long version;
public Long getVersion() {
return version;
}
public void setVersion(Long version) {
this.version = version;
}
@Override
public String toString() {
return "GatewayConfig{" +
"version=" + version +
'}';
}
}

View File

@@ -0,0 +1,30 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
/**
* @author zengqiao
* @date 20/7/29
*/
public class IpRateConfig extends BaseGatewayConfig {
private Long ipRateLimit;
public IpRateConfig(Long version, Long ipRateLimit) {
this.version = version;
this.ipRateLimit = ipRateLimit;
}
public Long getIpRateLimit() {
return ipRateLimit;
}
public void setIpRateLimit(Long ipRateLimit) {
this.ipRateLimit = ipRateLimit;
}
@Override
public String toString() {
return "IpRateConfig{" +
"ipRateLimit=" + ipRateLimit +
", version=" + version +
'}';
}
}

View File

@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
import java.util.List;
import java.util.Map;
/**
* @author zengqiao
* @date 20/7/29
*/
public class KafkaBootstrapServerConfig extends BaseGatewayConfig {
private Map<Long, List<String>> clusterIdBootstrapServersMap;
public KafkaBootstrapServerConfig(Long version, Map<Long, List<String>> clusterIdBootstrapServersMap) {
this.version = version;
this.clusterIdBootstrapServersMap = clusterIdBootstrapServersMap;
}
public Map<Long, List<String>> getClusterIdBootstrapServersMap() {
return clusterIdBootstrapServersMap;
}
public void setClusterIdBootstrapServersMap(Map<Long, List<String>> clusterIdBootstrapServersMap) {
this.clusterIdBootstrapServersMap = clusterIdBootstrapServersMap;
}
@Override
public String toString() {
return "KafkaBootstrapServerConfig{" +
"clusterIdBootstrapServersMap=" + clusterIdBootstrapServersMap +
", version=" + version +
'}';
}
}

View File

@@ -0,0 +1,30 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
/**
* @author zengqiao
* @date 20/7/29
*/
public class RequestQueueConfig extends BaseGatewayConfig {
private Long maxRequestQueueSize;
public RequestQueueConfig(Long version, Long maxRequestQueueSize) {
this.version = version;
this.maxRequestQueueSize = maxRequestQueueSize;
}
public Long getMaxRequestQueueSize() {
return maxRequestQueueSize;
}
public void setMaxRequestQueueSize(Long maxRequestQueueSize) {
this.maxRequestQueueSize = maxRequestQueueSize;
}
@Override
public String toString() {
return "RequestQueueConfig{" +
"maxRequestQueueSize=" + maxRequestQueueSize +
", version=" + version +
'}';
}
}

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
import java.util.Map;
/**
* @author zengqiao
* @date 20/7/29
*/
public class SpRateConfig extends BaseGatewayConfig {
private Map<String, Long> spRateMap;
public SpRateConfig(Long version, Map<String, Long> spRateMap) {
this.version = version;
this.spRateMap = spRateMap;
}
public Map<String, Long> getSpRateMap() {
return spRateMap;
}
public void setSpRateMap(Map<String, Long> spRateMap) {
this.spRateMap = spRateMap;
}
@Override
public String toString() {
return "SpRateConfig{" +
"spRateMap=" + spRateMap +
", version=" + version +
'}';
}
}

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.gateway;
/**
* @author zhongyuankai
* @date 2020/4/27
*/
public class TopicQuota {
private String appId;
private Long clusterId;
private String topicName;
private Long produceQuota;
private Long consumeQuota;
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getProduceQuota() {
return produceQuota;
}
public void setProduceQuota(Long produceQuota) {
this.produceQuota = produceQuota;
}
public Long getConsumeQuota() {
return consumeQuota;
}
public void setConsumeQuota(Long consumeQuota) {
this.consumeQuota = consumeQuota;
}
@Override
public String toString() {
return "TopicQuota{" +
"appId='" + appId + '\'' +
", clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", produceQuota=" + produceQuota +
", consumeQuota=" + consumeQuota +
'}';
}
}

View File

@@ -0,0 +1,130 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.reassign;
import com.xiaojukeji.kafka.manager.common.bizenum.TaskStatusReassignEnum;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.ReassignmentElemData;
import kafka.common.TopicAndPartition;
import java.util.List;
import java.util.Map;
/**
* @author zengqiao
* @date 20/5/14
*/
public class ReassignStatus {
private Long subTaskId;
private Long clusterId;
private String clusterName;
private String topicName;
private Integer status;
private Long realThrottle;
private Long maxThrottle;
private Long minThrottle;
private List<ReassignmentElemData> reassignList;
private Map<TopicAndPartition, TaskStatusReassignEnum> reassignStatusMap;
public Long getSubTaskId() {
return subTaskId;
}
public void setSubTaskId(Long subTaskId) {
this.subTaskId = subTaskId;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Long getRealThrottle() {
return realThrottle;
}
public void setRealThrottle(Long realThrottle) {
this.realThrottle = realThrottle;
}
public Long getMaxThrottle() {
return maxThrottle;
}
public void setMaxThrottle(Long maxThrottle) {
this.maxThrottle = maxThrottle;
}
public Long getMinThrottle() {
return minThrottle;
}
public void setMinThrottle(Long minThrottle) {
this.minThrottle = minThrottle;
}
public List<ReassignmentElemData> getReassignList() {
return reassignList;
}
public void setReassignList(List<ReassignmentElemData> reassignList) {
this.reassignList = reassignList;
}
public Map<TopicAndPartition, TaskStatusReassignEnum> getReassignStatusMap() {
return reassignStatusMap;
}
public void setReassignStatusMap(Map<TopicAndPartition, TaskStatusReassignEnum> reassignStatusMap) {
this.reassignStatusMap = reassignStatusMap;
}
@Override
public String toString() {
return "ReassignStatus{" +
"subTaskId=" + subTaskId +
", clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", topicName='" + topicName + '\'' +
", status=" + status +
", realThrottle=" + realThrottle +
", maxThrottle=" + maxThrottle +
", minThrottle=" + minThrottle +
", reassignList=" + reassignList +
", reassignStatusMap=" + reassignStatusMap +
'}';
}
}

View File

@@ -0,0 +1,92 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.remote;
import java.util.List;
/**
* @author zengqiao
* @date 20/8/25
*/
public class KafkaConsumerMetrics {
private Long clusterId;
private String topicName;
private String consumerGroup;
private String location;
private Integer partitionNum;
private List<KafkaConsumerMetricsElem> consumeDetailList;
private Long createTime;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public List<KafkaConsumerMetricsElem> getConsumeDetailList() {
return consumeDetailList;
}
public void setConsumeDetailList(List<KafkaConsumerMetricsElem> consumeDetailList) {
this.consumeDetailList = consumeDetailList;
}
public Long getCreateTime() {
return createTime;
}
public void setCreateTime(Long createTime) {
this.createTime = createTime;
}
@Override
public String toString() {
return "KafkaConsumerMetrics{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", consumerGroup='" + consumerGroup + '\'' +
", location='" + location + '\'' +
", partitionNum=" + partitionNum +
", consumeDetailList=" + consumeDetailList +
", createTime=" + createTime +
'}';
}
}

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.remote;
/**
* @author zengqiao
* @date 20/8/31
*/
public class KafkaConsumerMetricsElem {
private Integer partitionId;
private Long partitionOffset;
private Long consumeOffset;
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getPartitionOffset() {
return partitionOffset;
}
public void setPartitionOffset(Long partitionOffset) {
this.partitionOffset = partitionOffset;
}
public Long getConsumeOffset() {
return consumeOffset;
}
public void setConsumeOffset(Long consumeOffset) {
this.consumeOffset = consumeOffset;
}
@Override
public String toString() {
return "KafkaConsumerMetricsElem{" +
"partitionId=" + partitionId +
", partitionOffset=" + partitionOffset +
", consumeOffset=" + consumeOffset +
'}';
}
}

View File

@@ -0,0 +1,79 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.remote;
/**
* @author zengqiao
* @date 20/8/31
*/
public class KafkaTopicMetrics {
private Long clusterId;
private String topic;
private Integer partitionNum;
private Double messagesInPerSec;
private Double bytesInPerSec;
private Long timestamp;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Double getMessagesInPerSec() {
return messagesInPerSec;
}
public void setMessagesInPerSec(Double messagesInPerSec) {
this.messagesInPerSec = messagesInPerSec;
}
public Double getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Double bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Long getTimestamp() {
return timestamp;
}
public void setTimestamp(Long timestamp) {
this.timestamp = timestamp;
}
@Override
public String toString() {
return "KafkaTopicMetrics{" +
"clusterId=" + clusterId +
", topic='" + topic + '\'' +
", partitionNum=" + partitionNum +
", messagesInPerSec=" + messagesInPerSec +
", bytesInPerSec=" + bytesInPerSec +
", timestamp=" + timestamp +
'}';
}
}

View File

@@ -0,0 +1,123 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* @author zengqiao
* @date 20/5/12
*/
public class MineTopicSummary {
private Long logicalClusterId;
private String logicalClusterName;
private Long physicalClusterId;
private String topicName;
private Object bytesIn;
private Object bytesOut;
private String appId;
private String appName;
private String appPrincipals;
private Integer access;
public Long getLogicalClusterId() {
return logicalClusterId;
}
public void setLogicalClusterId(Long logicalClusterId) {
this.logicalClusterId = logicalClusterId;
}
public String getLogicalClusterName() {
return logicalClusterName;
}
public void setLogicalClusterName(String logicalClusterName) {
this.logicalClusterName = logicalClusterName;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public void setPhysicalClusterId(Long physicalClusterId) {
this.physicalClusterId = physicalClusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Object getBytesIn() {
return bytesIn;
}
public void setBytesIn(Object bytesIn) {
this.bytesIn = bytesIn;
}
public Object getBytesOut() {
return bytesOut;
}
public void setBytesOut(Object bytesOut) {
this.bytesOut = bytesOut;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public String getAppPrincipals() {
return appPrincipals;
}
public void setAppPrincipals(String appPrincipals) {
this.appPrincipals = appPrincipals;
}
public Integer getAccess() {
return access;
}
public void setAccess(Integer access) {
this.access = access;
}
@Override
public String toString() {
return "MineTopicSummary{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", physicalClusterId=" + physicalClusterId +
", topicName='" + topicName + '\'' +
", bytesIn=" + bytesIn +
", bytesOut=" + bytesOut +
", appId='" + appId + '\'' +
", appName='" + appName + '\'' +
", appPrincipals='" + appPrincipals + '\'' +
", access=" + access +
'}';
}
}

View File

@@ -0,0 +1,123 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* @author zhongyuankai
* @date 2020/6/8
*/
public class TopicAppData {
private Long clusterId;
private String topicName;
private String appId;
private String appName;
private String appPrincipals;
private Long produceQuota;
private Long consumerQuota;
private Boolean produceThrottled;
private Boolean fetchThrottled;
private Integer access;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public String getAppPrincipals() {
return appPrincipals;
}
public void setAppPrincipals(String appPrincipals) {
this.appPrincipals = appPrincipals;
}
public Long getProduceQuota() {
return produceQuota;
}
public void setProduceQuota(Long produceQuota) {
this.produceQuota = produceQuota;
}
public Long getConsumerQuota() {
return consumerQuota;
}
public void setConsumerQuota(Long consumerQuota) {
this.consumerQuota = consumerQuota;
}
public Boolean getProduceThrottled() {
return produceThrottled;
}
public void setProduceThrottled(Boolean produceThrottled) {
this.produceThrottled = produceThrottled;
}
public Boolean getFetchThrottled() {
return fetchThrottled;
}
public void setFetchThrottled(Boolean fetchThrottled) {
this.fetchThrottled = fetchThrottled;
}
public Integer getAccess() {
return access;
}
public void setAccess(Integer access) {
this.access = access;
}
@Override
public String toString() {
return "TopicAppDTO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", appId='" + appId + '\'' +
", appName='" + appName + '\'' +
", appPrincipals='" + appPrincipals + '\'' +
", produceQuota=" + produceQuota +
", consumerQuota=" + consumerQuota +
", produceThrottled=" + produceThrottled +
", fetchThrottled=" + fetchThrottled +
", access=" + access +
'}';
}
}

View File

@@ -0,0 +1,178 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* @author arthur
* @date 2018/09/03
*/
public class TopicBasicDTO {
private Long clusterId;
private String appId;
private String appName;
private String principals;
private String topicName;
private String description;
private String region;
private Integer score;
private String topicCodeC;
private Integer partitionNum;
private Integer replicaNum;
private Integer brokerNum;
private Long modifyTime;
private Long createTime;
private Long retentionTime;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public String getPrincipals() {
return principals;
}
public void setPrincipals(String principals) {
this.principals = principals;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
public Integer getScore() {
return score;
}
public void setScore(Integer score) {
this.score = score;
}
public String getTopicCodeC() {
return topicCodeC;
}
public void setTopicCodeC(String topicCodeC) {
this.topicCodeC = topicCodeC;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Integer getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(Integer replicaNum) {
this.replicaNum = replicaNum;
}
public Integer getBrokerNum() {
return brokerNum;
}
public void setBrokerNum(Integer brokerNum) {
this.brokerNum = brokerNum;
}
public Long getModifyTime() {
return modifyTime;
}
public void setModifyTime(Long modifyTime) {
this.modifyTime = modifyTime;
}
public Long getCreateTime() {
return createTime;
}
public void setCreateTime(Long createTime) {
this.createTime = createTime;
}
public Long getRetentionTime() {
return retentionTime;
}
public void setRetentionTime(Long retentionTime) {
this.retentionTime = retentionTime;
}
@Override
public String toString() {
return "TopicBasicDTO{" +
"clusterId=" + clusterId +
", appId='" + appId + '\'' +
", appName='" + appName + '\'' +
", principals='" + principals + '\'' +
", topicName='" + topicName + '\'' +
", description='" + description + '\'' +
", region='" + region + '\'' +
", score=" + score +
", topicCodeC='" + topicCodeC + '\'' +
", partitionNum=" + partitionNum +
", replicaNum=" + replicaNum +
", brokerNum=" + brokerNum +
", modifyTime=" + modifyTime +
", createTime=" + createTime +
", retentionTime=" + retentionTime +
'}';
}
}

View File

@@ -0,0 +1,81 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
import java.util.List;
/**
* @author zhongyuankai
* @date 20/4/17
*/
public class TopicBrokerDTO {
private Integer brokerId;
private String host;
private Integer partitionNum;
private List<Integer> partitionIdList;
private List<Integer> leaderPartitionIdList;
private boolean alive;
public Integer getBrokerId() {
return brokerId;
}
public void setBrokerId(Integer brokerId) {
this.brokerId = brokerId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public List<Integer> getPartitionIdList() {
return partitionIdList;
}
public void setPartitionIdList(List<Integer> partitionIdList) {
this.partitionIdList = partitionIdList;
}
public List<Integer> getLeaderPartitionIdList() {
return leaderPartitionIdList;
}
public void setLeaderPartitionIdList(List<Integer> leaderPartitionIdList) {
this.leaderPartitionIdList = leaderPartitionIdList;
}
public boolean isAlive() {
return alive;
}
public void setAlive(boolean alive) {
this.alive = alive;
}
@Override
public String toString() {
return "TopicBrokerDTO{" +
"brokerId=" + brokerId +
", host='" + host + '\'' +
", partitionNum=" + partitionNum +
", partitionIdList=" + partitionIdList +
", leaderPartitionIdList=" + leaderPartitionIdList +
", alive=" + alive +
'}';
}
}

View File

@@ -0,0 +1,68 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* @author zhongyuankai
* @date 20/09/08
*/
public class TopicBusinessInfo {
private String appId;
private String appName;
private String principals;
private Long clusterId;
private String topicName;
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public String getPrincipals() {
return principals;
}
public void setPrincipals(String principals) {
this.principals = principals;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
@Override
public String toString() {
return "TopicBusinessInfoVO{" +
"appId='" + appId + '\'' +
", appName='" + appName + '\'' +
", principals='" + principals + '\'' +
", clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
'}';
}
}

View File

@@ -0,0 +1,90 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* @author zengqiao
* @date 20/4/20
*/
public class TopicConnection {
private Long clusterId;
private String topicName;
private String appId;
private String ip;
private String hostname;
private String clientType;
private String clientVersion;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
public String getClientType() {
return clientType;
}
public void setClientType(String clientType) {
this.clientType = clientType;
}
public String getClientVersion() {
return clientVersion;
}
public void setClientVersion(String clientVersion) {
this.clientVersion = clientVersion;
}
@Override
public String toString() {
return "TopicConnectionDTO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", appId='" + appId + '\'' +
", ip='" + ip + '\'' +
", hostname='" + hostname + '\'' +
", clientType='" + clientType + '\'' +
", clientVersion='" + clientVersion + '\'' +
'}';
}
}

View File

@@ -0,0 +1,101 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* @author zengqiao
* @date 20/5/12
*/
public class TopicDTO {
private Long logicalClusterId;
private String logicalClusterName;
private String topicName;
private String description;
private String appId;
private String appName;
private String appPrincipals;
private Boolean needAuth;
public Long getLogicalClusterId() {
return logicalClusterId;
}
public void setLogicalClusterId(Long logicalClusterId) {
this.logicalClusterId = logicalClusterId;
}
public String getLogicalClusterName() {
return logicalClusterName;
}
public void setLogicalClusterName(String logicalClusterName) {
this.logicalClusterName = logicalClusterName;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public String getAppPrincipals() {
return appPrincipals;
}
public void setAppPrincipals(String appPrincipals) {
this.appPrincipals = appPrincipals;
}
public Boolean getNeedAuth() {
return needAuth;
}
public void setNeedAuth(Boolean needAuth) {
this.needAuth = needAuth;
}
@Override
public String toString() {
return "TopicDTO{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", topicName='" + topicName + '\'' +
", description='" + description + '\'' +
", appId='" + appId + '\'' +
", appName='" + appName + '\'' +
", appPrincipals='" + appPrincipals + '\'' +
", needAuth=" + needAuth +
'}';
}
}

View File

@@ -0,0 +1,71 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
import com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
/**
* @author zengqiao
* @date 20/9/2
*/
public class TopicExpiredData {
private Long clusterId;
private String topicName;
private LogicalClusterDO logicalClusterDO;
private AppDO appDO;
private Integer fetchConnectionNum;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public LogicalClusterDO getLogicalClusterDO() {
return logicalClusterDO;
}
public void setLogicalClusterDO(LogicalClusterDO logicalClusterDO) {
this.logicalClusterDO = logicalClusterDO;
}
public AppDO getAppDO() {
return appDO;
}
public void setAppDO(AppDO appDO) {
this.appDO = appDO;
}
public Integer getFetchConnectionNum() {
return fetchConnectionNum;
}
public void setFetchConnectionNum(Integer fetchConnectionNum) {
this.fetchConnectionNum = fetchConnectionNum;
}
@Override
public String toString() {
return "TopicExpiredData{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", logicalClusterDO=" + logicalClusterDO +
", appDO=" + appDO +
", fetchConnectionNum=" + fetchConnectionNum +
'}';
}
}

View File

@@ -0,0 +1,147 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/5/11
*/
public class TopicMetricsDTO {
@ApiModelProperty(value = "每秒流入消息数")
private Object messagesInPerSec;
@ApiModelProperty(value = "每秒流入字节数")
private Object bytesInPerSec;
@ApiModelProperty(value = "每秒流出字节数")
private Object bytesOutPerSec;
@ApiModelProperty(value = "每秒拒绝字节数")
private Object bytesRejectedPerSec;
@ApiModelProperty(value = "每秒请求数")
private Object totalProduceRequestsPerSec;
@ApiModelProperty(value = "appId维度每秒流入消息数")
private Object appIdMessagesInPerSec;
@ApiModelProperty(value = "appId维度每秒流入字节数")
private Object appIdBytesInPerSec;
@ApiModelProperty(value = "appId维度每秒流出字节数")
private Object appIdBytesOutPerSec;
@ApiModelProperty(value = "produce限流")
private Boolean produceThrottled;
@ApiModelProperty(value = "consume限流")
private Boolean consumeThrottled;
@ApiModelProperty(value = "创建时间")
private Long gmtCreate;
public Object getMessagesInPerSec() {
return messagesInPerSec;
}
public void setMessagesInPerSec(Object messagesInPerSec) {
this.messagesInPerSec = messagesInPerSec;
}
public Object getBytesInPerSec() {
return bytesInPerSec;
}
public void setBytesInPerSec(Object bytesInPerSec) {
this.bytesInPerSec = bytesInPerSec;
}
public Object getBytesOutPerSec() {
return bytesOutPerSec;
}
public void setBytesOutPerSec(Object bytesOutPerSec) {
this.bytesOutPerSec = bytesOutPerSec;
}
public Object getBytesRejectedPerSec() {
return bytesRejectedPerSec;
}
public void setBytesRejectedPerSec(Object bytesRejectedPerSec) {
this.bytesRejectedPerSec = bytesRejectedPerSec;
}
public Object getTotalProduceRequestsPerSec() {
return totalProduceRequestsPerSec;
}
public void setTotalProduceRequestsPerSec(Object totalProduceRequestsPerSec) {
this.totalProduceRequestsPerSec = totalProduceRequestsPerSec;
}
public Object getAppIdMessagesInPerSec() {
return appIdMessagesInPerSec;
}
public void setAppIdMessagesInPerSec(Object appIdMessagesInPerSec) {
this.appIdMessagesInPerSec = appIdMessagesInPerSec;
}
public Object getAppIdBytesInPerSec() {
return appIdBytesInPerSec;
}
public void setAppIdBytesInPerSec(Object appIdBytesInPerSec) {
this.appIdBytesInPerSec = appIdBytesInPerSec;
}
public Object getAppIdBytesOutPerSec() {
return appIdBytesOutPerSec;
}
public void setAppIdBytesOutPerSec(Object appIdBytesOutPerSec) {
this.appIdBytesOutPerSec = appIdBytesOutPerSec;
}
public Boolean getProduceThrottled() {
return produceThrottled;
}
public void setProduceThrottled(Boolean produceThrottled) {
this.produceThrottled = produceThrottled;
}
public Boolean getConsumeThrottled() {
return consumeThrottled;
}
public void setConsumeThrottled(Boolean consumeThrottled) {
this.consumeThrottled = consumeThrottled;
}
public Long getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Long gmtCreate) {
this.gmtCreate = gmtCreate;
}
@Override
public String toString() {
return "TopicMetricsDTO{" +
"messagesInPerSec=" + messagesInPerSec +
", bytesInPerSec=" + bytesInPerSec +
", bytesOutPerSec=" + bytesOutPerSec +
", bytesRejectedPerSec=" + bytesRejectedPerSec +
", totalProduceRequestsPerSec=" + totalProduceRequestsPerSec +
", appIdMessagesInPerSec=" + appIdMessagesInPerSec +
", appIdBytesInPerSec=" + appIdBytesInPerSec +
", appIdBytesOutPerSec=" + appIdBytesOutPerSec +
", produceThrottled=" + produceThrottled +
", consumeThrottled=" + consumeThrottled +
", gmtCreate=" + gmtCreate +
'}';
}
}
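
A minimal usage sketch (not part of this commit) of how the loosely typed Object metric fields might be filled from JMX readings and read back defensively; the sample values and the TopicMetricsDTOUsageSketch class are hypothetical.

import com.xiaojukeji.kafka.manager.common.entity.ao.topic.TopicMetricsDTO;

public class TopicMetricsDTOUsageSketch {
    public static void main(String[] args) {
        TopicMetricsDTO dto = new TopicMetricsDTO();
        // The JMX rates are declared as Object, so any numeric type (or null when a
        // metric is absent) can be carried; the values below are made up.
        dto.setMessagesInPerSec(1234.5);
        dto.setBytesInPerSec(1048576L);
        dto.setProduceThrottled(Boolean.FALSE);
        dto.setGmtCreate(System.currentTimeMillis());

        // Callers have to null-check and cast because the fields are untyped.
        Object bytesIn = dto.getBytesInPerSec();
        double bytesInPerSec = (bytesIn instanceof Number) ? ((Number) bytesIn).doubleValue() : 0.0;
        System.out.println("bytesInPerSec=" + bytesInPerSec);
    }
}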

View File

@@ -0,0 +1,146 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
/**
* Topic overview information
* @author zengqiao
* @date 20/5/14
*/
public class TopicOverview {
private Long clusterId;
private String topicName;
private Integer replicaNum;
private Integer partitionNum;
private Long retentionTime;
private Object byteIn;
private Object produceRequest;
private String appName;
private String appId;
private String description;
private Long updateTime;
private Long logicalClusterId;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Integer getReplicaNum() {
return replicaNum;
}
public void setReplicaNum(Integer replicaNum) {
this.replicaNum = replicaNum;
}
public Integer getPartitionNum() {
return partitionNum;
}
public void setPartitionNum(Integer partitionNum) {
this.partitionNum = partitionNum;
}
public Long getRetentionTime() {
return retentionTime;
}
public void setRetentionTime(Long retentionTime) {
this.retentionTime = retentionTime;
}
public Object getByteIn() {
return byteIn;
}
public void setByteIn(Object byteIn) {
this.byteIn = byteIn;
}
public Object getProduceRequest() {
return produceRequest;
}
public void setProduceRequest(Object produceRequest) {
this.produceRequest = produceRequest;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getUpdateTime() {
return updateTime;
}
public void setUpdateTime(Long updateTime) {
this.updateTime = updateTime;
}
public Long getLogicalClusterId() {
return logicalClusterId;
}
public void setLogicalClusterId(Long logicalClusterId) {
this.logicalClusterId = logicalClusterId;
}
@Override
public String toString() {
return "TopicOverview{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", replicaNum=" + replicaNum +
", partitionNum=" + partitionNum +
", retentionTime=" + retentionTime +
", byteIn=" + byteIn +
", produceRequest=" + produceRequest +
", appName='" + appName + '\'' +
", appId='" + appId + '\'' +
", description='" + description + '\'' +
", updateTime=" + updateTime +
", logicalClusterId=" + logicalClusterId +
'}';
}
}

View File

@@ -0,0 +1,147 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.topic;
import java.util.List;
/**
* @author arthur
* @date 2017/6/6.
*/
public class TopicPartitionDTO {
private Integer partitionId;
private Long beginningOffset;
private Long endOffset;
private Long msgNum;
private Integer leaderBrokerId;
private Integer preferredBrokerId;
private Integer leaderEpoch;
private List<Integer> replicaBrokerIdList;
private List<Integer> isrBrokerIdList;
private Boolean underReplicated;
private Long logSize;
private String location;
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Long getBeginningOffset() {
return beginningOffset;
}
public void setBeginningOffset(Long beginningOffset) {
this.beginningOffset = beginningOffset;
}
public Long getEndOffset() {
return endOffset;
}
public void setEndOffset(Long endOffset) {
this.endOffset = endOffset;
}
public Long getMsgNum() {
return msgNum;
}
public void setMsgNum(Long msgNum) {
this.msgNum = msgNum;
}
public Integer getLeaderBrokerId() {
return leaderBrokerId;
}
public void setLeaderBrokerId(Integer leaderBrokerId) {
this.leaderBrokerId = leaderBrokerId;
}
public Integer getPreferredBrokerId() {
return preferredBrokerId;
}
public void setPreferredBrokerId(Integer preferredBrokerId) {
this.preferredBrokerId = preferredBrokerId;
}
public Integer getLeaderEpoch() {
return leaderEpoch;
}
public void setLeaderEpoch(Integer leaderEpoch) {
this.leaderEpoch = leaderEpoch;
}
public List<Integer> getReplicaBrokerIdList() {
return replicaBrokerIdList;
}
public void setReplicaBrokerIdList(List<Integer> replicaBrokerIdList) {
this.replicaBrokerIdList = replicaBrokerIdList;
}
public List<Integer> getIsrBrokerIdList() {
return isrBrokerIdList;
}
public void setIsrBrokerIdList(List<Integer> isrBrokerIdList) {
this.isrBrokerIdList = isrBrokerIdList;
}
public Boolean getUnderReplicated() {
return underReplicated;
}
public void setUnderReplicated(Boolean underReplicated) {
this.underReplicated = underReplicated;
}
public Long getLogSize() {
return logSize;
}
public void setLogSize(Long logSize) {
this.logSize = logSize;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
@Override
public String toString() {
return "TopicPartitionDTO{" +
"partitionId=" + partitionId +
", beginningOffset=" + beginningOffset +
", endOffset=" + endOffset +
", msgNum=" + msgNum +
", leaderBrokerId=" + leaderBrokerId +
", preferredBrokerId=" + preferredBrokerId +
", leaderEpoch=" + leaderEpoch +
", replicaBrokerIdList=" + replicaBrokerIdList +
", isrBrokerIdList=" + isrBrokerIdList +
", underReplicated=" + underReplicated +
", logSize=" + logSize +
", location='" + location + '\'' +
'}';
}
}
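
A minimal sketch (not part of this commit) of how a TopicPartitionDTO might be filled; treating msgNum as endOffset minus beginningOffset and deriving underReplicated from the ISR/replica set sizes are assumptions for illustration, not behavior confirmed by this diff.

import java.util.Arrays;
import com.xiaojukeji.kafka.manager.common.entity.ao.topic.TopicPartitionDTO;

public class TopicPartitionDTOUsageSketch {
    public static void main(String[] args) {
        TopicPartitionDTO dto = new TopicPartitionDTO();
        dto.setPartitionId(0);
        dto.setBeginningOffset(100L);
        dto.setEndOffset(250L);
        // Assumption: msgNum is the retained message count, i.e. end offset minus beginning offset.
        dto.setMsgNum(dto.getEndOffset() - dto.getBeginningOffset());
        dto.setLeaderBrokerId(1);
        dto.setPreferredBrokerId(1);
        dto.setReplicaBrokerIdList(Arrays.asList(1, 2, 3));
        dto.setIsrBrokerIdList(Arrays.asList(1, 2));
        // Assumption: a partition is under-replicated when the ISR is smaller than the replica set.
        dto.setUnderReplicated(dto.getIsrBrokerIdList().size() < dto.getReplicaBrokerIdList().size());
        System.out.println(dto);
    }
}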

View File

@@ -0,0 +1,50 @@
package com.xiaojukeji.kafka.manager.common.entity.dto;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/4/23
*/
@ApiModel(description="Topic information")
public class ClusterTopicDTO {
@ApiModelProperty(value = "Cluster ID")
protected Long clusterId;
@ApiModelProperty(value = "Topic name")
protected String topicName;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
@Override
public String toString() {
return "ClusterTopicDTO{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| ValidateUtils.isNull(topicName)) {
return false;
}
return true;
}
}

View File

@@ -0,0 +1,65 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.config;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/3/20
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@ApiModel(description = "Configuration")
public class ConfigDTO {
@ApiModelProperty(value = "Config key")
private String configKey;
@ApiModelProperty(value = "Config value")
private String configValue;
@ApiModelProperty(value = "Remarks")
private String configDescription;
public String getConfigKey() {
return configKey;
}
public void setConfigKey(String configKey) {
this.configKey = configKey;
}
public String getConfigValue() {
return configValue;
}
public void setConfigValue(String configValue) {
this.configValue = configValue;
}
public String getConfigDescription() {
return configDescription;
}
public void setConfigDescription(String configDescription) {
this.configDescription = configDescription;
}
@Override
public String toString() {
return "ConfigDTO{" +
"configKey='" + configKey + '\'' +
", configValue='" + configValue + '\'' +
", configDescription='" + configDescription + '\'' +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isExistBlank(configKey)
|| ValidateUtils.isBlank(configValue)
|| ValidateUtils.isBlank(configDescription)) {
return false;
}
return true;
}
}

View File

@@ -0,0 +1,61 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.gateway;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/7/7
*/
public class KafkaAclSearchDTO {
@ApiModelProperty(value = "Cluster ID")
private Long clusterId;
@ApiModelProperty(value = "Start time (ms)")
private Long start;
@ApiModelProperty(value = "End time (ms)")
private Long end;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Long getStart() {
return start;
}
public void setStart(Long start) {
this.start = start;
}
public Long getEnd() {
return end;
}
public void setEnd(Long end) {
this.end = end;
}
@Override
public String toString() {
return "KafkaAclSearchDTO{" +
"clusterId=" + clusterId +
", start=" + start +
", end=" + end +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| ValidateUtils.isNull(start)
|| ValidateUtils.isNull(end)) {
return false;
}
return true;
}
}

View File

@@ -0,0 +1,51 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.gateway;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/7/7
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@ApiModel(description = "Kafka user search")
public class KafkaUserSearchDTO {
@ApiModelProperty(value = "Start time (ms)")
private Long start;
@ApiModelProperty(value = "End time (ms)")
private Long end;
public Long getStart() {
return start;
}
public void setStart(Long start) {
this.start = start;
}
public Long getEnd() {
return end;
}
public void setEnd(Long end) {
this.end = end;
}
@Override
public String toString() {
return "KafkaUserSearchDTO{" +
"start=" + start +
", end=" + end +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNullOrLessThanZero(start) || ValidateUtils.isNullOrLessThanZero(end)) {
return false;
}
return true;
}
}

View File

@@ -0,0 +1,127 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.gateway;
import java.util.Date;
/**
* @author zengqiao
* @date 20/7/6
*/
public class TopicConnectionDTO {
private Long id;
private Long clusterId;
private String topicName;
// producer or consumer
private String type;
// appId#ip#clientVersion
private String clientInfo;
private String appId;
private String ip;
private String clientVersion;
private Date gmtCreate;
private Date gmtModify;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getClientInfo() {
return clientInfo;
}
public void setClientInfo(String clientInfo) {
this.clientInfo = clientInfo;
}
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
}
public String getClientVersion() {
return clientVersion;
}
public void setClientVersion(String clientVersion) {
this.clientVersion = clientVersion;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "TopicConnectionDTO{" +
"id=" + id +
", clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", type='" + type + '\'' +
", clientInfo='" + clientInfo + '\'' +
", appId='" + appId + '\'' +
", ip='" + ip + '\'' +
", clientVersion='" + clientVersion + '\'' +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
'}';
}
}
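
A minimal sketch (not part of this commit) showing the clientInfo format documented above, "appId#ip#clientVersion", being split into its three parts; the sample values and the helper class name are hypothetical.

import com.xiaojukeji.kafka.manager.common.entity.dto.gateway.TopicConnectionDTO;

public class TopicConnectionDTOUsageSketch {
    public static void main(String[] args) {
        TopicConnectionDTO dto = new TopicConnectionDTO();
        dto.setClusterId(1L);
        dto.setTopicName("demo-topic");
        dto.setType("producer");  // per the field comment: "producer" or "consumer"

        // clientInfo is documented as "appId#ip#clientVersion"; split it into its parts.
        String clientInfo = "app_demo#10.0.0.1#2.4.0";
        dto.setClientInfo(clientInfo);
        String[] parts = clientInfo.split("#", 3);
        if (parts.length == 3) {
            dto.setAppId(parts[0]);
            dto.setIp(parts[1]);
            dto.setClientVersion(parts[2]);
        }
        System.out.println(dto);
    }
}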

View File

@@ -0,0 +1,76 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.normal;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/5/4
*/
@ApiModel(description="App information")
public class AppDTO {
@ApiModelProperty(value="AppId, immutable after creation")
private String appId;
@ApiModelProperty(value="App name")
private String name;
@ApiModelProperty(value="App description")
private String description;
@ApiModelProperty(value="App principals (owners)")
private String principals;
public String getAppId() {
return appId;
}
public void setAppId(String appId) {
this.appId = appId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getPrincipals() {
return principals;
}
public void setPrincipals(String principals) {
this.principals = principals;
}
@Override
public String toString() {
return "AppDTO{" +
"appId='" + appId + '\'' +
", name='" + name + '\'' +
", description='" + description + '\'' +
", principals='" + principals + '\'' +
'}';
}
public boolean legal() {
if (ValidateUtils.isBlank(appId)
|| ValidateUtils.isBlank(name)
|| ValidateUtils.isBlank(principals)
|| ValidateUtils.isBlank(description)) {
return false;
}
return true;
}
}

View File

@@ -0,0 +1,126 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.normal;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
* @author zengqiao
* @date 20/8/21
*/
@ApiModel(description = "JMX metrics switches")
@JsonIgnoreProperties(ignoreUnknown = true)
public class JmxSwitchDTO {
@ApiModelProperty(value = "Cluster ID")
private Long clusterId;
@ApiModelProperty(value = "Whether the ID is a physical cluster ID, true: yes, false: no")
private Boolean isPhysicalClusterId;
@ApiModelProperty(value = "Topic name")
private String topicName;
@ApiModelProperty(value = "Enable Topic request JMX metrics")
private Boolean openTopicRequestMetrics;
@ApiModelProperty(value = "Enable AppId-Topic JMX metrics")
private Boolean openAppIdTopicMetrics;
@ApiModelProperty(value = "Enable client request JMX metrics")
private Boolean openClientRequestMetrics;
@ApiModelProperty(value = "Enable disk JMX metrics")
private Boolean openDiskMetrics;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public Boolean getPhysicalClusterId() {
return isPhysicalClusterId;
}
public void setPhysicalClusterId(Boolean physicalClusterId) {
isPhysicalClusterId = physicalClusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Boolean getOpenTopicRequestMetrics() {
return openTopicRequestMetrics;
}
public void setOpenTopicRequestMetrics(Boolean openTopicRequestMetrics) {
this.openTopicRequestMetrics = openTopicRequestMetrics;
}
public Boolean getOpenAppIdTopicMetrics() {
return openAppIdTopicMetrics;
}
public void setOpenAppIdTopicMetrics(Boolean openAppIdTopicMetrics) {
this.openAppIdTopicMetrics = openAppIdTopicMetrics;
}
public Boolean getOpenClientRequestMetrics() {
return openClientRequestMetrics;
}
public void setOpenClientRequestMetrics(Boolean openClientRequestMetrics) {
this.openClientRequestMetrics = openClientRequestMetrics;
}
public Boolean getOpenDiskMetrics() {
return openDiskMetrics;
}
public void setOpenDiskMetrics(Boolean openDiskMetrics) {
this.openDiskMetrics = openDiskMetrics;
}
@Override
public String toString() {
return "JmxSwitchDTO{" +
"clusterId=" + clusterId +
", isPhysicalClusterId=" + isPhysicalClusterId +
", topicName='" + topicName + '\'' +
", openTopicRequestMetrics=" + openTopicRequestMetrics +
", openAppIdTopicMetrics=" + openAppIdTopicMetrics +
", openClientRequestMetrics=" + openClientRequestMetrics +
", openDiskMetrics=" + openDiskMetrics +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| ValidateUtils.isNull(isPhysicalClusterId)
|| ValidateUtils.isNull(topicName)) {
return false;
}
if (ValidateUtils.isNull(openTopicRequestMetrics)) {
openTopicRequestMetrics = Boolean.FALSE;
}
if (ValidateUtils.isNull(openAppIdTopicMetrics)) {
openAppIdTopicMetrics = Boolean.FALSE;
}
if (ValidateUtils.isNull(openClientRequestMetrics)) {
openClientRequestMetrics = Boolean.FALSE;
}
if (ValidateUtils.isNull(openDiskMetrics)) {
openDiskMetrics = Boolean.FALSE;
}
return true;
}
}
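
A minimal sketch (not part of this commit) showing that paramLegal() both validates the required fields and defaults any switch left null to FALSE; the sample values and the helper class name are hypothetical.

import com.xiaojukeji.kafka.manager.common.entity.dto.normal.JmxSwitchDTO;

public class JmxSwitchDTOUsageSketch {
    public static void main(String[] args) {
        JmxSwitchDTO dto = new JmxSwitchDTO();
        dto.setClusterId(1L);
        dto.setPhysicalClusterId(Boolean.TRUE);
        dto.setTopicName("demo-topic");
        dto.setOpenTopicRequestMetrics(Boolean.TRUE);
        // The remaining switches are intentionally left null; paramLegal() normalizes them.
        if (dto.paramLegal()) {
            // Prints "false": openDiskMetrics was null and was defaulted by paramLegal().
            System.out.println("openDiskMetrics=" + dto.getOpenDiskMetrics());
        }
    }
}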

View File

@@ -0,0 +1,143 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.normal;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.springframework.web.multipart.MultipartFile;
/**
* @author zengqiao
* @date 20/4/29
*/
@ApiModel(description = "Kafka file")
public class KafkaFileDTO {
@ApiModelProperty(value = "ID")
private Long id;
@ApiModelProperty(value = "Cluster ID, required when creating, not when modifying; pass -1 for a package")
private Long clusterId;
@ApiModelProperty(value = "File name, required when creating, not when modifying")
private String fileName;
@ApiModelProperty(value = "File MD5")
private String fileMd5;
@ApiModelProperty(value = "File type, required when creating, not when modifying")
private Integer fileType;
@ApiModelProperty(value = "Remarks")
private String description;
@ApiModelProperty(value = "Uploaded file")
private MultipartFile uploadFile;
@ApiModelProperty(value = "Whether this is an update operation")
private Boolean modify;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
public String getFileMd5() {
return fileMd5;
}
public void setFileMd5(String fileMd5) {
this.fileMd5 = fileMd5;
}
public Integer getFileType() {
return fileType;
}
public void setFileType(Integer fileType) {
this.fileType = fileType;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public MultipartFile getUploadFile() {
return uploadFile;
}
public void setUploadFile(MultipartFile uploadFile) {
this.uploadFile = uploadFile;
}
public Boolean getModify() {
return modify;
}
public void setModify(Boolean modify) {
this.modify = modify;
}
@Override
public String toString() {
return "KafkaFileDTO{" +
"id=" + id +
", clusterId=" + clusterId +
", fileName='" + fileName + '\'' +
", fileMd5='" + fileMd5 + '\'' +
", fileType=" + fileType +
", description='" + description + '\'' +
'}';
}
public boolean createParamLegal() {
if (ValidateUtils.isNull(clusterId) ||
ValidateUtils.isBlank(fileName) ||
ValidateUtils.isNull(fileType) ||
ValidateUtils.isNull(fileMd5) ||
ValidateUtils.isNull(uploadFile)) {
return false;
}
if (!(fileName.endsWith(KafkaFileEnum.PACKAGE.getSuffix())
|| fileName.endsWith(KafkaFileEnum.SERVER_CONFIG.getSuffix()))) {
// invalid file suffix
return false;
}
if (KafkaFileEnum.PACKAGE.getCode().equals(fileType) && clusterId != -1) {
// a package does not belong to any cluster
return false;
}
return true;
}
public boolean modifyParamLegal() {
if (ValidateUtils.isBlank(fileName) ||
ValidateUtils.isNull(fileMd5) ||
ValidateUtils.isNull(uploadFile)) {
return false;
}
return true;
}
}
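
A minimal sketch (not part of this commit) of driving createParamLegal() for a package upload; the file suffix, the assumed package fileType code, and the use of MockMultipartFile from spring-test are assumptions for illustration, so the printed result depends on the real KafkaFileEnum values.

import org.springframework.mock.web.MockMultipartFile;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.KafkaFileDTO;

public class KafkaFileDTOUsageSketch {
    public static void main(String[] args) {
        KafkaFileDTO dto = new KafkaFileDTO();
        // createParamLegal() requires clusterId == -1 when the file type is a package,
        // because a package does not belong to any cluster.
        dto.setClusterId(-1L);
        dto.setFileName("kafka_2.12-2.5.0.tgz");           // suffix value is an assumption
        dto.setFileMd5("d41d8cd98f00b204e9800998ecf8427e");
        dto.setFileType(0);                                 // assumed code of KafkaFileEnum.PACKAGE
        dto.setDescription("kafka engine package");
        dto.setUploadFile(new MockMultipartFile("uploadFile", "kafka_2.12-2.5.0.tgz",
                "application/octet-stream", new byte[]{1, 2, 3}));
        System.out.println("createParamLegal=" + dto.createParamLegal());
    }
}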

Some files were not shown because too many files have changed in this diff.