Merge branch 'master' into v2.2.1_ldap

李民
2021-02-10 10:00:32 +08:00
committed by GitHub
147 changed files with 3080 additions and 875 deletions

View File

@@ -104,5 +104,10 @@
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,32 @@
package com.xiaojukeji.kafka.manager.common.bizenum;
/**
* Expired Topic status
* @author zengqiao
* @date 21/01/25
*/
public enum TopicExpiredStatusEnum {
ALREADY_NOTIFIED_AND_DELETED(-2, "已通知, 已下线"),
ALREADY_NOTIFIED_AND_CAN_DELETE(-1, "已通知, 可下线"),
ALREADY_EXPIRED_AND_WAIT_NOTIFY(0, "已过期, 待通知"),
ALREADY_NOTIFIED_AND_WAIT_RESPONSE(1, "已通知, 待反馈"),
;
private int status;
private String message;
TopicExpiredStatusEnum(int status, String message) {
this.status = status;
this.message = message;
}
public int getStatus() {
return status;
}
public String getMessage() {
return message;
}
}
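
The enum above only exposes getters, so a caller mapping raw status values from storage back to constants needs its own lookup. A minimal sketch follows, assuming such a lookup is wanted; the of helper and its fallback value are hypothetical and not part of this commit.

    // Hypothetical helper, not in this commit: resolve a raw status column
    // value to the matching TopicExpiredStatusEnum constant.
    public static TopicExpiredStatusEnum of(int status) {
        for (TopicExpiredStatusEnum statusEnum : TopicExpiredStatusEnum.values()) {
            if (statusEnum.getStatus() == status) {
                return statusEnum;
            }
        }
        // Assumed fallback: treat unknown values as "expired, waiting to notify".
        return ALREADY_EXPIRED_AND_WAIT_NOTIFY;
    }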

View File

@@ -97,7 +97,7 @@ public class Result<T> implements Serializable {
return result;
}
public static <T> Result<T> buildFailure(String message) {
public static <T> Result<T> buildGatewayFailure(String message) {
Result<T> result = new Result<T>();
result.setCode(ResultStatus.GATEWAY_INVALID_REQUEST.getCode());
result.setMessage(message);
@@ -105,6 +105,14 @@ public class Result<T> implements Serializable {
return result;
}
public static <T> Result<T> buildFailure(String message) {
Result<T> result = new Result<T>();
result.setCode(ResultStatus.FAIL.getCode());
result.setMessage(message);
result.setData(null);
return result;
}
public static Result buildFrom(ResultStatus resultStatus) {
Result result = new Result();
result.setCode(resultStatus.getCode());
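
With this hunk, the old buildFailure is renamed to buildGatewayFailure (still returning ResultStatus.GATEWAY_INVALID_REQUEST), and a new buildFailure is added that maps to the generic ResultStatus.FAIL code. A minimal sketch of how callers would now pick between the two; the messages are illustrative only.

    // Illustrative only: gateway-side validation failures keep the gateway code,
    // ordinary business failures use the restored generic factory.
    Result<Void> gatewayResult = Result.buildGatewayFailure("invalid gateway request"); // GATEWAY_INVALID_REQUEST
    Result<Void> bizResult = Result.buildFailure("operation failed");                   // FAIL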

View File

@@ -12,125 +12,101 @@ public enum ResultStatus {
SUCCESS(Constant.SUCCESS, "success"),
LOGIN_FAILED(1, "login failed, please check username and password"),
FAIL(1, "操作失败"),
/**
* Internal dependency errors, [1000, 1200)
* Operation errors, [1000, 2000)
* ------------------------------------------------------------------------------------------
*/
MYSQL_ERROR(1000, "operate database failed"),
CONNECT_ZOOKEEPER_FAILED(1000, "connect zookeeper failed"),
READ_ZOOKEEPER_FAILED(1000, "read zookeeper failed"),
READ_JMX_FAILED(1000, "read jmx failed"),
// Internal dependency errors: Kafka-specific errors, [1000, 1100)
BROKER_NUM_NOT_ENOUGH(1000, "broker not enough"),
CONTROLLER_NOT_ALIVE(1000, "controller not alive"),
CLUSTER_METADATA_ERROR(1000, "cluster metadata error"),
TOPIC_CONFIG_ERROR(1000, "topic config error"),
/**
* External dependency errors, [1200, 1400)
* ------------------------------------------------------------------------------------------
*/
CALL_CLUSTER_TASK_AGENT_FAILED(1000, " call cluster task agent failed"),
CALL_MONITOR_SYSTEM_ERROR(1000, " call monitor-system failed"),
/**
* External user operation errors, [1400, 1600)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(1400, "param illegal"),
OPERATION_FAILED(1401, "operation failed"),
OPERATION_FORBIDDEN(1402, "operation forbidden"),
API_CALL_EXCEED_LIMIT(1403, "api call exceed limit"),
USER_WITHOUT_AUTHORITY(1404, "user without authority"),
CHANGE_ZOOKEEPER_FORBIDDEN(1405, "change zookeeper forbidden"),
// Resource does not exist
CLUSTER_NOT_EXIST(10000, "cluster not exist"),
BROKER_NOT_EXIST(10000, "broker not exist"),
TOPIC_NOT_EXIST(10000, "topic not exist"),
PARTITION_NOT_EXIST(10000, "partition not exist"),
ACCOUNT_NOT_EXIST(10000, "account not exist"),
APP_NOT_EXIST(1000, "app not exist"),
ORDER_NOT_EXIST(1000, "order not exist"),
CONFIG_NOT_EXIST(1000, "config not exist"),
IDC_NOT_EXIST(1000, "idc not exist"),
TASK_NOT_EXIST(1110, "task not exist"),
TOPIC_OPERATION_PARAM_NULL_POINTER(1450, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(1451, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(1452, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(1453, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(1454, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(1455, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(1456, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(1457, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(1458, "未知错误"),
AUTHORITY_NOT_EXIST(1000, "authority not exist"),
/**
* Parameter errors, [2000, 3000)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(2000, "param illegal"),
CG_LOCATION_ILLEGAL(2001, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(2002, "order already handled"),
APP_ID_OR_PASSWORD_ILLEGAL(2003, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(2004, "system code illegal"),
CLUSTER_TASK_HOST_LIST_ILLEGAL(2005, "主机列表错误,请检查主机列表"),
JSON_PARSER_ERROR(2006, "json parser error"),
MONITOR_NOT_EXIST(1110, "monitor not exist"),
BROKER_NUM_NOT_ENOUGH(2050, "broker not enough"),
CONTROLLER_NOT_ALIVE(2051, "controller not alive"),
CLUSTER_METADATA_ERROR(2052, "cluster metadata error"),
TOPIC_CONFIG_ERROR(2053, "topic config error"),
QUOTA_NOT_EXIST(1000, "quota not exist, please check clusterId, topicName and appId"),
/**
* Parameter errors - resource check errors
* Errors caused by external-system issues during the operation, [7000, 8000)
* ------------------------------------------------------------------------------------------
*/
RESOURCE_NOT_EXIST(7100, "资源不存在"),
CLUSTER_NOT_EXIST(7101, "cluster not exist"),
BROKER_NOT_EXIST(7102, "broker not exist"),
TOPIC_NOT_EXIST(7103, "topic not exist"),
PARTITION_NOT_EXIST(7104, "partition not exist"),
ACCOUNT_NOT_EXIST(7105, "account not exist"),
APP_NOT_EXIST(7106, "app not exist"),
ORDER_NOT_EXIST(7107, "order not exist"),
CONFIG_NOT_EXIST(7108, "config not exist"),
IDC_NOT_EXIST(7109, "idc not exist"),
TASK_NOT_EXIST(7110, "task not exist"),
AUTHORITY_NOT_EXIST(7111, "authority not exist"),
MONITOR_NOT_EXIST(7112, "monitor not exist"),
QUOTA_NOT_EXIST(7113, "quota not exist, please check clusterId, topicName and appId"),
CONSUMER_GROUP_NOT_EXIST(7114, "consumerGroup not exist"),
TOPIC_BIZ_DATA_NOT_EXIST(7115, "topic biz data not exist, please sync topic to db"),
// Resource does not exist / already exists / already in use
RESOURCE_NOT_EXIST(1200, "资源存在"),
RESOURCE_ALREADY_EXISTED(1200, "资源已经存在"),
RESOURCE_NAME_DUPLICATED(1200, "资源名称重复"),
RESOURCE_ALREADY_USED(1000, "资源早已被使用"),
// Resource already exists
RESOURCE_ALREADY_EXISTED(7200, "资源已经存在"),
TOPIC_ALREADY_EXIST(7201, "topic already existed"),
// Duplicate resource name
RESOURCE_NAME_DUPLICATED(7300, "资源名称重复"),
// Resource already in use
RESOURCE_ALREADY_USED(7400, "资源早已被使用"),
/**
* Resource parameter errors
* Errors caused by external-system issues during the operation, [8000, 9000)
* ------------------------------------------------------------------------------------------
*/
CG_LOCATION_ILLEGAL(10000, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(1000, "order already handled"),
MYSQL_ERROR(8010, "operate database failed"),
APP_ID_OR_PASSWORD_ILLEGAL(1000, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(1000, "system code illegal"),
ZOOKEEPER_CONNECT_FAILED(8020, "zookeeper connect failed"),
ZOOKEEPER_READ_FAILED(8021, "zookeeper read failed"),
ZOOKEEPER_WRITE_FAILED(8022, "zookeeper write failed"),
ZOOKEEPER_DELETE_FAILED(8023, "zookeeper delete failed"),
CLUSTER_TASK_HOST_LIST_ILLEGAL(1000, "主机列表错误,请检查主机列表"),
// Failed to call the agent of the cluster task
CALL_CLUSTER_TASK_AGENT_FAILED(8030, " call cluster task agent failed"),
// Failed to call the monitoring system
CALL_MONITOR_SYSTEM_ERROR(8040, " call monitor-system failed"),
// Storage-related call failed
STORAGE_UPLOAD_FILE_FAILED(8050, "upload file failed"),
STORAGE_FILE_TYPE_NOT_SUPPORT(8051, "File type not support"),
STORAGE_DOWNLOAD_FILE_FAILED(8052, "download file failed"),
///////////////////////////////////////////////////////////////
USER_WITHOUT_AUTHORITY(1000, "user without authority"),
JSON_PARSER_ERROR(1000, "json parser error"),
TOPIC_OPERATION_PARAM_NULL_POINTER(2, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(3, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(6, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(9, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(10, "未知错误"),
TOPIC_EXIST_CONNECT_CANNOT_DELETE(10, "topic exist connect cannot delete"),
EXIST_TOPIC_CANNOT_DELETE(10, "exist topic cannot delete"),
/**
* Work orders
*/
CHANGE_ZOOKEEPER_FORBIDEN(100, "change zookeeper forbiden"),
// APP_EXIST_TOPIC_AUTHORITY_CANNOT_DELETE(1000, "app exist topic authority cannot delete"),
UPLOAD_FILE_FAIL(1000, "upload file fail"),
FILE_TYPE_NOT_SUPPORT(1000, "File type not support"),
DOWNLOAD_FILE_FAIL(1000, "download file fail"),
TOPIC_ALREADY_EXIST(17400, "topic already existed"),
CONSUMER_GROUP_NOT_EXIST(17411, "consumerGroup not exist"),
;
private int code;
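
A hedged sketch derived only from the range comments introduced in this hunk: parameter errors in [2000, 3000), resource errors in [7000, 8000), and external-dependency errors in [8000, 9000). The classify helper is hypothetical and not part of the commit.

    // Hypothetical helper illustrating the reorganized code ranges.
    static String classify(int code) {
        if (code >= 2000 && code < 3000) {
            return "parameter error";
        }
        if (code >= 7000 && code < 8000) {
            return "resource error";
        }
        if (code >= 8000 && code < 9000) {
            return "external dependency error";
        }
        return "other";
    }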

View File

@@ -23,6 +23,8 @@ public class ClusterDetailDTO {
private String securityProperties;
private String jmxProperties;
private Integer status;
private Date gmtCreate;
@@ -103,6 +105,14 @@ public class ClusterDetailDTO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
@@ -176,8 +186,9 @@ public class ClusterDetailDTO {
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", mode=" + mode +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +

View File

@@ -1,57 +0,0 @@
package com.xiaojukeji.kafka.manager.common.entity.ao.config;
/**
* @author zengqiao
* @date 20/9/7
*/
public class SinkTopicRequestTimeMetricsConfig {
private Long clusterId;
private String topicName;
private Long startId;
private Long step;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public String getTopicName() {
return topicName;
}
public void setTopicName(String topicName) {
this.topicName = topicName;
}
public Long getStartId() {
return startId;
}
public void setStartId(Long startId) {
this.startId = startId;
}
public Long getStep() {
return step;
}
public void setStep(Long step) {
this.step = step;
}
@Override
public String toString() {
return "SinkTopicRequestTimeMetricsConfig{" +
"clusterId=" + clusterId +
", topicName='" + topicName + '\'' +
", startId=" + startId +
", step=" + step +
'}';
}
}

View File

@@ -0,0 +1,45 @@
package com.xiaojukeji.kafka.manager.common.entity.dto.op;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.List;
/**
* @author zengqiao
* @date 21/01/24
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@ApiModel(description="优选为Controller的候选者")
public class ControllerPreferredCandidateDTO {
@ApiModelProperty(value="集群ID")
private Long clusterId;
@ApiModelProperty(value="优选为controller的BrokerId")
private List<Integer> brokerIdList;
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
public List<Integer> getBrokerIdList() {
return brokerIdList;
}
public void setBrokerIdList(List<Integer> brokerIdList) {
this.brokerIdList = brokerIdList;
}
@Override
public String toString() {
return "ControllerPreferredCandidateDTO{" +
"clusterId=" + clusterId +
", brokerIdList=" + brokerIdList +
'}';
}
}
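
A minimal sketch of the request body this DTO binds, assuming Jackson is the JSON mapper in use (the class carries @JsonIgnoreProperties); the controller endpoint itself is not shown in this diff, and the sample payload is illustrative.

    import com.fasterxml.jackson.databind.ObjectMapper;

    // Illustrative only: binds a body of the shape {"clusterId": 1, "brokerIdList": [1, 2, 3]};
    // unknown fields are ignored because of @JsonIgnoreProperties(ignoreUnknown = true).
    ControllerPreferredCandidateDTO dto = new ObjectMapper().readValue(
            "{\"clusterId\":1,\"brokerIdList\":[1,2,3],\"extra\":true}",
            ControllerPreferredCandidateDTO.class); // readValue throws JsonProcessingException; handle or declare it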

View File

@@ -102,12 +102,11 @@ public class ClusterDTO {
'}';
}
public Boolean legal() {
public boolean legal() {
if (ValidateUtils.isNull(clusterName)
|| ValidateUtils.isNull(zookeeper)
|| ValidateUtils.isNull(idc)
|| ValidateUtils.isNull(bootstrapServers)
) {
|| ValidateUtils.isNull(bootstrapServers)) {
return false;
}
return true;

View File

@@ -17,6 +17,8 @@ public class GatewayConfigDO {
private Long version;
private String description;
private Date createTime;
private Date modifyTime;
@@ -61,6 +63,14 @@ public class GatewayConfigDO {
this.version = version;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Date getCreateTime() {
return createTime;
}
@@ -85,6 +95,7 @@ public class GatewayConfigDO {
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", description='" + description + '\'' +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';

View File

@@ -28,7 +28,7 @@ public class ExpiredTopicVO {
@ApiModelProperty(value = "负责人")
private String principals;
@ApiModelProperty(value = "状态, -1:可下线, 0:过期待通知, 1+:已通知待反馈")
@ApiModelProperty(value = "状态, -1:已通知可下线, 0:过期待通知, 1+:已通知待反馈")
private Integer status;
public Long getClusterId() {

View File

@@ -26,6 +26,9 @@ public class GatewayConfigVO {
@ApiModelProperty(value="版本")
private Long version;
@ApiModelProperty(value="描述说明")
private String description;
@ApiModelProperty(value="创建时间")
private Date createTime;
@@ -72,6 +75,14 @@ public class GatewayConfigVO {
this.version = version;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Date getCreateTime() {
return createTime;
}
@@ -96,6 +107,7 @@ public class GatewayConfigVO {
", name='" + name + '\'' +
", value='" + value + '\'' +
", version=" + version +
", description='" + description + '\'' +
", createTime=" + createTime +
", modifyTime=" + modifyTime +
'}';

View File

@@ -60,6 +60,13 @@ public class JsonUtils {
return JSON.parseObject(src, clazz);
}
public static <T> List<T> stringToArrObj(String src, Class<T> clazz) {
if (ValidateUtils.isBlank(src)) {
return null;
}
return JSON.parseArray(src, clazz);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
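
A short usage sketch for the new stringToArrObj helper, which delegates to fastjson's JSON.parseArray and returns null for blank input; the payload is illustrative.

    // Illustrative only: parse a JSON array of broker ids.
    List<Integer> brokerIds = JsonUtils.stringToArrObj("[1, 2, 3]", Integer.class);
    List<Integer> none = JsonUtils.stringToArrObj("", Integer.class); // blank input returns null, not an empty list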

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.common.utils;
import org.apache.commons.lang.StringUtils;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -11,6 +12,20 @@ import java.util.Set;
* @date 20/4/16
*/
public class ValidateUtils {
/**
* Returns true if any of the objects is null
*/
public static boolean anyNull(Object... objects) {
return Arrays.stream(objects).anyMatch(ValidateUtils::isNull);
}
/**
* Returns true if any of the strings is blank or null
*/
public static boolean anyBlank(String... strings) {
return Arrays.stream(strings).anyMatch(StringUtils::isBlank);
}
/**
* Is null
*/
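
A quick sketch of typical call sites for the two new helpers, assuming a method that returns Result; the variable names are illustrative only.

    // Illustrative only: short-circuit parameter checks with the new helpers.
    if (ValidateUtils.anyNull(clusterId, brokerIdList)) {
        return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
    }
    if (ValidateUtils.anyBlank(topicName, appId)) {
        return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
    }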

View File

@@ -79,7 +79,7 @@ public class JmxConnectorWrap {
try {
Map<String, Object> environment = new HashMap<String, Object>();
if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) {
environment.put(javax.management.remote.JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
environment.put(JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
}
if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
environment.put(Context.SECURITY_PROTOCOL, "ssl");

View File

@@ -33,7 +33,9 @@ public class ZkPathUtil {
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
public static final String D_CONTROLLER_CANDIDATES = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension/candidates";
public static final String D_CONFIG_EXTENSION_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension";
public static final String D_CONTROLLER_CANDIDATES = D_CONFIG_EXTENSION_ROOT_NODE + ZOOKEEPER_SEPARATOR + "candidates";
public static String getBrokerIdNodePath(Integer brokerId) {
return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
@@ -111,6 +113,10 @@ public class ZkPathUtil {
}
public static String getKafkaExtraMetricsPath(Integer brokerId) {
return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + brokerId;
}
public static String getControllerCandidatePath(Integer brokerId) {
return D_CONTROLLER_CANDIDATES + ZOOKEEPER_SEPARATOR + brokerId;
}
}
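
A worked example of the relocated candidate path, assuming CONFIG_ROOT_NODE resolves to "/config" and ZOOKEEPER_SEPARATOR to "/"; neither constant's value appears in this hunk, so the resulting string is an assumption.

    // Illustrative only, under the assumptions above:
    // getControllerCandidatePath(1) -> "/config/extension/candidates/1"
    String candidatePath = ZkPathUtil.getControllerCandidatePath(1);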

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.kafka.manager.common.utils;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
public class JsonUtilsTest {
@Test
public void testMapToJsonString() {
Map<String, Object> map = new HashMap<>();
map.put("key", "value");
map.put("int", 1);
String expectRes = "{\"key\":\"value\",\"int\":1}";
Assert.assertEquals(expectRes, JsonUtils.toJSONString(map));
}
}