Merge pull request #1 from didi/dev

Dev
This commit is contained in:
tcf1207239873
2021-05-11 10:28:02 +08:00
committed by GitHub
82 changed files with 1005 additions and 387 deletions

View File

@@ -46,7 +46,7 @@ public enum OperateEnum {
public static boolean validate(Integer code) {
if (code == null) {
return false;
return true;
}
for (OperateEnum state : OperateEnum.values()) {
if (state.getCode() == code) {

View File

@@ -20,6 +20,12 @@ public class ApiPrefix {
// Prefix for open (third-party) APIs.
public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/";
// APIs exposed to OP; clusters referenced by these APIs must be physical clusters.
public static final String API_V1_THIRD_PART_OP_PREFIX = API_V1_THIRD_PART_PREFIX + "op/";
// APIs exposed to Normal users; clusters referenced by these APIs must be logical clusters.
public static final String API_V1_THIRD_PART_NORMAL_PREFIX = API_V1_THIRD_PART_PREFIX + "normal/";
// Prefix for gateway-routed APIs.
public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX;
}

View File

@@ -40,6 +40,9 @@ public class TopicCreationDTO extends ClusterTopicDTO {
@ApiModelProperty(value = "Topic属性列表")
private Properties properties;
@ApiModelProperty(value = "最大写入字节数")
private Long peakBytesIn;
/** @return the application id carried by this creation DTO (inherited field). */
public String getAppId() {
return appId;
}
@@ -104,6 +107,14 @@ public class TopicCreationDTO extends ClusterTopicDTO {
this.properties = properties;
}
/** @return the requested maximum write throughput in bytes (per the field's ApiModelProperty); may be null when unset. */
public Long getPeakBytesIn() {
return peakBytesIn;
}
/** @param peakBytesIn requested maximum write throughput in bytes; null is allowed (callers map null to a sentinel, e.g. -1L in TopicDO). */
public void setPeakBytesIn(Long peakBytesIn) {
this.peakBytesIn = peakBytesIn;
}
@Override
public String toString() {
return "TopicCreationDTO{" +
@@ -135,4 +146,4 @@ public class TopicCreationDTO extends ClusterTopicDTO {
}
return true;
}
}
}

View File

@@ -81,11 +81,6 @@ public class OperateRecordDTO {
}
public boolean legal() {
if (!ModuleEnum.validate(moduleId) ||
(!ValidateUtils.isNull(operateId) && OperateEnum.validate(operateId))
) {
return false;
}
return true;
return !ValidateUtils.isNull(moduleId) && ModuleEnum.validate(moduleId) && OperateEnum.validate(operateId);
}
}

View File

@@ -1,6 +1,7 @@
package com.xiaojukeji.kafka.manager.common.entity.pojo;
import com.xiaojukeji.kafka.manager.common.entity.dto.op.topic.TopicCreationDTO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import java.util.Date;
@@ -95,6 +96,7 @@ public class TopicDO {
topicDO.setClusterId(dto.getClusterId());
topicDO.setTopicName(dto.getTopicName());
topicDO.setDescription(dto.getDescription());
topicDO.setPeakBytesIn(ValidateUtils.isNull(dto.getPeakBytesIn()) ? -1L : dto.getPeakBytesIn());
return topicDO;
}
}
}

View File

@@ -33,7 +33,7 @@ public class BrokerOverviewVO {
@ApiModelProperty(value = "分区数")
private Integer partitionCount;
@ApiModelProperty(value = "已同步副本")
@ApiModelProperty(value = "失效副本分区的个")
private Integer underReplicatedPartitions;
@ApiModelProperty(value = "未同步")

View File

@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;

/**
 * View object carrying a Topic's peak-traffic statistic.
 *
 * @author zengqiao
 * @date 20/8/14
 */
@ApiModel(description="Topic流量统计信息")
public class TopicStatisticMetricsVO {
    @ApiModelProperty(value="峰值流入流量(B/s)")
    private Double peakBytesIn;

    public TopicStatisticMetricsVO(Double peakBytesIn) {
        this.peakBytesIn = peakBytesIn;
    }

    public Double getPeakBytesIn() {
        return peakBytesIn;
    }

    public void setPeakBytesIn(Double peakBytesIn) {
        this.peakBytesIn = peakBytesIn;
    }

    @Override
    public String toString() {
        // %s on a Double renders via Double.toString, identical to string concatenation.
        return String.format("TopicStatisticMetricsVO{peakBytesIn=%s}", peakBytesIn);
    }
}

View File

@@ -1,8 +1,5 @@
package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
@@ -18,12 +15,11 @@ import java.util.List;
* "host":null,
* "timestamp":"1546632983233",
* "port":-1,
* "version":4
* "version":4,
* "rack": "CY"
* }
*/
public class BrokerMetadata implements Cloneable {
private final static Logger LOGGER = LoggerFactory.getLogger(TopicMetadata.class);
private long clusterId;
private int brokerId;
@@ -43,6 +39,8 @@ public class BrokerMetadata implements Cloneable {
private long timestamp;
private String rack;
public long getClusterId() {
return clusterId;
}
@@ -107,14 +105,12 @@ public class BrokerMetadata implements Cloneable {
this.timestamp = timestamp;
}
@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException var3) {
LOGGER.error("clone BrokerMetadata failed.", var3);
}
return null;
/** @return the broker's rack id as read from the ZK znode (see the "rack" field in the class-level JSON example); may be null. */
public String getRack() {
return rack;
}
/** @param rack the broker's rack id; null when the broker registered without rack info. */
public void setRack(String rack) {
this.rack = rack;
}
@Override
@@ -128,6 +124,7 @@ public class BrokerMetadata implements Cloneable {
", jmxPort=" + jmx_port +
", version='" + version + '\'' +
", timestamp=" + timestamp +
", rack='" + rack + '\'' +
'}';
}
}