Mirror of https://github.com/didi/KnowStreaming.git (synced 2025-12-24 11:52:08 +08:00)
Commit: Sort out the Task-module tasks (BrokerMetrics task reorganization)

Summary: Broker-metric collection is decoupled from storage. A scheduled task (CollectAndPublishBrokerMetrics) now publishes a BatchBrokerMetricsCollectedEvent, and separate listeners (SinkBrokerMetrics2DB, SinkClusterMetrics2DB) write the broker and cluster metrics to the DB. StoreBrokerMetrics, which previously did both, is removed, and the related switches move under task.metrics.{collect,sink,delete}.
@@ -27,7 +27,6 @@ spring:
   main:
     allow-bean-definition-overriding: true
 
-
   servlet:
     multipart:
       max-file-size: 100MB
@@ -37,28 +36,32 @@ logging:
   config: classpath:logback-spring.xml
 
 custom:
-  idc: cn # data center of the deployment; ignore this config, it will be removed later
-  jmx:
-    max-conn: 10 # in version 2.3 this setting no longer takes effect here
+  idc: cn
   store-metrics-task:
     community:
-      broker-metrics-enabled: true # switch for collecting community broker metrics; when off, they are neither collected nor written to the DB
-      topic-metrics-enabled: true # switch for collecting community topic metrics; when off, they are neither collected nor written to the DB
-    didi:
-      app-topic-metrics-enabled: false # metric instrumented by Didi; community Apache Kafka lacks it, so off by default
-      topic-request-time-metrics-enabled: false # metric instrumented by Didi; community Apache Kafka lacks it, so off by default
-      topic-throttled-metrics-enabled: false # metric instrumented by Didi; community Apache Kafka lacks it, so off by default
+      topic-metrics-enabled: true
+    didi: # metrics specific to Didi Kafka
+      app-topic-metrics-enabled: false
+      topic-request-time-metrics-enabled: false
+      topic-throttled-metrics-enabled: false
 
-# task-related switches
+# task-related configuration
 task:
   op:
     sync-topic-enabled: false # periodically sync not-yet-persisted Topics into the DB
     order-auto-exec: # switches for the automatic order-approval thread
       topic-enabled: false # automatic approval of Topic orders; false: off, true: on
       app-enabled: false # automatic approval of App orders; false: off, true: on
   metrics:
-    delete-metrics:
-      delete-limit-size: 1000
+    collect: # metrics collection
+      broker-metrics-enabled: true # collect Broker metrics
+    sink: # metrics sinking
+      cluster-metrics: # sink cluster metrics
+        sink-db-enabled: true # sink to the DB
+      broker-metrics: # sink broker metrics
+        sink-db-enabled: true # sink to the DB
+    delete: # metrics deletion
+      delete-limit-size: 1000 # batch size per delete
       cluster-metrics-save-days: 14 # days to retain cluster metrics
       broker-metrics-save-days: 14 # days to retain Broker metrics
       topic-metrics-save-days: 7 # days to retain Topic metrics
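For readers less familiar with how these keys are consumed: each new subtree gates a Spring bean via @ConditionalOnProperty, exactly as the Java hunks further down show. A minimal, self-contained sketch of the pattern (the class name is illustrative, not from the commit):

import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;

// The bean exists only when task.metrics.collect.broker-metrics-enabled is
// "true", or when the key is absent entirely (matchIfMissing = true), so the
// collector stays on by default for deployments that never set the new key.
@Component
@ConditionalOnProperty(prefix = "task.metrics.collect", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class MetricsToggleSketch {
}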
@@ -66,64 +69,6 @@ task:
       topic-throttled-metrics-save-days: 7 # days to retain Topic throttle metrics
       app-topic-metrics-save-days: 7 # days to retain App+Topic metrics
 
-# LDAP-related configuration
-account:
-  ldap:
-    enabled: false
-    url: ldap://127.0.0.1:389/
-    basedn: dc=tsign,dc=cn
-    factory: com.sun.jndi.ldap.LdapCtxFactory
-    filter: sAMAccountName
-    security:
-      authentication: simple
-      principal: cn=admin,dc=tsign,dc=cn
-      credentials: admin
-    auth-user-registration: true
-    auth-user-registration-role: normal
-
-# cluster upgrade and deployment features; used together with N9e (Nightingale) and S3
-kcm:
-  enabled: false
-  s3:
-    endpoint: s3.didiyunapi.com
-    access-key: 1234567890
-    secret-key: 0987654321
-    bucket: logi-kafka
-  n9e:
-    base-url: http://127.0.0.1:8004
-    user-token: 12345678
-    timeout: 300
-    account: root
-    script-file: kcm_script.sh
-
-# monitoring and alerting features; used together with N9e (Nightingale)
-# enabled: whether monitoring/alerting is on, true: on, false: off
-# n9e.nid: N9e node ID
-# n9e.user-token: the user's secret, found in N9e personal settings
-# n9e.mon.base-url: monitoring address
-# n9e.sink.base-url: data-reporting address
-# n9e.rdb.base-url: user resource-center address
-
-monitor:
-  enabled: false
-  n9e:
-    nid: 2
-    user-token: 1234567890
-    mon:
-      base-url: http://127.0.0.1:8000 # N9e v4 unified the default port to 8000
-    sink:
-      base-url: http://127.0.0.1:8000 # N9e v4 unified the default port to 8000
-    rdb:
-      base-url: http://127.0.0.1:8000 # N9e v4 unified the default port to 8000
-
-
-notify: # notification feature
-  kafka: # by default notifications are sent to the specified Kafka Topic
-    cluster-id: 95 # cluster ID of the Topic
-    topic-name: didi-kafka-notify # Topic name
-  order: # address of the deployed KM
-    detail-url: http://127.0.0.1
-
 thread-pool:
   collect-metrics:
     thread-num: 256 # size of the metrics-collection thread pool
@@ -137,4 +82,51 @@ client-pool:
     min-idle-client-num: 24 # minimum idle clients
     max-idle-client-num: 24 # maximum idle clients
     max-total-client-num: 24 # maximum total clients
     borrow-timeout-unit-ms: 3000 # borrow timeout, in milliseconds
+
+account:
+  ldap:
+    enabled: false
+    url: ldap://127.0.0.1:389/
+    basedn: dc=tsign,dc=cn
+    factory: com.sun.jndi.ldap.LdapCtxFactory
+    filter: sAMAccountName
+    security:
+      authentication: simple
+      principal: cn=admin,dc=tsign,dc=cn
+      credentials: admin
+    auth-user-registration: true
+    auth-user-registration-role: normal
+
+kcm:
+  enabled: false
+  s3:
+    endpoint: s3.didiyunapi.com
+    access-key: 1234567890
+    secret-key: 0987654321
+    bucket: logi-kafka
+  n9e:
+    base-url: http://127.0.0.1:8004
+    user-token: 12345678
+    timeout: 300
+    account: root
+    script-file: kcm_script.sh
+
+monitor:
+  enabled: false
+  n9e:
+    nid: 2
+    user-token: 1234567890
+    mon:
+      base-url: http://127.0.0.1:8000 # N9e v4 unified the default port to 8000
+    sink:
+      base-url: http://127.0.0.1:8000 # N9e v4 unified the default port to 8000
+    rdb:
+      base-url: http://127.0.0.1:8000 # N9e v4 unified the default port to 8000
+
+notify:
+  kafka:
+    cluster-id: 95
+    topic-name: didi-kafka-notify
+  order:
+    detail-url: http://127.0.0.1
@@ -0,0 +1,33 @@
+package com.xiaojukeji.kafka.manager.common.events.metrics;
+
+import org.springframework.context.ApplicationEvent;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+public class BaseMetricsCollectedEvent extends ApplicationEvent {
+    /**
+     * Physical cluster ID
+     */
+    protected final Long physicalClusterId;
+
+    /**
+     * Collection time; set according to business needs, either the task start time or the task end time
+     */
+    protected final Long collectTime;
+
+    public BaseMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime) {
+        super(source);
+        this.physicalClusterId = physicalClusterId;
+        this.collectTime = collectTime;
+    }
+
+    public Long getPhysicalClusterId() {
+        return physicalClusterId;
+    }
+
+    public Long getCollectTime() {
+        return collectTime;
+    }
+}
@@ -0,0 +1,22 @@
+package com.xiaojukeji.kafka.manager.common.events.metrics;
+
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+
+import java.util.List;
+
+/**
+ * @author zengqiao
+ * @date 20/8/31
+ */
+public class BatchBrokerMetricsCollectedEvent extends BaseMetricsCollectedEvent {
+    private final List<BrokerMetrics> metricsList;
+
+    public BatchBrokerMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime, List<BrokerMetrics> metricsList) {
+        super(source, physicalClusterId, collectTime);
+        this.metricsList = metricsList;
+    }
+
+    public List<BrokerMetrics> getMetricsList() {
+        return metricsList;
+    }
+}
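These two events ride on Spring's standard application-event mechanism: anything holding an ApplicationEventPublisher can publish a batch, and every ApplicationListener<BatchBrokerMetricsCollectedEvent> bean receives it. A minimal sketch of that publish/subscribe pair (the class names here are illustrative; the commit's real pair is CollectAndPublishBrokerMetrics and the Sink*2DB listeners below):

import java.util.Collections;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;

// Illustrative publisher: fires one event per collection round.
@Component
class MetricsPublisherSketch {
    @Autowired
    private ApplicationEventPublisher publisher;

    public void publishOneBatch(Long clusterId) {
        publisher.publishEvent(new BatchBrokerMetricsCollectedEvent(
                this, clusterId, System.currentTimeMillis(), Collections.<BrokerMetrics>emptyList()));
    }
}

// Illustrative listener: Spring resolves the generic type and delivers
// matching events to every such bean.
@Component
class MetricsListenerSketch implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
    @Override
    public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
        System.out.println("got " + event.getMetricsList().size()
                + " broker metrics for cluster " + event.getPhysicalClusterId());
    }
}

Note that Spring delivers such events synchronously on the publishing thread by default, so a slow sink listener would delay the collecting task.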
@@ -1,7 +1,6 @@
 package com.xiaojukeji.kafka.manager.task.component;
 
 import com.google.common.collect.Lists;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
 import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
 import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
 import com.xiaojukeji.kafka.manager.common.utils.NetUtils;
@@ -29,7 +28,7 @@ import java.util.concurrent.*;
  * @date 20/8/10
  */
 public abstract class AbstractScheduledTask<E extends Comparable> implements SchedulingConfigurer {
-    private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class);
 
     @Autowired
     private HeartbeatDao heartbeatDao;
@@ -1,6 +1,5 @@
 package com.xiaojukeji.kafka.manager.task.component;
 
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -9,11 +8,11 @@ import org.slf4j.LoggerFactory;
  * @date 20/8/10
  */
 public class BaseBizTask<E extends Comparable> implements Runnable {
-    private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
+    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class);
 
-    private E task;
+    private final E task;
 
-    private AbstractScheduledTask scheduledTask;
+    private final AbstractScheduledTask scheduledTask;
 
     public BaseBizTask(E task, AbstractScheduledTask scheduledTask) {
         this.task = task;
@@ -30,6 +29,7 @@ public class BaseBizTask<E extends Comparable> implements Runnable {
         } catch (Throwable t) {
             LOGGER.error("scheduled task scheduleName:{} execute failed, task:{}", scheduledTask.getScheduledName(), task, t);
         }
+
         LOGGER.info("scheduled task scheduleName:{} finished, cost-time:{}ms.", scheduledTask.getScheduledName(), System.currentTimeMillis() - startTime);
     }
 }
@@ -0,0 +1,93 @@
+package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
+
+import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
+import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
+import com.xiaojukeji.kafka.manager.service.service.ClusterService;
+import com.xiaojukeji.kafka.manager.service.service.JmxService;
+import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
+import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
+import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Collect Broker metrics
+ * @author zengqiao
+ * @date 20/5/7
+ */
+@CustomScheduled(name = "collectAndPublishBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
+@ConditionalOnProperty(prefix = "task.metrics.collect", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
+public class CollectAndPublishBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
+    private static final Logger LOGGER = LoggerFactory.getLogger(CollectAndPublishBrokerMetrics.class);
+
+    @Autowired
+    private JmxService jmxService;
+
+    @Autowired
+    private ClusterService clusterService;
+
+    @Autowired
+    private AbstractHealthScoreStrategy healthScoreStrategy;
+
+    @Override
+    protected List<ClusterDO> listAllTasks() {
+        return clusterService.list();
+    }
+
+    @Override
+    public void processTask(ClusterDO clusterDO) {
+        long startTime = System.currentTimeMillis();
+
+        try {
+            SpringTool.publish(new BatchBrokerMetricsCollectedEvent(
+                    this,
+                    clusterDO.getId(),
+                    startTime,
+                    this.getBrokerMetrics(clusterDO.getId()))
+            );
+        } catch (Exception e) {
+            LOGGER.error("collect broker-metrics failed, physicalClusterId:{}.", clusterDO.getId(), e);
+        }
+
+        LOGGER.info("collect broker-metrics finished, physicalClusterId:{} costTime:{}", clusterDO.getId(), System.currentTimeMillis() - startTime);
+    }
+
+    private List<BrokerMetrics> getBrokerMetrics(Long clusterId) {
+        List<BrokerMetrics> metricsList = new ArrayList<>();
+        for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) {
+            BrokerMetrics metrics = jmxService.getBrokerMetrics(
+                    clusterId,
+                    brokerId,
+                    KafkaMetricsCollections.BROKER_TO_DB_METRICS
+            );
+
+            if (ValidateUtils.isNull(metrics)) {
+                continue;
+            }
+
+            metrics.getMetricsMap().put(
+                    JmxConstant.HEALTH_SCORE,
+                    healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics)
+            );
+
+            metricsList.add(metrics);
+        }
+
+        if (ValidateUtils.isEmptyList(metricsList)) {
+            return new ArrayList<>();
+        }
+
+        return metricsList;
+    }
+}
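The collector publishes through SpringTool.publish rather than injecting a publisher. SpringTool's source is not part of this diff; presumably it is a static ApplicationContext holder along the following lines, which is a common way to publish events from code that is not itself a managed bean. A sketch under that assumption:

import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationEvent;
import org.springframework.stereotype.Component;

// Assumed shape of SpringTool: capture the ApplicationContext once at
// startup, then expose a static publish helper usable from any code path.
@Component
public class SpringToolSketch implements ApplicationContextAware {
    private static ApplicationContext context;

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) {
        context = applicationContext;
    }

    public static void publish(ApplicationEvent event) {
        context.publishEvent(event);
    }
}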
@@ -42,25 +42,25 @@ public class DeleteMetrics extends AbstractScheduledTask<EmptyEntry> {
     @Autowired
     private TopicThrottledMetricsDao topicThrottledMetricsDao;
 
-    @Value(value = "${task.metrics.delete-metrics.delete-limit-size:1000}")
+    @Value(value = "${task.metrics.delete.delete-limit-size:1000}")
     private Integer deleteLimitSize;
 
-    @Value(value = "${task.metrics.delete-metrics.cluster-metrics-save-days:14}")
+    @Value(value = "${task.metrics.delete.cluster-metrics-save-days:14}")
     private Integer clusterMetricsSaveDays;
 
-    @Value(value = "${task.metrics.delete-metrics.broker-metrics-save-days:14}")
+    @Value(value = "${task.metrics.delete.broker-metrics-save-days:14}")
     private Integer brokerMetricsSaveDays;
 
-    @Value(value = "${task.metrics.delete-metrics.topic-metrics-save-days:7}")
+    @Value(value = "${task.metrics.delete.topic-metrics-save-days:7}")
     private Integer topicMetricsSaveDays;
 
-    @Value(value = "${task.metrics.delete-metrics.topic-request-time-metrics-save-days:7}")
+    @Value(value = "${task.metrics.delete.topic-request-time-metrics-save-days:7}")
     private Integer topicRequestTimeMetricsSaveDays;
 
-    @Value(value = "${task.metrics.delete-metrics.topic-throttled-metrics-save-days:7}")
+    @Value(value = "${task.metrics.delete.topic-throttled-metrics-save-days:7}")
     private Integer topicThrottledMetricsSaveDays;
 
-    @Value(value = "${task.metrics.delete-metrics.app-topic-metrics-save-days:7}")
+    @Value(value = "${task.metrics.delete.app-topic-metrics-save-days:7}")
     private Integer appTopicMetricsSaveDays;
 
     @Override
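Only the property prefix changes here, and the ${key:default} fallback after the colon keeps old deployments working: if a renamed key is absent from application.yml, Spring injects the literal default instead. A minimal illustration (the class name is hypothetical):

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// If task.metrics.delete.delete-limit-size is absent from the environment,
// Spring injects the literal after the colon, i.e. 1000; likewise 14 below.
@Component
public class DeleteConfigSketch {
    @Value("${task.metrics.delete.delete-limit-size:1000}")
    private Integer deleteLimitSize;

    @Value("${task.metrics.delete.broker-metrics-save-days:14}")
    private Integer brokerMetricsSaveDays;
}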
@@ -1,146 +0,0 @@
-package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
-
-import com.xiaojukeji.kafka.manager.common.constant.Constant;
-import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
-import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
-import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
-import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
-import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
-import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
-import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
-import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
-import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
-import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
-import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
-import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
-import com.xiaojukeji.kafka.manager.service.service.ClusterService;
-import com.xiaojukeji.kafka.manager.service.service.JmxService;
-import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
-import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
-import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
-import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Store Broker metrics to the DB: Broker traffic and cluster traffic
- * @author zengqiao
- * @date 20/5/7
- */
-@CustomScheduled(name = "storeBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
-@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
-public class StoreBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
-
-    @Autowired
-    private JmxService jmxService;
-
-    @Autowired
-    private ClusterService clusterService;
-
-    @Autowired
-    private BrokerMetricsDao brokerMetricsDao;
-
-    @Autowired
-    private ClusterMetricsDao clusterMetricsDao;
-
-    @Autowired
-    private AbstractHealthScoreStrategy healthScoreStrategy;
-
-    @Override
-    protected List<ClusterDO> listAllTasks() {
-        return clusterService.list();
-    }
-
-    @Override
-    public void processTask(ClusterDO clusterDO) {
-        long startTime = System.currentTimeMillis();
-        List<ClusterMetrics> clusterMetricsList = new ArrayList<>();
-
-        try {
-            List<BrokerMetrics> brokerMetricsList = getAndBatchAddMetrics(startTime, clusterDO.getId());
-            clusterMetricsList.add(supplyAndConvert2ClusterMetrics(
-                    clusterDO.getId(),
-                    MetricsConvertUtils.merge2BaseMetricsByAdd(brokerMetricsList))
-            );
-        } catch (Exception t) {
-            LOGGER.error("collect failed, clusterId:{}.", clusterDO.getId(), t);
-        }
-        long endTime = System.currentTimeMillis();
-        LOGGER.info("collect finish, clusterId:{} costTime:{}", clusterDO.getId(), endTime - startTime);
-
-        List<ClusterMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
-                startTime,
-                clusterMetricsList
-        );
-
-        if (ValidateUtils.isEmptyList(doList)) {
-            return;
-        }
-
-        clusterMetricsDao.batchAdd(doList);
-    }
-
-    private List<BrokerMetrics> getAndBatchAddMetrics(Long startTime, Long clusterId) {
-        List<BrokerMetrics> metricsList = new ArrayList<>();
-        for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) {
-            BrokerMetrics metrics = jmxService.getBrokerMetrics(
-                    clusterId,
-                    brokerId,
-                    KafkaMetricsCollections.BROKER_TO_DB_METRICS
-            );
-            if (ValidateUtils.isNull(metrics)) {
-                continue;
-            }
-            metrics.getMetricsMap().put(
-                    JmxConstant.HEALTH_SCORE,
-                    healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics)
-            );
-            metricsList.add(metrics);
-        }
-        if (ValidateUtils.isEmptyList(metricsList)) {
-            return new ArrayList<>();
-        }
-
-        List<BrokerMetricsDO> doList =
-                MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(startTime, metricsList);
-        int i = 0;
-        do {
-            List<BrokerMetricsDO> subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
-            if (ValidateUtils.isEmptyList(subDOList)) {
-                break;
-            }
-
-            brokerMetricsDao.batchAdd(subDOList);
-            i += Constant.BATCH_INSERT_SIZE;
-        } while (i < doList.size());
-
-        return metricsList;
-    }
-
-    private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
-        ClusterMetrics metrics = new ClusterMetrics(clusterId);
-        Map<String, Object> metricsMap = metrics.getMetricsMap();
-        metricsMap.putAll(baseMetrics.getMetricsMap());
-        metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
-        metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
-        Integer partitionNum = 0;
-        for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
-            TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
-            if (ValidateUtils.isNull(topicMetaData)) {
-                continue;
-            }
-            partitionNum += topicMetaData.getPartitionNum();
-        }
-        metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
-        return metrics;
-    }
-}
@@ -0,0 +1,55 @@
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
+
+import com.xiaojukeji.kafka.manager.common.constant.Constant;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
+import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+
+import java.util.List;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+@Component
+@ConditionalOnProperty(prefix = "task.metrics.sink.broker-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
+public class SinkBrokerMetrics2DB implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
+    private static final Logger logger = LoggerFactory.getLogger(SinkBrokerMetrics2DB.class);
+
+    @Autowired
+    private BrokerMetricsDao metricsDao;
+
+    @Override
+    public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
+        logger.debug("sink broker-metrics to db start, event:{}.", event);
+
+        List<BrokerMetrics> metricsList = event.getMetricsList();
+        if (ValidateUtils.isEmptyList(metricsList)) {
+            logger.warn("sink broker-metrics to db finished, without need sink, event:{}.", event);
+            return;
+        }
+
+        List<BrokerMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(event.getCollectTime(), metricsList);
+        int i = 0;
+        while (i < doList.size()) {
+            List<BrokerMetricsDO> subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
+            if (ValidateUtils.isEmptyList(subDOList)) {
+                break;
+            }
+
+            metricsDao.batchAdd(subDOList);
+            i += Constant.BATCH_INSERT_SIZE;
+        }
+
+        logger.debug("sink broker-metrics to db finished, event:{}.", event);
+    }
+}
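The write path chunks the DO list so a single insert never exceeds Constant.BATCH_INSERT_SIZE rows. The same windowing logic in isolation (the batch size value here is an assumption; the real constant lives in Constant):

import java.util.ArrayList;
import java.util.List;

public class BatchWindowSketch {
    // Assumed stand-in for Constant.BATCH_INSERT_SIZE.
    private static final int BATCH_INSERT_SIZE = 100;

    public static void main(String[] args) {
        List<Integer> doList = new ArrayList<>();
        for (int v = 0; v < 250; v++) {
            doList.add(v);
        }

        // Walk the list in fixed-size windows; subList is a view, so no copying.
        int i = 0;
        while (i < doList.size()) {
            List<Integer> subDOList = doList.subList(i, Math.min(i + BATCH_INSERT_SIZE, doList.size()));
            System.out.println("batchAdd " + subDOList.size() + " rows"); // stands in for metricsDao.batchAdd(subDOList)
            i += BATCH_INSERT_SIZE;
        }
        // prints 100, 100, 50
    }
}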
@@ -0,0 +1,80 @@
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
+
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
+import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
+import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
+import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
+import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
+import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
+import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
+import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
+import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author zengqiao
+ * @date 22/01/17
+ */
+@Component
+@ConditionalOnProperty(prefix = "task.metrics.sink.cluster-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
+public class SinkClusterMetrics2DB implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
+    private static final Logger logger = LoggerFactory.getLogger(SinkClusterMetrics2DB.class);
+
+    @Autowired
+    private ClusterMetricsDao clusterMetricsDao;
+
+    @Override
+    public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
+        logger.debug("sink cluster-metrics to db start, event:{}.", event);
+
+        List<BrokerMetrics> metricsList = event.getMetricsList();
+        if (ValidateUtils.isEmptyList(metricsList)) {
+            logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
+            return;
+        }
+
+        List<ClusterMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
+                event.getCollectTime(),
+                // merge the broker metrics into cluster metrics
+                Arrays.asList(supplyAndConvert2ClusterMetrics(event.getPhysicalClusterId(), MetricsConvertUtils.merge2BaseMetricsByAdd(event.getMetricsList())))
+        );
+
+        if (ValidateUtils.isEmptyList(doList)) {
+            logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
+            return;
+        }
+
+        clusterMetricsDao.batchAdd(doList);
+
+        logger.debug("sink cluster-metrics to db finished, event:{}.", event);
+    }
+
+    private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
+        ClusterMetrics metrics = new ClusterMetrics(clusterId);
+        Map<String, Object> metricsMap = metrics.getMetricsMap();
+        metricsMap.putAll(baseMetrics.getMetricsMap());
+        metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
+        metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
+        Integer partitionNum = 0;
+        for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
+            TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
+            if (ValidateUtils.isNull(topicMetaData)) {
+                continue;
+            }
+            partitionNum += topicMetaData.getPartitionNum();
+        }
+        metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
+        return metrics;
+    }
+}
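merge2BaseMetricsByAdd is not part of this diff; judging by its name and how it is used here, it presumably folds the per-broker metric maps into one map by summing numeric values, which supplyAndConvert2ClusterMetrics then tops up with topic, broker, and partition counts. A sketch under that assumption:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hedged sketch of the assumed merge semantics: sum each numeric metric
// across all brokers so the result can serve as the cluster-level map.
public class MergeMetricsSketch {
    public static Map<String, Object> mergeByAdd(List<Map<String, Object>> brokerMetricMaps) {
        Map<String, Object> merged = new HashMap<>();
        for (Map<String, Object> metricsMap : brokerMetricMaps) {
            for (Map.Entry<String, Object> entry : metricsMap.entrySet()) {
                if (!(entry.getValue() instanceof Number)) {
                    continue; // non-numeric values cannot be summed
                }
                double prev = ((Number) merged.getOrDefault(entry.getKey(), 0d)).doubleValue();
                merged.put(entry.getKey(), prev + ((Number) entry.getValue()).doubleValue());
            }
        }
        return merged;
    }
}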
The seven existing listener classes under task.listener move into sink.db, sink.kafka, and sink.monitor subpackages (only the package lines change):

@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
 
 import com.xiaojukeji.kafka.manager.common.constant.Constant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.db;
 
 import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
 import com.xiaojukeji.kafka.manager.common.constant.Constant;
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
 
 import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
 
 import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
 
 import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
 
 import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
 import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
@@ -1,4 +1,4 @@
-package com.xiaojukeji.kafka.manager.task.listener;
+package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
 
 import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
 import com.xiaojukeji.kafka.manager.monitor.common.MonitorSinkConstant;
The second copy of the application configuration gets the same restructuring:

@@ -33,7 +33,6 @@ custom:
   idc: cn
   store-metrics-task:
     community:
-      broker-metrics-enabled: true
       topic-metrics-enabled: true
     didi: # metrics specific to Didi Kafka
       app-topic-metrics-enabled: false
@@ -43,13 +42,20 @@ custom:
 # task-related configuration
 task:
   op:
     sync-topic-enabled: false # periodically sync not-yet-persisted Topics into the DB
     order-auto-exec: # switches for the automatic order-approval thread
       topic-enabled: false # automatic approval of Topic orders; false: off, true: on
       app-enabled: false # automatic approval of App orders; false: off, true: on
   metrics:
-    delete-metrics:
-      delete-limit-size: 1000
+    collect: # metrics collection
+      broker-metrics-enabled: true # collect Broker metrics
+    sink: # metrics sinking
+      cluster-metrics: # sink cluster metrics
+        sink-db-enabled: true # sink to the DB
+      broker-metrics: # sink broker metrics
+        sink-db-enabled: true # sink to the DB
+    delete: # metrics deletion
+      delete-limit-size: 1000 # batch size per delete
       cluster-metrics-save-days: 14 # days to retain cluster metrics
       broker-metrics-save-days: 14 # days to retain Broker metrics
       topic-metrics-save-days: 7 # days to retain Topic metrics
@@ -57,6 +63,21 @@ task:
       topic-throttled-metrics-save-days: 7 # days to retain Topic throttle metrics
       app-topic-metrics-save-days: 7 # days to retain App+Topic metrics
 
+thread-pool:
+  collect-metrics:
+    thread-num: 256 # size of the metrics-collection thread pool
+    queue-size: 5000 # queue size of the metrics-collection thread pool
+  api-call:
+    thread-num: 16 # size of the API-service thread pool
+    queue-size: 5000 # queue size of the API-service thread pool
+
+client-pool:
+  kafka-consumer:
+    min-idle-client-num: 24 # minimum idle clients
+    max-idle-client-num: 24 # maximum idle clients
+    max-total-client-num: 24 # maximum total clients
+    borrow-timeout-unit-ms: 3000 # borrow timeout, in milliseconds
+
 account:
   ldap:
     enabled: false
@@ -103,18 +124,3 @@ notify:
     topic-name: didi-kafka-notify
   order:
     detail-url: http://127.0.0.1
-
-thread-pool:
-  collect-metrics:
-    thread-num: 256 # size of the metrics-collection thread pool
-    queue-size: 5000 # queue size of the metrics-collection thread pool
-  api-call:
-    thread-num: 16 # size of the API-service thread pool
-    queue-size: 5000 # queue size of the API-service thread pool
-
-client-pool:
-  kafka-consumer:
-    min-idle-client-num: 24 # minimum idle clients
-    max-idle-client-num: 24 # maximum idle clients
-    max-total-client-num: 24 # maximum total clients
-    borrow-timeout-unit-ms: 3000 # borrow timeout, in milliseconds