[Optimize] Unify log format - part 1 (#800)

zengqiao
2022-12-02 14:39:57 +08:00
parent 6241eb052a
commit 175b8d643a
8 changed files with 21 additions and 21 deletions
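
This commit removes the redundant "class=<ClassName>" key from the project's "key=value||key=value" structured log messages: each SLF4J logger is created against its owning class, so the class name can be emitted once by the logging pattern instead of being repeated in every format string. A minimal sketch of the resulting convention, assuming a Logback-style pattern whose %logger conversion word prints the owning class (the pattern string, the DemoService class, and the demo method are illustrative, not part of this commit):

    // Assumed Logback pattern; %logger{50} prints the class that owns the logger,
    // which is what makes "class=..." inside the message redundant:
    // <pattern>%d{yyyy-MM-dd HH:mm:ss} %-5level %logger{50} - %msg%n</pattern>

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DemoService {
        // The logger is bound to the class, so its name already identifies the source.
        private static final Logger log = LoggerFactory.getLogger(DemoService.class);

        public void demo(Long clusterPhyId, Exception e) {
            // Before: messages started with "class=DemoService||...", duplicating the logger name.
            // After: start directly with the method key. SLF4J still prints the stack
            // trace because the trailing argument is a Throwable.
            log.error("method=demo||clusterPhyId={}||errMsg=exception", clusterPhyId, e);
        }
    }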

View File

@@ -94,7 +94,7 @@ public class ClusterZookeepersManagerImpl implements ClusterZookeepersManager {
         );
         if (metricsResult.failed()) {
             LOGGER.error(
-                "class=ClusterZookeepersManagerImpl||method=getClusterPhyZookeepersState||clusterPhyId={}||errMsg={}",
+                "method=getClusterPhyZookeepersState||clusterPhyId={}||errMsg={}",
                 clusterPhyId, metricsResult.getMessage()
             );
             return Result.buildSuc(vo);

View File

@@ -140,7 +140,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
         try {
             adminClient = kafkaAdminClient.getClient(clusterPhy.getId());
         } catch (Exception e) {
-            log.error("class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
+            log.error("method=getControllerFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
             // The cluster has already been loaded, but creating the admin-client failed, so treat it as having no controller
             return Result.buildSuc();
@@ -178,7 +178,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
             ));
         } catch (Exception e) {
             log.error(
-                "class=KafkaControllerServiceImpl||method=getControllerFromAdminClient||clusterPhyId={}||tryTime={}||errMsg=exception",
+                "method=getControllerFromAdminClient||clusterPhyId={}||tryTime={}||errMsg=exception",
                 clusterPhy.getId(), i, e
             );
         }
@@ -192,7 +192,7 @@ public class KafkaControllerServiceImpl implements KafkaControllerService {
         try {
             return Result.buildSuc(kafkaZKDAO.getKafkaController(clusterPhy.getId(), false));
         } catch (Exception e) {
-            log.error("class=KafkaControllerServiceImpl||method=getControllerFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
+            log.error("method=getControllerFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
             return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
         }

View File

@@ -84,7 +84,7 @@ public class OpPartitionServiceImpl extends BaseVersionControlService implements
             return Result.buildSuc();
         } catch (Exception e) {
             LOGGER.error(
-                "class=OpPartitionServiceImpl||method=preferredReplicaElectionByZKClient||clusterPhyId={}||errMsg=exception",
+                "method=preferredReplicaElectionByZKClient||clusterPhyId={}||errMsg=exception",
                 partitionParam.getClusterPhyId(), e
             );
@@ -109,7 +109,7 @@ public class OpPartitionServiceImpl extends BaseVersionControlService implements
             return Result.buildSuc();
         } catch (Exception e) {
             LOGGER.error(
-                "class=OpPartitionServiceImpl||method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception",
+                "method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception",
                 partitionParam.getClusterPhyId(), e
             );

View File

@@ -191,7 +191,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
             }
         } else {
             LOGGER.warn(
-                "class=PartitionMetricServiceImpl||method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||resultMsg={}||msg=get begin offset failed",
+                "method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||resultMsg={}||msg=get begin offset failed",
                 clusterPhyId, topicName, beginOffsetMapResult.getMessage()
             );
         }
@@ -211,7 +211,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
             }
         } else {
             LOGGER.warn(
-                "class=PartitionMetricServiceImpl||method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||resultMsg={}||msg=get end offset failed",
+                "method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||resultMsg={}||msg=get end offset failed",
                 clusterPhyId, topicName, endOffsetMapResult.getMessage()
             );
         }
@@ -235,7 +235,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
             }
         } else {
             LOGGER.warn(
-                "class=PartitionMetricServiceImpl||method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||endResultMsg={}||beginResultMsg={}||msg=get messages failed",
+                "method=getOffsetRelevantMetrics||clusterPhyId={}||topicName={}||endResultMsg={}||beginResultMsg={}||msg=get messages failed",
                 clusterPhyId, topicName, endOffsetMapResult.getMessage(), beginOffsetMapResult.getMessage()
             );
         }
@@ -286,7 +286,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
             continue;
         } catch (Exception e) {
             LOGGER.error(
-                "class=PartitionMetricServiceImpl||method=getMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
+                "method=getMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
                 clusterPhyId, topicName, partition.getPartitionId(), partition.getLeaderBrokerId(), metricName, e.getClass().getName()
             );
         }
@@ -341,7 +341,7 @@ public class PartitionMetricServiceImpl extends BaseMetricService implements Par
             continue;
         } catch (Exception e) {
             LOGGER.error(
-                "class=PartitionMetricServiceImpl||method=getTopicAvgMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
+                "method=getTopicAvgMetricFromJmx||clusterPhyId={}||topicName={}||partitionId={}||leaderBrokerId={}||metricName={}||msg={}",
                 clusterPhyId, topicName, partition.getPartitionId(), partition.getLeaderBrokerId(), metricName, e.getClass().getName()
             );
         }

View File

@@ -260,7 +260,7 @@ public class TopicServiceImpl implements TopicService {
             return Result.buildSuc(topicList);
         } catch (Exception e) {
-            log.error("class=TopicServiceImpl||method=getTopicsFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
+            log.error("method=getTopicsFromAdminClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
             return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
         }
@@ -278,7 +278,7 @@ public class TopicServiceImpl implements TopicService {
             return Result.buildSuc(topicList);
         } catch (Exception e) {
-            log.error("class=TopicServiceImpl||method=getTopicsFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
+            log.error("method=getTopicsFromZKClient||clusterPhyId={}||errMsg=exception", clusterPhy.getId(), e);
             return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
         }

View File

@@ -43,10 +43,10 @@ public class ZnodeServiceImpl implements ZnodeService {
         try {
             children = kafkaZKDAO.getChildren(clusterPhyId, path, false);
         } catch (NotExistException e) {
-            LOGGER.error("class=ZnodeServiceImpl||method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
+            LOGGER.error("method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
             return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, "ZK客户端创建失败");
         } catch (Exception e) {
-            LOGGER.error("class=ZnodeServiceImpl||method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
+            LOGGER.error("method=listZnodeChildren||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
             return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, "ZK操作失败");
         }
@@ -69,10 +69,10 @@ public class ZnodeServiceImpl implements ZnodeService {
         try {
             dataAndStat = kafkaZKDAO.getDataAndStat(clusterPhyId, path);
         } catch (NotExistException e) {
-            LOGGER.error("class=ZnodeServiceImpl||method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
+            LOGGER.error("method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "create ZK client create failed");
             return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, "ZK客户端创建失败");
         } catch (Exception e) {
-            LOGGER.error("class=ZnodeServiceImpl||method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
+            LOGGER.error("method=getZnode||clusterPhyId={}||errMsg={}", clusterPhyId, "ZK operate failed");
             return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, "ZK操作失败");
         }

View File

@@ -140,7 +140,7 @@ public class ZookeeperMetricServiceImpl extends BaseMetricService implements Zoo
             metrics.putMetric(ret.getData().getMetrics());
         } catch (Exception e){
             LOGGER.error(
-                "class=ZookeeperMetricServiceImpl||method=collectMetricsFromZookeeper||clusterPhyId={}||metricName={}||errMsg=exception!",
+                "method=collectMetricsFromZookeeper||clusterPhyId={}||metricName={}||errMsg=exception!",
                 clusterPhyId, metricName, e
             );
         }

View File

@@ -41,7 +41,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
             addressList = ZookeeperUtils.connectStringParser(zookeeperAddress);
         } catch (Exception e) {
             LOGGER.error(
-                "class=ZookeeperServiceImpl||method=listFromZookeeperCluster||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
+                "method=listFromZookeeperCluster||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
                 clusterPhyId, zookeeperAddress, e
             );
@@ -87,7 +87,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
                 zookeeperDAO.updateById(newInfo);
             }
         } catch (Exception e) {
-            LOGGER.error("class=ZookeeperServiceImpl||method=batchReplaceDataInDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterPhyId, newInfo, e);
+            LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterPhyId, newInfo, e);
         }
     }
@@ -96,7 +96,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
             try {
                 zookeeperDAO.deleteById(entry.getValue().getId());
             } catch (Exception e) {
-                LOGGER.error("class=ZookeeperServiceImpl||method=batchReplaceDataInDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterPhyId, entry.getValue(), e);
+                LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterPhyId, entry.getValue(), e);
             }
         });
     }
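
Under such a pattern, a log line produced before this commit carried the class twice, once from %logger and once from the message itself (the output below is illustrative, and the abbreviated package prefix is a placeholder):

    2022-12-02 14:39:57 ERROR c.x.k.TopicServiceImpl - class=TopicServiceImpl||method=getTopicsFromZKClient||clusterPhyId=1||errMsg=exception

whereas after the change the class appears only in the logger field:

    2022-12-02 14:39:57 ERROR c.x.k.TopicServiceImpl - method=getTopicsFromZKClient||clusterPhyId=1||errMsg=exception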