Mirror of https://github.com/didi/KnowStreaming.git, synced 2025-12-24 03:42:07 +08:00
[Optimize] Metric collection performance optimization, part 1 (#726)
GroupManagerImpl.java:

@@ -12,6 +12,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.group.Group;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopic;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.group.GroupTopicMember;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
@@ -42,7 +43,6 @@ import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.Group
 import com.xiaojukeji.know.streaming.km.persistence.es.dao.GroupMetricESDAO;
 import org.apache.kafka.clients.admin.ConsumerGroupDescription;
 import org.apache.kafka.clients.admin.MemberDescription;
-import org.apache.kafka.clients.admin.OffsetSpec;
 import org.apache.kafka.common.ConsumerGroupState;
 import org.apache.kafka.common.TopicPartition;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -274,16 +274,16 @@ public class GroupManagerImpl implements GroupManager {
             )));
         }
 
-        OffsetSpec offsetSpec = null;
+        KSOffsetSpec offsetSpec = null;
         if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getResetType()) {
-            offsetSpec = OffsetSpec.forTimestamp(dto.getTimestamp());
+            offsetSpec = KSOffsetSpec.forTimestamp(dto.getTimestamp());
         } else if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getResetType()) {
-            offsetSpec = OffsetSpec.earliest();
+            offsetSpec = KSOffsetSpec.earliest();
         } else {
-            offsetSpec = OffsetSpec.latest();
+            offsetSpec = KSOffsetSpec.latest();
        }
 
-        return partitionService.getPartitionOffsetFromKafka(dto.getClusterId(), dto.getTopicName(), offsetSpec, dto.getTimestamp());
+        return partitionService.getPartitionOffsetFromKafka(dto.getClusterId(), dto.getTopicName(), offsetSpec);
     }
 
     private List<GroupTopicOverviewVO> convert2GroupTopicOverviewVOList(List<GroupMemberPO> poList, List<GroupMetrics> metricsList) {
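The changes above replace Kafka's org.apache.kafka.clients.admin.OffsetSpec with the project's own KSOffsetSpec, which also lets the separate timestamp argument disappear from getPartitionOffsetFromKafka. The diff only confirms the factory methods earliest(), latest() and forTimestamp(); everything else below (the Type enum, getters, toKafkaOffsetSpec) is a hypothetical sketch of what such a wrapper could look like, not the actual class shipped under com.xiaojukeji.know.streaming.km.common.bean.entity.offset.

// Hedged sketch only: the real KSOffsetSpec in KnowStreaming may differ.
// The idea is that the spec itself carries the lookup type and, when needed,
// the timestamp, so callers no longer pass a separate timestamp argument.
public class KSOffsetSpec {
    public enum Type { EARLIEST, LATEST, TIMESTAMP }

    private final Type type;
    private final Long timestamp;   // only meaningful for TIMESTAMP

    private KSOffsetSpec(Type type, Long timestamp) {
        this.type = type;
        this.timestamp = timestamp;
    }

    public static KSOffsetSpec earliest()              { return new KSOffsetSpec(Type.EARLIEST, null); }
    public static KSOffsetSpec latest()                { return new KSOffsetSpec(Type.LATEST, null); }
    public static KSOffsetSpec forTimestamp(long tsMs) { return new KSOffsetSpec(Type.TIMESTAMP, tsMs); }

    public Type getType()      { return type; }
    public Long getTimestamp() { return timestamp; }

    // Map back to the Kafka admin-client OffsetSpec when the request is issued.
    public org.apache.kafka.clients.admin.OffsetSpec toKafkaOffsetSpec() {
        switch (type) {
            case EARLIEST: return org.apache.kafka.clients.admin.OffsetSpec.earliest();
            case LATEST:   return org.apache.kafka.clients.admin.OffsetSpec.latest();
            default:       return org.apache.kafka.clients.admin.OffsetSpec.forTimestamp(timestamp);
        }
    }
}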
TopicStateManagerImpl.java:

@@ -10,6 +10,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.PartitionMetrics;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.TopicMetrics;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
@@ -46,7 +47,6 @@ import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
 import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems;
 import org.apache.commons.lang3.ObjectUtils;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.kafka.clients.admin.OffsetSpec;
 import org.apache.kafka.clients.consumer.*;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.config.TopicConfig;
@@ -143,12 +143,12 @@ public class TopicStateManagerImpl implements TopicStateManager {
         }
 
         // Get the partition beginOffset
-        Result<Map<TopicPartition, Long>> beginOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.earliest(), null);
+        Result<Map<TopicPartition, Long>> beginOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), KSOffsetSpec.earliest());
         if (beginOffsetsMapResult.failed()) {
             return Result.buildFromIgnoreData(beginOffsetsMapResult);
         }
         // Get the partition endOffset
-        Result<Map<TopicPartition, Long>> endOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), OffsetSpec.latest(), null);
+        Result<Map<TopicPartition, Long>> endOffsetsMapResult = partitionService.getPartitionOffsetFromKafka(clusterPhyId, topicName, dto.getFilterPartitionId(), KSOffsetSpec.latest());
         if (endOffsetsMapResult.failed()) {
             return Result.buildFromIgnoreData(endOffsetsMapResult);
         }
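For reference, the getPartitionOffsetFromKafka calls in both files ultimately resolve per-partition offsets through the Kafka AdminClient. Below is a hedged sketch of that lookup using only standard admin-client APIs (Admin.listOffsets with OffsetSpec); the class and method names PartitionOffsetLookupSketch and fetchOffsets are illustrative, and KnowStreaming's actual PartitionService may structure this differently.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

// Hedged sketch: roughly what a partition-offset lookup built on the Kafka
// AdminClient looks like; KnowStreaming's real PartitionService may differ.
public class PartitionOffsetLookupSketch {

    // Resolve one offset per partition for the given spec (earliest / latest / forTimestamp).
    public static Map<TopicPartition, Long> fetchOffsets(Admin admin,
                                                         Map<TopicPartition, OffsetSpec> request)
            throws ExecutionException, InterruptedException {
        ListOffsetsResult result = admin.listOffsets(request);

        Map<TopicPartition, Long> offsets = new HashMap<>();
        result.all().get().forEach((tp, info) -> offsets.put(tp, info.offset()));
        return offsets;
    }
}

With KSOffsetSpec in place, a caller would convert the spec to a Kafka OffsetSpec once, build the per-partition request map, and invoke a helper like this; the earlier trailing null timestamp argument is no longer needed because the spec already carries the timestamp.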