@@ -12,6 +12,7 @@
import io.kafbat.ui.model.ConsumerGroupDetailsDTO;
import io.kafbat.ui.model.ConsumerGroupOffsetsResetDTO;
import io.kafbat.ui.model.ConsumerGroupOrderingDTO;
import io.kafbat.ui.model.ConsumerGroupsLagResponseDTO;
import io.kafbat.ui.model.ConsumerGroupsPageResponseDTO;
import io.kafbat.ui.model.PartitionOffsetDTO;
import io.kafbat.ui.model.SortOrderDTO;
@@ -20,9 +21,12 @@
import io.kafbat.ui.service.ConsumerGroupService;
import io.kafbat.ui.service.OffsetsResetService;
import io.kafbat.ui.service.mcp.McpTool;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
@@ -32,6 +36,7 @@
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;

@RestController
@RequiredArgsConstructor
@@ -95,6 +100,39 @@ public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clu
.doOnEach(sig -> audit(context, sig));
}

@Override
public Mono<ResponseEntity<ConsumerGroupsLagResponseDTO>> getConsumerGroupsLag(String clusterName,
List<String> groupNames,
Long lastUpdate,
ServerWebExchange exchange) {

var context = AccessContext.builder()
.cluster(clusterName)
.operationName("getConsumerGroupsLag")
.build();

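// Fetch lag for the requested groups, keep only consumer groups the caller may access (RBAC),
// and carry the scrape timestamp through so it can be returned in the response.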
Mono<ResponseEntity<ConsumerGroupsLagResponseDTO>> result =
consumerGroupService.getConsumerGroupsLag(getCluster(clusterName), groupNames, Optional.ofNullable(lastUpdate))
.flatMap(t ->
Flux.fromIterable(t.getT1().entrySet())
.filterWhen(cg -> accessControlService.isConsumerGroupAccessible(cg.getKey(), clusterName))
.collectList()
.map(l -> l.stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)))
.map(l -> Tuples.of(t.getT2(), l))
)
.map(t ->
new ConsumerGroupsLagResponseDTO(
t.getT1().orElse(0L), t.getT2()
)
)
.map(ResponseEntity::ok)
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));

return validateAccess(context)
.then(result)
.doOnEach(sig -> audit(context, sig));
}

@Override
public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
String topicName,
@@ -1,16 +1,25 @@
package io.kafbat.ui.mapper;

import io.kafbat.ui.api.model.ConsumerGroupLag;
import io.kafbat.ui.api.model.ConsumerGroupState;
import io.kafbat.ui.model.BrokerDTO;
import io.kafbat.ui.model.ConsumerGroupDTO;
import io.kafbat.ui.model.ConsumerGroupDetailsDTO;
import io.kafbat.ui.model.ConsumerGroupLagDTO;
import io.kafbat.ui.model.ConsumerGroupStateDTO;
import io.kafbat.ui.model.ConsumerGroupTopicPartitionDTO;
import io.kafbat.ui.model.InternalConsumerGroup;
import io.kafbat.ui.model.InternalTopicConsumerGroup;
import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;

18 changes: 2 additions & 16 deletions api/src/main/java/io/kafbat/ui/model/InternalConsumerGroup.java
@@ -1,5 +1,7 @@
package io.kafbat.ui.model;

import static io.kafbat.ui.util.ConsumerGroupUtil.calculateConsumerLag;

import java.util.Collection;
import java.util.Map;
import java.util.Optional;
@@ -56,22 +58,6 @@ public static InternalConsumerGroup create(
return builder.build();
}

private static Long calculateConsumerLag(Map<TopicPartition, Long> offsets, Map<TopicPartition, Long> endOffsets) {
Long consumerLag = null;
// consumerLag should be undefined if no committed offsets found for topic
if (!offsets.isEmpty()) {
consumerLag = offsets.entrySet().stream()
.mapToLong(e ->
Optional.ofNullable(endOffsets)
.map(o -> o.get(e.getKey()))
.map(o -> o - e.getValue())
.orElse(0L)
).sum();
}

return consumerLag;
}

private static Integer calculateTopicNum(Map<TopicPartition, Long> offsets, Collection<InternalMember> members) {

return (int) Stream.concat(
@@ -1,9 +1,13 @@
package io.kafbat.ui.service;

import static io.kafbat.ui.util.ConsumerGroupUtil.calculateConsumerLag;
import static io.kafbat.ui.util.ConsumerGroupUtil.calculateLag;

import com.google.common.collect.Streams;
import com.google.common.collect.Table;
import io.kafbat.ui.config.ClustersProperties;
import io.kafbat.ui.emitter.EnhancedConsumer;
import io.kafbat.ui.model.ConsumerGroupLagDTO;
import io.kafbat.ui.model.ConsumerGroupOrderingDTO;
import io.kafbat.ui.model.InternalConsumerGroup;
import io.kafbat.ui.model.InternalTopicConsumerGroup;
@@ -16,6 +20,7 @@
import io.kafbat.ui.service.rbac.AccessControlService;
import io.kafbat.ui.util.ApplicationMetrics;
import io.kafbat.ui.util.KafkaClientSslPropertiesUtil;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
@@ -38,6 +43,8 @@
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

@Service
@RequiredArgsConstructor
@@ -141,6 +148,86 @@ private boolean isConsumerGroupRelatesToTopic(String topic,
return hasActiveMembersForTopic || hasCommittedOffsets;
}

public Mono<Tuple2<Map<String, ConsumerGroupLagDTO>, Optional<Long>>> getConsumerGroupsLag(
KafkaCluster cluster, Collection<String> groupNames, Optional<Long> lastUpdate) {
Statistics statistics = statisticsCache.get(cluster);

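// Flatten the scraped per-topic end offsets into a single TopicPartition -> end offset map.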
Map<TopicPartition, Long> endOffsets = statistics.getClusterState().getTopicStates().entrySet().stream()
.flatMap(e -> e.getValue().endOffsets().entrySet().stream().map(p ->
Map.entry(new TopicPartition(e.getKey(), p.getKey()), p.getValue()))
).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

if (statistics.getStatus().equals(ServerStatusDTO.ONLINE)) {
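// Recompute only when a scrape has finished after the client-supplied lastUpdate (or when no timestamp was given).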
boolean select = lastUpdate
.map(t -> statistics.getClusterState().getScrapeFinishedAt().isAfter(Instant.ofEpochMilli(t)))
.orElse(true);

if (select) {
Map<String, ScrapedClusterState.ConsumerGroupState> consumerGroupsStates =
statistics.getClusterState().getConsumerGroupsStates();

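// Build lag info for each requested group present in the scraped state and report the scrape timestamp.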
return Mono.just(
Tuples.of(
groupNames.stream()
.map(g -> Optional.ofNullable(consumerGroupsStates.get(g)))
.filter(Optional::isPresent)
.map(Optional::get)
.map(g -> Map.entry(g.group(), buildConsumerGroup(g, endOffsets)))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)),
Optional.of(statistics.getClusterState().getScrapeFinishedAt().toEpochMilli())
)
);
}

}

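// Cluster offline or no newer scrape since lastUpdate: return an empty map and echo the caller's timestamp.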
return Mono.just(Tuples.of(Map.of(), lastUpdate));
}

private ConsumerGroupLagDTO buildConsumerGroup(
ScrapedClusterState.ConsumerGroupState state,
Map<TopicPartition, Long> endOffsets
) {
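// Merge partitions assigned to active members (which may have no committed offset yet) with committed
// offsets, keeping the highest committed offset seen per partition (empty when only an assignment exists).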
var committedTopicPartitions = Stream.concat(
state.description().members().stream()
.flatMap(m ->
m.assignment().topicPartitions().stream()
.map(t -> Map.entry(t, Optional.<Long>empty()))
),
state.committedOffsets().entrySet().stream()
.map(o -> Map.entry(o.getKey(), Optional.ofNullable(o.getValue())))
).collect(
Collectors.groupingBy(
Map.Entry::getKey,
Collectors.mapping(Map.Entry::getValue,
Collectors.<Optional<Long>>reducing(
Optional.empty(),
(a, b) -> Stream.of(a, b)
.flatMap(Optional::stream)
.max(Long::compare)
)
)
)
);

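// Per-partition lag = end offset - committed offset (0 when unknown), then summed per topic.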
Map<String, Long> topicsLags = committedTopicPartitions.entrySet().stream()
.map(e ->
Map.entry(
e.getKey(),
calculateLag(e.getValue(), Optional.ofNullable(endOffsets.get(e.getKey()))).orElse(0L)
)
).collect(
Collectors.groupingBy(
(e) -> e.getKey().topic(),
Collectors.reducing(0L, Map.Entry::getValue, Long::sum)
)
);

long lag = topicsLags.values().stream().mapToLong(v -> v).sum();

return new ConsumerGroupLagDTO(lag, topicsLags);
}

public record ConsumerGroupsPage(List<InternalConsumerGroup> consumerGroups, int totalPages) {
}

35 changes: 35 additions & 0 deletions api/src/main/java/io/kafbat/ui/util/ConsumerGroupUtil.java
@@ -0,0 +1,35 @@
package io.kafbat.ui.util;

import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.TopicPartition;

public class ConsumerGroupUtil {
private ConsumerGroupUtil() {
}

public static Long calculateConsumerLag(Map<TopicPartition, Long> offsets,
Map<TopicPartition, Long> endOffsets) {
Long consumerLag = null;
// consumerLag should be undefined if no committed offsets found for topic
if (!offsets.isEmpty()) {
consumerLag = offsets.entrySet().stream()
.mapToLong(e ->
calculateLag(
Optional.ofNullable(e.getValue()),
Optional.ofNullable(endOffsets.get(e.getKey()))
).orElse(0L)
).sum();
}

return consumerLag;
}

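// Lag for a single partition: empty when the end offset is unknown, otherwise endOffset - committedOffset.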
public static Optional<Long> calculateLag(Optional<Long> committedOffset, Optional<Long> endOffset) {
Optional<Long> consumerLag = Optional.empty();
if (endOffset.isPresent()) {
consumerLag = committedOffset.map(o -> endOffset.get() - o);
}
return consumerLag;
}
}
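
For illustration, a minimal sketch of how the new helpers combine (the topic name and offset values are hypothetical, not taken from this change): calculateLag yields the lag for one partition, and calculateConsumerLag sums it across a group's committed partitions.

// Hypothetical values for illustration only.
Map<TopicPartition, Long> committed = Map.of(new TopicPartition("orders", 0), 100L);
Map<TopicPartition, Long> endOffsets = Map.of(new TopicPartition("orders", 0), 120L);
ConsumerGroupUtil.calculateConsumerLag(committed, endOffsets);        // 20
ConsumerGroupUtil.calculateLag(Optional.of(100L), Optional.empty());  // Optional.empty(): undefined without an end offset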