From ae06c866e3f3745bdcfbaa8141ea1fd5ef724b11 Mon Sep 17 00:00:00 2001
From: adixitconfluent
Date: Fri, 7 Mar 2025 16:19:32 +0530
Subject: [PATCH 01/11] Removed usage of partition max bytes from share fetch requests on the broker

---
 .../common/requests/ShareFetchRequest.java    |  52 +---
 .../kafka/server/share/DelayedShareFetch.java |   2 +-
 .../server/share/SharePartitionManager.java   |  39 +--
 .../main/scala/kafka/server/KafkaApis.scala   |  18 +-
 .../server/share/DelayedShareFetchTest.java   |  63 ++--
 .../server/share/ShareFetchUtilsTest.java     |  20 +-
 .../share/SharePartitionManagerTest.java      | 293 ++++++++----------
 .../unit/kafka/server/KafkaApisTest.scala     | 142 +++------
 .../kafka/server/ReplicaManagerTest.scala     |   8 +-
 .../server/share/CachedSharePartition.java    |  22 +-
 .../share/ErroneousAndValidPartitionData.java |  19 +-
 .../share/context/ShareSessionContext.java    |  15 +-
 .../share/fetch/PartitionRotateStrategy.java  |  21 +-
 .../kafka/server/share/fetch/ShareFetch.java  |  15 +-
 .../server/share/session/ShareSession.java    |  17 +-
 .../fetch/PartitionRotateStrategyTest.java    |  22 +-
 .../server/share/fetch/ShareFetchTest.java    |  18 +-
 .../share/fetch/ShareFetchTestUtils.java      |  39 +--
 18 files changed, 310 insertions(+), 515 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java
index f1a5753fef1d8..de5f0adcf83f3 100644
--- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java
+++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java
@@ -28,10 +28,10 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
+import java.util.Set;
 
 public class ShareFetchRequest extends AbstractRequest {
 
@@ -151,7 +151,7 @@ public String toString() {
     }
 
     private final ShareFetchRequestData data;
-    private volatile LinkedHashMap<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData = null;
+    private volatile LinkedHashSet<TopicIdPartition> shareFetchData = null;
     private volatile List<TopicIdPartition> toForget = null;
 
     public ShareFetchRequest(ShareFetchRequestData data, short version) {
@@ -179,41 +179,6 @@ public static ShareFetchRequest parse(ByteBuffer buffer, short version) {
         );
     }
 
-    public static final class SharePartitionData {
-        public final Uuid topicId;
-        public final int maxBytes;
-
-        public SharePartitionData(
-            Uuid topicId,
-            int maxBytes
-        ) {
-            this.topicId = topicId;
-            this.maxBytes = maxBytes;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-            ShareFetchRequest.SharePartitionData that = (ShareFetchRequest.SharePartitionData) o;
-            return Objects.equals(topicId, that.topicId) &&
-                maxBytes == that.maxBytes;
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(topicId, maxBytes);
-        }
-
-        @Override
-        public String toString() {
-            return "SharePartitionData(" +
-                "topicId=" + topicId +
-                ", maxBytes=" + maxBytes +
-                ')';
-        }
-    }
-
     public int minBytes() {
         return data.minBytes();
     }
@@ -226,23 +191,18 @@ public int maxWait() {
         return data.maxWaitMs();
     }
 
-    public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData(Map<Uuid, String> topicNames) {
+    public Set<TopicIdPartition> shareFetchData(Map<Uuid, String> topicNames) {
         if (shareFetchData == null) {
             synchronized (this) {
                 if (shareFetchData == null) {
                     // Assigning the lazy-initialized `shareFetchData` in the last step
                     // to avoid other threads accessing a half-initialized object.
-                    final LinkedHashMap<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataTmp = new LinkedHashMap<>();
+                    final LinkedHashSet<TopicIdPartition> shareFetchDataTmp = new LinkedHashSet<>();
                     data.topics().forEach(shareFetchTopic -> {
                         String name = topicNames.get(shareFetchTopic.topicId());
                         shareFetchTopic.partitions().forEach(shareFetchPartition -> {
                             // Topic name may be null here if the topic name was unable to be resolved using the topicNames map.
-                            shareFetchDataTmp.put(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex())),
-                                new ShareFetchRequest.SharePartitionData(
-                                    shareFetchTopic.topicId(),
-                                    shareFetchPartition.partitionMaxBytes()
-                                )
-                            );
+                            shareFetchDataTmp.add(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex())));
                         });
                     });
                     shareFetchData = shareFetchDataTmp;
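The accessor above still uses double-checked locking, but it now materializes a LinkedHashSet<TopicIdPartition> instead of a LinkedHashMap keyed by per-partition max bytes. A minimal caller-side sketch of the difference; `request`, `topicNames` and `process` are hypothetical names used only for illustration, not part of this patch:

    // Before this patch: per-partition max bytes travelled with each entry.
    // request.shareFetchData(topicNames).forEach((tp, data) -> process(tp, data.maxBytes));

    // After this patch: only the partitions (and their order) matter;
    // max bytes is decided broker-side.
    for (TopicIdPartition tp : request.shareFetchData(topicNames)) {
        process(tp);
    }
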
diff --git a/core/src/main/java/kafka/server/share/DelayedShareFetch.java b/core/src/main/java/kafka/server/share/DelayedShareFetch.java
index 18797f3cc0cd3..d68ed06d3070d 100644
--- a/core/src/main/java/kafka/server/share/DelayedShareFetch.java
+++ b/core/src/main/java/kafka/server/share/DelayedShareFetch.java
@@ -181,7 +181,7 @@ public void onComplete() {
             return;
         } else {
             // Update metric to record acquired to requested partitions.
-            double requestTopicToAcquired = (double) topicPartitionData.size() / shareFetch.partitionMaxBytes().size();
+            double requestTopicToAcquired = (double) topicPartitionData.size() / shareFetch.topicIdPartitions().size();
             shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), (int) (requestTopicToAcquired * 100));
         }
         log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}",
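The metric change above only swaps the denominator: the acquired-to-requested ratio is still recorded as an integer percentage. A self-contained sketch of the arithmetic, with illustrative values:

    int acquiredPartitions = 3;   // topicPartitionData.size()
    int requestedPartitions = 4;  // shareFetch.topicIdPartitions().size()
    double requestTopicToAcquired = (double) acquiredPartitions / requestedPartitions;
    int recordedPercent = (int) (requestTopicToAcquired * 100); // 75, passed to recordTopicPartitionsFetchRatio
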
diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java
index f080b08e8c8e9..fc9cd115b8775 100644
--- a/core/src/main/java/kafka/server/share/SharePartitionManager.java
+++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java
@@ -30,7 +30,6 @@
 import org.apache.kafka.common.message.ShareAcknowledgeResponseData;
 import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData;
 import org.apache.kafka.common.protocol.Errors;
-import org.apache.kafka.common.requests.ShareFetchRequest;
 import org.apache.kafka.common.requests.ShareRequestMetadata;
 import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
 import org.apache.kafka.common.utils.Time;
@@ -68,6 +67,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -248,7 +248,7 @@ private SharePartitionManager(
      * @param memberId The member id, generated by the group-coordinator, this is used to identify the client.
      * @param fetchParams The fetch parameters from the share fetch request.
      * @param batchSize The number of records per acquired records batch.
-     * @param partitionMaxBytes The maximum number of bytes to fetch for each partition.
+     * @param topicPartitions The topic partitions to fetch records for.
      *
      * @return A future that will be completed with the fetched messages.
      */
@@ -258,14 +258,14 @@ public CompletableFuture<Map<TopicIdPartition, PartitionData>> fetchMessages(
         FetchParams fetchParams,
         int sessionEpoch,
         int batchSize,
-        LinkedHashMap<TopicIdPartition, Integer> partitionMaxBytes
+        LinkedHashSet<TopicIdPartition> topicPartitions
     ) {
         log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}",
-            partitionMaxBytes.keySet(), groupId, fetchParams);
+            topicPartitions, groupId, fetchParams);
 
-        LinkedHashMap<TopicIdPartition, Integer> topicIdPartitions = PartitionRotateStrategy
+        LinkedHashSet<TopicIdPartition> topicIdPartitions = PartitionRotateStrategy
             .type(PartitionRotateStrategy.StrategyType.ROUND_ROBIN)
-            .rotate(partitionMaxBytes, new PartitionRotateMetadata(sessionEpoch));
+            .rotate(topicPartitions, new PartitionRotateMetadata(sessionEpoch));
 
         CompletableFuture<Map<TopicIdPartition, PartitionData>> future = new CompletableFuture<>();
         processShareFetch(new ShareFetch(fetchParams, groupId, memberId, future, topicIdPartitions, batchSize, maxFetchRecords, brokerTopicStats));
@@ -433,23 +433,14 @@
-    public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
-                                        List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) {
+    public ShareFetchContext newContext(String groupId, Set<TopicIdPartition> shareFetchData,
+                                        List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) {
         ShareFetchContext context;
-        // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions
-        Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataWithMaxBytes = new HashMap<>();
-        shareFetchData.forEach((tp, sharePartitionData) -> {
-            if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData);
-        });
         // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a
         // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases.
         if (reqMetadata.isFull()) {
             ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
             if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) {
-                // If the epoch is FINAL_EPOCH, don't try to create a new session.
-                if (!shareFetchDataWithMaxBytes.isEmpty()) {
-                    throw Errors.INVALID_REQUEST.exception();
-                }
                 if (cache.get(key) == null) {
                     log.error("Share session error for {}: no such share session found", key);
                     throw Errors.SHARE_SESSION_NOT_FOUND.exception();
@@ -464,9 +455,9 @@ public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
             ImplicitLinkedHashCollection<CachedSharePartition> cachedSharePartitions = new
-                ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size());
+                ImplicitLinkedHashCollection<>(shareFetchData.size());
-            shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) ->
-                cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false)));
+            shareFetchData.forEach(topicIdPartition ->
+                cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, false)));
             ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(),
                 time.milliseconds(), cachedSharePartitions);
             if (responseShareSessionKey == null) {
@@ -474,10 +465,10 @@ public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
             Map<ShareSession.ModifiedTopicIdPartitionType, List<TopicIdPartition>> modifiedTopicIdPartitions = shareSession.update(
-                shareFetchDataWithMaxBytes, toForget);
+                shareFetchData, toForget);
             cache.touch(shareSession, time.milliseconds());
             shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch);
             log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " +
@@ -586,7 +577,7 @@ private static String partitionsToLogString(Collection<TopicIdPartition> partitions) {
     // Visible for testing.
     void processShareFetch(ShareFetch shareFetch) {
-        if (shareFetch.partitionMaxBytes().isEmpty()) {
+        if (shareFetch.topicIdPartitions().isEmpty()) {
             // If there are no partitions to fetch then complete the future with an empty map.
             shareFetch.maybeComplete(Collections.emptyMap());
             return;
@@ -596,7 +587,7 @@ void processShareFetch(ShareFetch shareFetch) {
         LinkedHashMap<TopicIdPartition, SharePartition> sharePartitions = new LinkedHashMap<>();
         // Track the topics for which we have received a share fetch request for metrics.
         Set<String> topics = new HashSet<>();
-        for (TopicIdPartition topicIdPartition : shareFetch.partitionMaxBytes().keySet()) {
+        for (TopicIdPartition topicIdPartition : shareFetch.topicIdPartitions()) {
             topics.add(topicIdPartition.topic());
             SharePartitionKey sharePartitionKey = sharePartitionKey(
                 shareFetch.groupId(),
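fetchMessages now feeds an ordered set, rather than a map, through the ROUND_ROBIN rotation. A minimal sketch of what epoch-based rotation over a LinkedHashSet could look like; this is an assumption for illustration, not PartitionRotateStrategy's actual code (java.util only):

    static <T> LinkedHashSet<T> rotate(LinkedHashSet<T> partitions, int sessionEpoch) {
        if (partitions.size() <= 1 || sessionEpoch <= 0) {
            return partitions;
        }
        List<T> ordered = new ArrayList<>(partitions);
        // Shift the start forward by the epoch so each epoch lets a different
        // partition be considered first for acquisition.
        Collections.rotate(ordered, -(sessionEpoch % ordered.size()));
        return new LinkedHashSet<>(ordered);
    }
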
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala
index e77a67b35b36d..9d8509d2ef8c2 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -2997,12 +2997,8 @@ class KafkaApis(val requestChannel: RequestChannel,
     erroneousAndValidPartitionData.erroneous.forEach {
       case(tp, _) => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp
     }
-    erroneousAndValidPartitionData.validTopicIdPartitions.forEach {
-      case(tp, _) => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp
-    }
-    shareFetchData.forEach {
-      case(tp, _) => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp
-    }
+    erroneousAndValidPartitionData.validTopicIdPartitions.forEach(tp => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp)
+    shareFetchData.forEach { tp => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp }
 
     // Kafka share consumers need READ permission on each topic they are fetching.
val authorizedTopics = authHelper.filterByAuthorized( @@ -3139,15 +3135,15 @@ class KafkaApis(val requestChannel: RequestChannel, val erroneous = mutable.Map.empty[TopicIdPartition, ShareFetchResponseData.PartitionData] erroneousAndValidPartitionData.erroneous.forEach { (topicIdPartition, partitionData) => erroneous.put(topicIdPartition, partitionData) } - val interestedWithMaxBytes = new util.LinkedHashMap[TopicIdPartition, Integer] + val interestedTopicPartitions = new util.LinkedHashSet[TopicIdPartition] - erroneousAndValidPartitionData.validTopicIdPartitions.forEach { case (topicIdPartition, sharePartitionData) => + erroneousAndValidPartitionData.validTopicIdPartitions.forEach { case topicIdPartition => if (!authorizedTopics.contains(topicIdPartition.topicPartition.topic)) erroneous += topicIdPartition -> ShareFetchResponse.partitionResponse(topicIdPartition, Errors.TOPIC_AUTHORIZATION_FAILED) else if (!metadataCache.contains(topicIdPartition.topicPartition)) erroneous += topicIdPartition -> ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION) else - interestedWithMaxBytes.put(topicIdPartition, sharePartitionData.maxBytes) + interestedTopicPartitions.add(topicIdPartition) } val shareFetchRequest = request.body[ShareFetchRequest] @@ -3156,7 +3152,7 @@ class KafkaApis(val requestChannel: RequestChannel, val versionId = request.header.apiVersion val groupId = shareFetchRequest.data.groupId - if (interestedWithMaxBytes.isEmpty) { + if (interestedTopicPartitions.isEmpty) { CompletableFuture.completedFuture(erroneous) } else { // for share fetch from consumer, cap fetchMaxBytes to the maximum bytes that could be fetched without being @@ -3194,7 +3190,7 @@ class KafkaApis(val requestChannel: RequestChannel, params, shareSessionEpoch, shareFetchRequest.data.batchSize, - interestedWithMaxBytes + interestedTopicPartitions ).thenApply{ result => val combinedResult = mutable.Map.empty[TopicIdPartition, ShareFetchResponseData.PartitionData] result.asScala.foreach { case (tp, data) => diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index 27aae04f176ac..7c645f43c1026 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -70,11 +70,10 @@ import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.SharePartitionManagerTest.DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL; -import static kafka.server.share.SharePartitionManagerTest.PARTITION_MAX_BYTES; import static kafka.server.share.SharePartitionManagerTest.buildLogReadResult; import static kafka.server.share.SharePartitionManagerTest.mockReplicaManagerDelayedShareFetch; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedMap; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; @@ -121,7 +120,6 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit Uuid topicId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); - 
LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -134,7 +132,7 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(false); @@ -167,7 +165,6 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -182,7 +179,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -240,7 +237,6 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -255,7 +251,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -295,7 +291,6 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -308,7 +303,7 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new 
CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -355,7 +350,6 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -369,7 +363,7 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); @@ -410,7 +404,6 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -423,7 +416,7 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -471,7 +464,6 @@ public void testToCompleteAnAlreadyCompletedFuture() { Uuid topicId = Uuid.randomUuid(); ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); SharePartition sp0 = mock(SharePartition.class); @@ -480,7 +472,7 @@ public void testToCompleteAnAlreadyCompletedFuture() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); @@ -526,7 +518,7 @@ public void testForceCompleteTriggersDelayedActionsQueue() { TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(topicId, new TopicPartition("foo", 2)); - LinkedHashMap partitionMaxBytes1 = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); + LinkedHashSet topicIdPartitions1 = orderedSet(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -542,7 +534,7 @@ public void 
testForceCompleteTriggersDelayedActionsQueue() { sharePartitions1.put(tp2, sp2); ShareFetch shareFetch1 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes1, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), topicIdPartitions1, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( @@ -551,7 +543,7 @@ public void testForceCompleteTriggersDelayedActionsQueue() { mockReplicaManagerDelayedShareFetch(replicaManager, delayedShareFetchPurgatory); List delayedShareFetchWatchKeys = new ArrayList<>(); - partitionMaxBytes1.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + topicIdPartitions1.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); DelayedShareFetch delayedShareFetch1 = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch1) @@ -568,9 +560,9 @@ public void testForceCompleteTriggersDelayedActionsQueue() { assertTrue(delayedShareFetch1.lock().tryLock()); delayedShareFetch1.lock().unlock(); - LinkedHashMap partitionMaxBytes2 = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); + LinkedHashSet topicIdPartitions2 = orderedSet(tp0, tp1); ShareFetch shareFetch2 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes2, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), topicIdPartitions2, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -616,7 +608,6 @@ public void testCombineLogReadResponse() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -629,7 +620,7 @@ public void testCombineLogReadResponse() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp1)); @@ -674,7 +665,6 @@ public void testExceptionInMinBytesCalculation() { Uuid topicId = Uuid.randomUuid(); ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); SharePartition sp0 = mock(SharePartition.class); @@ -686,7 +676,7 @@ public void testExceptionInMinBytesCalculation() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 
1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -753,7 +743,6 @@ public void testExceptionInMinBytesCalculation() { public void testTryCompleteLocksReleasedOnCompleteException() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -764,7 +753,7 @@ public void testTryCompleteLocksReleasedOnCompleteException() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -804,7 +793,7 @@ public void testLocksReleasedForCompletedFetch() { mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedMap(PARTITION_MAX_BYTES, tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp0)); @@ -838,7 +827,7 @@ public void testLocksReleasedAcquireException() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedMap(PARTITION_MAX_BYTES, tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() @@ -857,7 +846,6 @@ public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); @@ -868,7 +856,7 @@ public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); // partitionMaxBytesStrategy.maxBytes() function throws an exception PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); @@ -913,8 +901,6 @@ public void 
testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab SharePartition sp3 = mock(SharePartition.class); SharePartition sp4 = mock(SharePartition.class); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1, tp2, tp3, tp4); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp2.maybeAcquireFetchLock()).thenReturn(true); @@ -934,7 +920,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab sharePartitions.put(tp4, sp4); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( @@ -1011,8 +997,6 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab SharePartition sp3 = mock(SharePartition.class); SharePartition sp4 = mock(SharePartition.class); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1, tp2, tp3, tp4); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp2.maybeAcquireFetchLock()).thenReturn(false); @@ -1032,7 +1016,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab sharePartitions.put(tp4, sp4); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( @@ -1091,7 +1075,6 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1, tp2); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -1105,7 +1088,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), orderedSet(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() @@ -1163,7 +1146,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { public void testOnCompleteExecutionOnTimeout() { ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), new LinkedHashMap<>(), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), new LinkedHashSet<>(), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch 
delayedShareFetch = DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) diff --git a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java index 6baa3b05b534b..aaedcdd95dd17 100644 --- a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java +++ b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java @@ -59,11 +59,10 @@ import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; -import static kafka.server.share.SharePartitionManagerTest.PARTITION_MAX_BYTES; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createFileRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.memoryRecordsBuilder; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedMap; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -95,7 +94,6 @@ public void testProcessFetchResponse() { String memberId = Uuid.randomUuid().toString(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -115,7 +113,7 @@ public void testProcessFetchResponse() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -161,7 +159,6 @@ public void testProcessFetchResponseWithEmptyRecords() { String memberId = Uuid.randomUuid().toString(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -177,7 +174,7 @@ public void testProcessFetchResponseWithEmptyRecords() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); List responseData = List.of( new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, @@ -208,8 +205,6 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); - SharePartition sp0 = Mockito.mock(SharePartition.class); SharePartition sp1 = 
Mockito.mock(SharePartition.class); @@ -218,7 +213,7 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -304,14 +299,13 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); SharePartition sp0 = Mockito.mock(SharePartition.class); LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, 100, BROKER_TOPIC_STATS); ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -366,8 +360,6 @@ public void testProcessFetchResponseWithMaxFetchRecords() throws IOException { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); - SharePartition sp0 = Mockito.mock(SharePartition.class); SharePartition sp1 = Mockito.mock(SharePartition.class); @@ -381,7 +373,7 @@ public void testProcessFetchResponseWithMaxFetchRecords() throws IOException { Uuid memberId = Uuid.randomUuid(); // Set max fetch records to 10 ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId.toString(), - new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 10, BROKER_TOPIC_STATS); + new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 10, BROKER_TOPIC_STATS); LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); recordsPerOffset.put(0L, 1); diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index 4f550c6751b4f..6edfed90b2395 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -44,7 +44,6 @@ import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.requests.FetchRequest; -import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; @@ -99,6 +98,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -116,7 +116,7 @@ import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.DelayedShareFetchTest.mockTopicIdPartitionToReturnDataEqualToMinBytes; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedMap; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; import static 
org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -157,7 +157,6 @@ public class SharePartitionManagerTest { 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); private static final String TIMER_NAME_PREFIX = "share-partition-manager"; - static final int PARTITION_MAX_BYTES = 40000; static final int DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL = 1000; private Time time; @@ -201,9 +200,7 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES)); - reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES)); + Set reqData1 = orderedSet(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -211,7 +208,7 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.emptyList(), reqMetadata2, true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @@ -231,9 +228,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES)); - reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES)); + Set reqData1 = orderedSet(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -244,14 +239,13 @@ public void testNewContextReturnsFinalContextWithRequestData() { // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. 
// New context should be created successfully - Map reqData3 = Collections.singletonMap(new TopicIdPartition(tpId1, new TopicPartition("foo", 0)), - new ShareFetchRequest.SharePartitionData(tpId1, 0)); + Set reqData3 = orderedSet(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @Test - public void testNewContextReturnsFinalContextError() { + public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequestData() { ShareSessionCache cache = new ShareSessionCache(10, 1000); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) @@ -266,9 +260,7 @@ public void testNewContextReturnsFinalContextError() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES)); - reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES)); + Set reqData1 = orderedSet(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -277,12 +269,10 @@ public void testNewContextReturnsFinalContextError() { ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - // shareFetch is not empty and the maxBytes of topic partition is not 0, which means this is trying to fetch on a Final request. - // New context should throw an error - Map reqData3 = Collections.singletonMap(new TopicIdPartition(tpId1, new TopicPartition("foo", 0)), - new ShareFetchRequest.SharePartitionData(tpId1, PARTITION_MAX_BYTES)); - assertThrows(InvalidRequestException.class, - () -> sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true)); + // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH + Set reqData3 = orderedSet(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); + assertInstanceOf(FinalContext.class, + sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true)); } @Test @@ -305,20 +295,14 @@ public void testNewContext() { String groupId = "grp"; // Create a new share session with an initial share fetch request - Map reqData2 = new LinkedHashMap<>(); - reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); - reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); - + Set reqData2 = orderedSet(tp0, tp1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); - ((ShareSessionContext) context2).shareFetchData().forEach((topicIdPartition, sharePartitionData) -> { - assertTrue(reqData2.containsKey(topicIdPartition)); - assertEquals(reqData2.get(topicIdPartition), sharePartitionData); - }); + ((ShareSessionContext) context2).shareFetchData().forEach(topicIdPartition -> 
assertTrue(reqData2.contains(topicIdPartition))); LinkedHashMap respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); @@ -341,7 +325,7 @@ public void testNewContext() { new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -351,9 +335,7 @@ public void testNewContext() { shareSessionContext5.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); - ShareFetchRequest.SharePartitionData data = cachedSharePartition.reqData(); - assertTrue(reqData2.containsKey(topicIdPartition)); - assertEquals(reqData2.get(topicIdPartition), data); + assertTrue(reqData2.contains(topicIdPartition)); }); } ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); @@ -365,14 +347,14 @@ public void testNewContext() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // Get the final share session. 
- ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -407,9 +389,7 @@ public void testShareSessionExpiration() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session, session 1 - Map session1req = new LinkedHashMap<>(); - session1req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); - session1req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); + Set session1req = orderedSet(foo0, foo1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -432,9 +412,7 @@ public void testShareSessionExpiration() { time.sleep(500); // Create a second new share session - Map session2req = new LinkedHashMap<>(); - session2req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); - session2req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); + Set session2req = orderedSet(foo0, foo1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -458,7 +436,7 @@ public void testShareSessionExpiration() { time.sleep(500); // Create a subsequent share fetch context for session 1 - ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, session1context2); @@ -468,9 +446,7 @@ public void testShareSessionExpiration() { // create one final share session to test that the least recently used entry is evicted // the second share session should be evicted because the first share session was incrementally fetched // more recently than the second session was created - Map session3req = new LinkedHashMap<>(); - session3req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); - session3req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); + Set session3req = orderedSet(foo0, foo1); ShareRequestMetadata reqMetadata3 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -504,10 +480,8 @@ public void testSubsequentShareSession() { TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); - // Create a new share session with foo-0 and foo-1 - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); - reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); + // Create a new share session with tp0 and tp1 + Set reqData1 = orderedSet(tp0, tp1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -524,8 +498,7 @@ public void testSubsequentShareSession() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent fetch 
request that removes foo-0 and adds bar-0 - Map reqData2 = Collections.singletonMap( - tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); + Set reqData2 = orderedSet(tp2); List removed2 = new ArrayList<>(); removed2.add(tp0); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, removed2, @@ -571,9 +544,7 @@ public void testZeroSizeShareSession() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session with foo-0 and foo-1 - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); - reqData1.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); + Set reqData1 = orderedSet(foo0, foo1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -594,7 +565,7 @@ public void testZeroSizeShareSession() { List removed2 = new ArrayList<>(); removed2.add(foo0); removed2.add(foo1); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), removed2, + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), removed2, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context2); @@ -619,10 +590,7 @@ public void testToForgetPartitions() { ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(foo, new ShareFetchRequest.SharePartitionData(foo.topicId(), 100)); - reqData1.put(bar, new ShareFetchRequest.SharePartitionData(bar.topicId(), 100)); - + Set reqData1 = orderedSet(foo, bar); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); assertInstanceOf(ShareSessionContext.class, context1); @@ -630,7 +598,7 @@ public void testToForgetPartitions() { mockUpdateAndGenerateResponseData(context1, groupId, reqMetadata1.memberId()); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.singletonList(foo), + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.singletonList(foo), new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); // So foo is removed but not the others. 
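On the forget semantics this test exercises: a subsequent request's toForget list drops partitions from the cached session, while any newly requested partitions are added. A simplified stand-in for that bookkeeping (not ShareSession.update's real implementation; foo and bar are the test's TopicIdPartitions):

    Set<TopicIdPartition> cached = new LinkedHashSet<>(List.of(foo, bar)); // session state
    cached.removeAll(List.of(foo)); // toForget from the subsequent request
    // Nothing new was requested, so nothing is added: cached now holds only bar,
    // matching the expectation that foo is gone but the others remain.
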
@@ -638,7 +606,7 @@ public void testToForgetPartitions() { mockUpdateAndGenerateResponseData(context2, groupId, reqMetadata1.memberId()); - ShareFetchContext context3 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.singletonList(bar), + ShareFetchContext context3 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.singletonList(bar), new ShareRequestMetadata(reqMetadata1.memberId(), 2), true); assertPartitionsPresent((ShareSessionContext) context3, Collections.emptyList()); } @@ -663,9 +631,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { topicNames.put(barId, "bar"); // Create a new share session with foo-0 and bar-1 - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(foo, new ShareFetchRequest.SharePartitionData(foo.topicId(), 100)); - reqData1.put(bar, new ShareFetchRequest.SharePartitionData(bar.topicId(), 100)); + Set reqData1 = orderedSet(foo, bar); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -683,7 +649,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share fetch request as though no topics changed. - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context2); @@ -715,11 +681,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { String groupId = "grp"; // Create a new share session with an initial share fetch request - Map reqData2 = new LinkedHashMap<>(); - reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); - reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); - reqData2.put(tpNull1, new ShareFetchRequest.SharePartitionData(tpNull1.topicId(), 100)); - + Set reqData2 = orderedSet(tp0, tp1, tpNull1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -751,7 +713,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { new ShareRequestMetadata(Uuid.randomUuid(), 1), true)); // Continue the first share session we created. 
- ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -766,8 +728,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - Map reqData7 = Collections.singletonMap( - tpNull2, new ShareFetchRequest.SharePartitionData(tpNull2.topicId(), 100)); + Set reqData7 = orderedSet(tpNull2); ShareFetchContext context7 = sharePartitionManager.newContext(groupId, reqData7, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); // Check for throttled response @@ -778,7 +739,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), Arrays.asList(tpNull1, tpNull2), Arrays.asList(tp0, tp1)); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -817,9 +778,7 @@ public void testShareFetchContextResponseSize() { String groupId = "grp"; // Create a new share session with an initial share fetch request - Map reqData2 = new LinkedHashMap<>(); - reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); - reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); + Set reqData2 = orderedSet(tp0, tp1); // For response size expected value calculation ObjectSerializationCache objectSerializationCache = new ObjectSerializationCache(); @@ -854,8 +813,7 @@ public void testShareFetchContextResponseSize() { new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - Map reqData5 = Collections.singletonMap( - tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); + Set reqData5 = orderedSet(tp2); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); @@ -875,7 +833,7 @@ public void testShareFetchContextResponseSize() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); int respSize7 = context7.responseSize(respData2, version); @@ -886,7 +844,7 @@ public void testShareFetchContextResponseSize() { assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Get the final share session. 
- ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -930,9 +888,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { Uuid memberId2 = Uuid.randomUuid(); // Create a new share session with an initial share fetch request. - Map reqData1 = new LinkedHashMap<>(); - reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); - reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); + Set reqData1 = orderedSet(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -953,8 +909,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Create a new share session with an initial share fetch request. - Map reqData2 = Collections.singletonMap( - tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); + Set reqData2 = orderedSet(tp2); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -973,8 +928,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(Collections.singletonList(tp2), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Continue the first share session we created. - Map reqData3 = Collections.singletonMap( - tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); + Set reqData3 = orderedSet(tp2); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, reqData3, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context3); @@ -989,8 +943,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Continue the second session we created. - Map reqData4 = Collections.singletonMap( - tp3, new ShareFetchRequest.SharePartitionData(tp3.topicId(), 100)); + Set reqData4 = orderedSet(tp3); ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, Collections.singletonList(tp2), new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context4); @@ -1004,7 +957,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(Collections.singletonList(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Get the final share session. 
- ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context5.getClass()); @@ -1019,7 +972,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertTrue(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Continue the second share session . - ShareFetchContext context6 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.singletonList(tp3), + ShareFetchContext context6 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.singletonList(tp3), new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); assertInstanceOf(ShareSessionContext.class, context6); assertTrue(((ShareSessionContext) context6).isSubsequent()); @@ -1067,7 +1020,7 @@ public void testMultipleSequentialShareFetches() { TopicIdPartition tp4 = new TopicIdPartition(fooId, new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(barId, new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(fooId, new TopicPartition("foo", 3)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1, tp2, tp3, tp4, tp5, tp6); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2, tp3, tp4, tp5, tp6); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1090,22 +1043,22 @@ public void testMultipleSequentialShareFetches() { .withBrokerTopicStats(brokerTopicStats) .build(); - doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); CompletableFuture> future = sharePartitionManager.fetchMessages( - groupId, memberId1.toString(), FETCH_PARAMS, 1, BATCH_SIZE, partitionMaxBytes); + groupId, memberId1.toString(), FETCH_PARAMS, 1, BATCH_SIZE, topicIdPartitions); assertTrue(future.isDone()); Mockito.verify(mockReplicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 3, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); assertTrue(future.isDone()); Mockito.verify(mockReplicaManager, times(2)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 10, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); assertTrue(future.isDone()); Mockito.verify(mockReplicaManager, times(3)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -1129,7 +1082,7 @@ public void testMultipleConcurrentShareFetches() throws InterruptedException { TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(barId, new TopicPartition("bar", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1, tp2, tp3); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2, tp3); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1165,31 
+1118,31 @@ public void testMultipleConcurrentShareFetches() throws InterruptedException { assertEquals(4, sp1.nextFetchOffset()); assertEquals(10, sp2.nextFetchOffset()); assertEquals(20, sp3.nextFetchOffset()); - return buildLogReadResult(partitionMaxBytes.keySet()); + return buildLogReadResult(topicIdPartitions); }).doAnswer(invocation -> { assertEquals(15, sp0.nextFetchOffset()); assertEquals(1, sp1.nextFetchOffset()); assertEquals(25, sp2.nextFetchOffset()); assertEquals(15, sp3.nextFetchOffset()); - return buildLogReadResult(partitionMaxBytes.keySet()); + return buildLogReadResult(topicIdPartitions); }).doAnswer(invocation -> { assertEquals(6, sp0.nextFetchOffset()); assertEquals(18, sp1.nextFetchOffset()); assertEquals(26, sp2.nextFetchOffset()); assertEquals(23, sp3.nextFetchOffset()); - return buildLogReadResult(partitionMaxBytes.keySet()); + return buildLogReadResult(topicIdPartitions); }).doAnswer(invocation -> { assertEquals(30, sp0.nextFetchOffset()); assertEquals(5, sp1.nextFetchOffset()); assertEquals(26, sp2.nextFetchOffset()); assertEquals(16, sp3.nextFetchOffset()); - return buildLogReadResult(partitionMaxBytes.keySet()); + return buildLogReadResult(topicIdPartitions); }).doAnswer(invocation -> { assertEquals(25, sp0.nextFetchOffset()); assertEquals(5, sp1.nextFetchOffset()); assertEquals(26, sp2.nextFetchOffset()); assertEquals(16, sp3.nextFetchOffset()); - return buildLogReadResult(partitionMaxBytes.keySet()); + return buildLogReadResult(topicIdPartitions); }).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); int threadCount = 100; @@ -1199,7 +1152,7 @@ public void testMultipleConcurrentShareFetches() throws InterruptedException { for (int i = 0; i != threadCount; ++i) { executorService.submit(() -> { sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); }); // We are blocking the main thread at an interval of 10 threads so that the currently running executorService threads can complete. 
if (i % 10 == 0) @@ -1222,7 +1175,7 @@ public void testReplicaManagerFetchShouldNotProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -1246,7 +1199,7 @@ public void testReplicaManagerFetchShouldNotProceed() { CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); Map result = future.join(); @@ -1265,7 +1218,7 @@ public void testReplicaManagerFetchShouldProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1282,10 +1235,10 @@ public void testReplicaManagerFetchShouldProceed() { .withBrokerTopicStats(brokerTopicStats) .build(); - doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); // Since the nextFetchOffset does not point to endOffset + 1, i.e. 
some of the records in the cachedState are AVAILABLE, // even though the maxInFlightMessages limit is exceeded, replicaManager.readFromLog should be called Mockito.verify(mockReplicaManager, times(1)).readFromLog( @@ -1757,7 +1710,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0)); TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp1, tp2); + LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1780,7 +1733,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { groupId, Uuid.randomUuid().toString(), new CompletableFuture<>(), - partitionMaxBytes, + topicIdPartitions, BATCH_SIZE, 100, brokerTopicStats); @@ -1802,7 +1755,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { when(sp2.acquire(anyString(), anyInt(), anyInt(), anyLong(), any())).thenReturn(ShareAcquiredRecords.empty()); List delayedShareFetchWatchKeys = new ArrayList<>(); - partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = SharePartitionManagerBuilder.builder() .withPartitionCacheMap(partitionCacheMap) @@ -1827,7 +1780,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys. 
assertEquals(2, delayedShareFetchPurgatory.watched()); - doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); Map> acknowledgeTopics = new HashMap<>(); acknowledgeTopics.put(tp1, Arrays.asList( @@ -1861,7 +1814,7 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp1, tp2); + LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1891,7 +1844,7 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { groupId, Uuid.randomUuid().toString(), new CompletableFuture<>(), - partitionMaxBytes, + topicIdPartitions, BATCH_SIZE, 100, brokerTopicStats); @@ -1911,7 +1864,7 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { when(sp3.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); - partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = SharePartitionManagerBuilder.builder() .withPartitionCacheMap(partitionCacheMap) @@ -1967,7 +1920,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp1, tp2); + LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1995,7 +1948,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { groupId, Uuid.randomUuid().toString(), new CompletableFuture<>(), - partitionMaxBytes, + topicIdPartitions, BATCH_SIZE, 100, brokerTopicStats); @@ -2015,7 +1968,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { when(sp2.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); - partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = spy(SharePartitionManagerBuilder.builder() .withPartitionCacheMap(partitionCacheMap) @@ -2067,7 +2020,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new 
TopicPartition("foo3", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp1, tp2); + LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -2101,7 +2054,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { groupId, Uuid.randomUuid().toString(), new CompletableFuture<>(), - partitionMaxBytes, + topicIdPartitions, BATCH_SIZE, 100, brokerTopicStats); @@ -2121,7 +2074,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { when(sp3.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); - partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = spy(SharePartitionManagerBuilder.builder() .withPartitionCacheMap(partitionCacheMap) @@ -2169,7 +2122,7 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2200,7 +2153,7 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); // Verify that the fetch request is completed. TestUtils.waitForCondition( future::isDone, @@ -2233,7 +2186,7 @@ public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -2270,7 +2223,7 @@ public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); // Verify that the fetch request is completed. 
TestUtils.waitForCondition( future::isDone, @@ -2302,7 +2255,7 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2332,15 +2285,15 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { // Send 3 requests for share fetch for same share partition. CompletableFuture> future1 = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); CompletableFuture> future2 = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); CompletableFuture> future3 = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); Mockito.verify(sp0, times(3)).maybeInitialize(); Mockito.verify(mockReplicaManager, times(3)).addDelayedShareFetchRequest(any(), any()); @@ -2376,7 +2329,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2399,7 +2352,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new LeaderNotAvailableException("Leader not available"))); CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2415,7 +2368,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return IllegalStateException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new IllegalStateException("Illegal state"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2429,7 +2382,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return CoordinatorNotAvailableException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new CoordinatorNotAvailableException("Coordinator not available"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2443,7 +2396,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return InvalidRequestException to simulate initialization failure. 
when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new InvalidRequestException("Invalid request"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2457,7 +2410,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return FencedStateEpochException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new FencedStateEpochException("Fenced state epoch"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2471,7 +2424,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return NotLeaderOrFollowerException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new NotLeaderOrFollowerException("Not leader or follower"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2485,7 +2438,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return RuntimeException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new RuntimeException("Runtime exception"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2507,7 +2460,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { public void testShareFetchProcessingExceptions() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); Map partitionCacheMap = (Map) mock(Map.class); // Throw the exception for first fetch request. Return share partition for next. @@ -2521,7 +2474,7 @@ public void testShareFetchProcessingExceptions() throws Exception { CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2539,7 +2492,7 @@ public void testShareFetchProcessingExceptions() throws Exception { public void testSharePartitionInitializationFailure() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); // Send map to check no share partition is created. Map partitionCacheMap = new HashMap<>(); @@ -2562,7 +2515,7 @@ public void testSharePartitionInitializationFailure() throws Exception { // Validate when exception is thrown. 
CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2572,7 +2525,7 @@ public void testSharePartitionInitializationFailure() throws Exception { // Validate when partition is not leader. future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2597,7 +2550,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { TopicIdPartition tp1 = new TopicIdPartition(memberId1, new TopicPartition("foo", 1)); // For tp2, share partition initialization will fail. TopicIdPartition tp2 = new TopicIdPartition(memberId1, new TopicPartition("foo", 2)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1, tp2); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2); // Mark partition0 as not the leader. Partition partition0 = mock(Partition.class); @@ -2641,7 +2594,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { // Validate when exception is thrown. CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); assertTrue(future.isDone()); assertFalse(future.isCompletedExceptionally()); @@ -2672,7 +2625,7 @@ public void testReplicaManagerFetchException() { String groupId = "grp"; Uuid memberId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0); + LinkedHashSet topicIdPartitions = orderedSet(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -2698,7 +2651,7 @@ public void testReplicaManagerFetchException() { CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Exception"); // Verify that the share partition is still in the cache on exception. assertEquals(1, partitionCacheMap.size()); @@ -2707,7 +2660,7 @@ public void testReplicaManagerFetchException() { doThrow(new NotLeaderOrFollowerException("Leader exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Leader exception"); assertTrue(partitionCacheMap.isEmpty()); // Should have 2 fetch recorded and 2 failures. 
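A recurring mechanical change in the purgatory tests above is that code which previously walked partitionMaxBytes.keySet() now iterates the partition set directly, for example when deriving the delayed-fetch watch keys. A condensed, runnable sketch of that pattern; DelayedShareFetchGroupKey is a real server-side class whose definition is not part of this patch, so a stand-in record is used here:

    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;

    import org.apache.kafka.common.TopicIdPartition;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.Uuid;

    public class WatchKeySketch {
        // Stand-in for DelayedShareFetchGroupKey: one purgatory watch key per
        // (group, topicId, partition) triple.
        record GroupKey(String groupId, Uuid topicId, int partition) { }

        public static void main(String[] args) {
            String groupId = "grp";
            Uuid fooId = Uuid.randomUuid();
            LinkedHashSet<TopicIdPartition> topicIdPartitions = new LinkedHashSet<>();
            topicIdPartitions.add(new TopicIdPartition(fooId, new TopicPartition("foo", 0)));
            topicIdPartitions.add(new TopicIdPartition(fooId, new TopicPartition("foo", 1)));

            // Previously: partitionMaxBytes.keySet().forEach(...). With the
            // map gone, the set itself is the iteration source.
            List<GroupKey> watchKeys = new ArrayList<>();
            topicIdPartitions.forEach(tp ->
                watchKeys.add(new GroupKey(groupId, tp.topicId(), tp.partition())));
            System.out.println(watchKeys);
        }
    }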
@@ -2725,7 +2678,7 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -2760,7 +2713,7 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { CompletableFuture> future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced exception"); // Verify that tp1 is still in the cache on exception. assertEquals(1, partitionCacheMap.size()); @@ -2775,7 +2728,7 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { doThrow(new FencedStateEpochException("Fenced exception again")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - BATCH_SIZE, partitionMaxBytes); + BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, List.of(tp0, tp1), Errors.FENCED_STATE_EPOCH, "Fenced exception again"); assertTrue(partitionCacheMap.isEmpty()); // Should have 4 fetch recorded (2 fetch and 2 topics) and 3 failures as sp1 was not acquired @@ -2795,7 +2748,7 @@ public void testListenerRegistration() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, tp0, tp1); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); @@ -2807,7 +2760,7 @@ public void testListenerRegistration() { .build(); CompletableFuture> future = sharePartitionManager.fetchMessages( - groupId, memberId.toString(), FETCH_PARAMS, 0, BATCH_SIZE, partitionMaxBytes); + groupId, memberId.toString(), FETCH_PARAMS, 0, BATCH_SIZE, topicIdPartitions); assertTrue(future.isDone()); // Validate that the listener is registered. verify(mockReplicaManager, times(2)).maybeAddListener(any(), any()); @@ -2864,50 +2817,49 @@ public void testFetchMessagesRotatePartitions() { TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); - LinkedHashMap partitionMaxBytes = orderedMap(PARTITION_MAX_BYTES, - tp0, tp1, tp2, tp3, tp4, tp5, tp6); + LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2, tp3, tp4, tp5, tp6); sharePartitionManager = Mockito.spy(SharePartitionManagerBuilder.builder().withBrokerTopicStats(brokerTopicStats).build()); // Capture the arguments passed to processShareFetch. 
ArgumentCaptor<ShareFetch> captor = ArgumentCaptor.forClass(ShareFetch.class); sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 0, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); verify(sharePartitionManager, times(1)).processShareFetch(captor.capture()); // Verify the partitions rotation, no rotation. ShareFetch resultShareFetch = captor.getValue(); - validateRotatedMapEquals(resultShareFetch.partitionMaxBytes(), partitionMaxBytes, 0); + validateRotatedMapEquals(resultShareFetch.topicIdPartitions(), topicIdPartitions, 0); // Single rotation. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 1, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); verify(sharePartitionManager, times(2)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 1. resultShareFetch = captor.getValue(); - validateRotatedMapEquals(partitionMaxBytes, resultShareFetch.partitionMaxBytes(), 1); + validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); // Rotation by 3, less than the number of partitions. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 3, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); verify(sharePartitionManager, times(3)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 3. resultShareFetch = captor.getValue(); - validateRotatedMapEquals(partitionMaxBytes, resultShareFetch.partitionMaxBytes(), 3); + validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 3); // Rotation by 12, more than the number of partitions. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 12, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); verify(sharePartitionManager, times(4)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 5 (12 % 7). resultShareFetch = captor.getValue(); - validateRotatedMapEquals(partitionMaxBytes, resultShareFetch.partitionMaxBytes(), 5); + validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 5); // Rotation by Integer.MAX_VALUE, boundary test. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, Integer.MAX_VALUE, BATCH_SIZE, - partitionMaxBytes); + topicIdPartitions); verify(sharePartitionManager, times(5)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 1 (2147483647 % 7). resultShareFetch = captor.getValue(); - validateRotatedMapEquals(partitionMaxBytes, resultShareFetch.partitionMaxBytes(), 1); + validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); } private Timer systemTimerReaper() { @@ -2969,7 +2921,7 @@ private void mockUpdateAndGenerateResponseData(ShareFetchContext context, String if (context.getClass() == ShareSessionContext.class) { ShareSessionContext shareSessionContext = (ShareSessionContext) context; if (!shareSessionContext.isSubsequent()) { - shareSessionContext.shareFetchData().forEach((topicIdPartition, sharePartitionData) -> data.put(topicIdPartition, + shareSessionContext.shareFetchData().forEach(topicIdPartition -> data.put(topicIdPartition, topicIdPartition.topic() == null ?
errorShareFetchResponse(Errors.UNKNOWN_TOPIC_ID.code()) : noErrorShareFetchResponse())); } else { @@ -2989,8 +2941,7 @@ private void mockUpdateAndGenerateResponseData(ShareFetchContext context, String private void assertPartitionsPresent(ShareSessionContext context, List partitions) { Set partitionsInContext = new HashSet<>(); if (!context.isSubsequent()) { - context.shareFetchData().forEach((topicIdPartition, sharePartitionData) -> - partitionsInContext.add(topicIdPartition)); + partitionsInContext.addAll(context.shareFetchData()); } else { context.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new @@ -3008,11 +2959,9 @@ private void assertErroneousAndValidTopicIdPartitions( Set expectedErroneousSet = new HashSet<>(expectedErroneous); Set expectedValidSet = new HashSet<>(expectedValid); Set actualErroneousPartitions = new HashSet<>(); - Set actualValidPartitions = new HashSet<>(); erroneousAndValidPartitionData.erroneous().forEach((topicIdPartition, partitionData) -> actualErroneousPartitions.add(topicIdPartition)); - erroneousAndValidPartitionData.validTopicIdPartitions().forEach((topicIdPartition, partitionData) -> - actualValidPartitions.add(topicIdPartition)); + Set actualValidPartitions = new HashSet<>(erroneousAndValidPartitionData.validTopicIdPartitions()); assertEquals(expectedErroneousSet, actualErroneousPartitions); assertEquals(expectedValidSet, actualValidPartitions); } diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index 5dbfca8d3354a..ba55c687631ca 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -3918,10 +3918,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) - ).asJava) + new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex))).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -3991,7 +3989,7 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false)) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false)) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenThrow( Errors.INVALID_REQUEST.exception() @@ -4095,9 +4093,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) 
).thenThrow(Errors.INVALID_REQUEST.exception) @@ -4187,9 +4184,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ) @@ -4255,7 +4251,7 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false)) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false)) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( @@ -4319,7 +4315,7 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false)) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false)) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( @@ -4384,9 +4380,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ) @@ -4454,9 +4449,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenThrow(Errors.SHARE_SESSION_NOT_FOUND.exception) @@ -4547,9 +4541,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenThrow(Errors.INVALID_SHARE_SESSION_EPOCH.exception) @@ -4685,13 +4678,12 @@ class KafkaApisTest 
extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false) ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) @@ -4935,39 +4927,35 @@ class KafkaApisTest extends Logging { val cachedSharePartitions1 = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), false + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), false + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes), false + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes), false + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareFetchRequest.SharePartitionData(topicId3, partitionMaxBytes), false + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), false )) val cachedSharePartitions2 = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions2.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareFetchRequest.SharePartitionData(topicId3, partitionMaxBytes), false + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), false )) cachedSharePartitions2.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), new ShareFetchRequest.SharePartitionData(topicId4, partitionMaxBytes), false + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), false )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> - new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), - new 
TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) -> - new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) -> - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) -> - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions1, 0L, 0L, 2)) @@ -5367,19 +5355,10 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() - validPartitions.put( - tp1, - new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) - ) - validPartitions.put( - tp2, - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) - ) - validPartitions.put( - tp3, - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) - ) + val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + validPartitions.add(tp1) + validPartitions.add(tp2) + validPartitions.add(tp3) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5522,11 +5501,8 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) ) - val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() - validPartitions.put( - tp1, - new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) - ) + val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + validPartitions.add(tp1) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5658,19 +5634,10 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() - validPartitions.put( - tp1, - new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) - ) - validPartitions.put( - tp2, - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) - ) - validPartitions.put( - tp3, - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) - ) + val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + validPartitions.add(tp1) + validPartitions.add(tp2) + validPartitions.add(tp3) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5817,23 +5784,11 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.Map[TopicIdPartition, 
ShareFetchRequest.SharePartitionData] = new util.HashMap() - validPartitions.put( - tp1, - new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) - ) - validPartitions.put( - tp2, - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) - ) - validPartitions.put( - tp3, - new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) - ) - validPartitions.put( - tp4, - new ShareFetchRequest.SharePartitionData(topicId3, partitionMaxBytes) - ) + val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + validPartitions.add(tp1) + validPartitions.add(tp2) + validPartitions.add(tp3) + validPartitions.add(tp4) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -6013,13 +5968,12 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), false )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> - new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 59d9b4b1a6361..f816da4358de0 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -6083,8 +6083,8 @@ class ReplicaManagerTest { try { val groupId = "grp" val tp1 = new TopicIdPartition(Uuid.randomUuid, new TopicPartition("foo1", 0)) - val partitionMaxBytes = new util.LinkedHashMap[TopicIdPartition, Integer] - partitionMaxBytes.put(tp1, 1000) + val topicPartitions = new util.LinkedHashSet[TopicIdPartition] + topicPartitions.add(tp1) val sp1 = mock(classOf[SharePartition]) val sharePartitions = new util.LinkedHashMap[TopicIdPartition, SharePartition] @@ -6096,7 +6096,7 @@ class ReplicaManagerTest { groupId, Uuid.randomUuid.toString, future, - partitionMaxBytes, + topicPartitions, 500, 100, brokerTopicStats) @@ -6110,7 +6110,7 @@ class ReplicaManagerTest { time)) val delayedShareFetchWatchKeys : util.List[DelayedShareFetchKey] = new util.ArrayList[DelayedShareFetchKey] - partitionMaxBytes.keySet.forEach((topicIdPartition: TopicIdPartition) => delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId, topicIdPartition.partition))) + topicPartitions.forEach((topicIdPartition: TopicIdPartition) => delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId, topicIdPartition.partition))) // You cannot acquire records for sp1, so request will be stored in purgatory waiting for timeout. 
when(sp1.maybeAcquireFetchLock).thenReturn(false) diff --git a/server/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java b/server/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java index aab42c901ddba..8cfc936503969 100644 --- a/server/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java +++ b/server/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; @@ -42,33 +41,30 @@ public class CachedSharePartition implements ImplicitLinkedHashCollection.Elemen private final Uuid topicId; private final int partition; private final Optional leaderEpoch; - private int maxBytes; private boolean requiresUpdateInResponse; private int cachedNext = ImplicitLinkedHashCollection.INVALID_INDEX; private int cachedPrev = ImplicitLinkedHashCollection.INVALID_INDEX; - private CachedSharePartition(String topic, Uuid topicId, int partition, int maxBytes, Optional leaderEpoch, + private CachedSharePartition(String topic, Uuid topicId, int partition, Optional leaderEpoch, boolean requiresUpdateInResponse) { this.topic = topic; this.topicId = topicId; this.partition = partition; - this.maxBytes = maxBytes; this.leaderEpoch = leaderEpoch; this.requiresUpdateInResponse = requiresUpdateInResponse; } public CachedSharePartition(String topic, Uuid topicId, int partition, boolean requiresUpdateInResponse) { - this(topic, topicId, partition, -1, Optional.empty(), requiresUpdateInResponse); + this(topic, topicId, partition, Optional.empty(), requiresUpdateInResponse); } public CachedSharePartition(TopicIdPartition topicIdPartition) { this(topicIdPartition.topic(), topicIdPartition.topicId(), topicIdPartition.partition(), false); } - public CachedSharePartition(TopicIdPartition topicIdPartition, ShareFetchRequest.SharePartitionData reqData, - boolean requiresUpdateInResponse) { - this(topicIdPartition.topic(), topicIdPartition.topicId(), topicIdPartition.partition(), reqData.maxBytes, + public CachedSharePartition(TopicIdPartition topicIdPartition, boolean requiresUpdateInResponse) { + this(topicIdPartition.topic(), topicIdPartition.topicId(), topicIdPartition.partition(), Optional.empty(), requiresUpdateInResponse); } @@ -84,15 +80,6 @@ public int partition() { return partition; } - public ShareFetchRequest.SharePartitionData reqData() { - return new ShareFetchRequest.SharePartitionData(topicId, maxBytes); - } - - public void updateRequestParams(ShareFetchRequest.SharePartitionData reqData) { - // Update our cached request parameters. - maxBytes = reqData.maxBytes; - } - /** * Determine whether the specified cached partition should be included in the ShareFetchResponse we send back to * the fetcher and update it if requested. 
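With maxBytes gone, CachedSharePartition no longer carries any request parameters: reqData() and updateRequestParams() are deleted above, and the constructor chain collapses to the partition identity plus the response-update flag. An abridged, compilable model of the surviving state under those assumptions (the real class also implements ImplicitLinkedHashCollection.Element and keeps the intrusive-list indices shown in the hunk above):

    import java.util.Optional;

    import org.apache.kafka.common.TopicIdPartition;
    import org.apache.kafka.common.Uuid;

    // Abridged model of the cache entry after this patch: the identity fields
    // are final, and the only mutable state left is the response-update flag.
    final class CachedSharePartitionModel {
        private final String topic;
        private final Uuid topicId;
        private final int partition;
        private final Optional<Integer> leaderEpoch;
        private boolean requiresUpdateInResponse;

        CachedSharePartitionModel(TopicIdPartition topicIdPartition, boolean requiresUpdateInResponse) {
            // The SharePartitionData overload is removed; construction needs
            // nothing beyond the partition identity and the flag.
            this(topicIdPartition.topic(), topicIdPartition.topicId(), topicIdPartition.partition(),
                Optional.empty(), requiresUpdateInResponse);
        }

        private CachedSharePartitionModel(String topic, Uuid topicId, int partition,
                                          Optional<Integer> leaderEpoch,
                                          boolean requiresUpdateInResponse) {
            this.topic = topic;
            this.topicId = topicId;
            this.partition = partition;
            this.leaderEpoch = leaderEpoch;
            this.requiresUpdateInResponse = requiresUpdateInResponse;
        }
    }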
@@ -128,7 +115,6 @@ public String toString() { return "CachedSharePartition(topic=" + topic + ", topicId=" + topicId + ", partition=" + partition + - ", maxBytes=" + maxBytes + ", leaderEpoch=" + leaderEpoch + ")"; } diff --git a/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java index 9918fbdf48e30..90d9fd511cba5 100644 --- a/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java +++ b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java @@ -20,47 +20,48 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; /** * Helper class to return the erroneous partitions and valid partition data */ public class ErroneousAndValidPartitionData { private final Map erroneous; - private final Map validTopicIdPartitions; + private final Set validTopicIdPartitions; public ErroneousAndValidPartitionData(Map erroneous, - Map validTopicIdPartitions) { + Set validTopicIdPartitions) { this.erroneous = erroneous; this.validTopicIdPartitions = validTopicIdPartitions; } - public ErroneousAndValidPartitionData(Map shareFetchData) { + public ErroneousAndValidPartitionData(Set shareFetchData) { erroneous = new HashMap<>(); - validTopicIdPartitions = new HashMap<>(); - shareFetchData.forEach((topicIdPartition, sharePartitionData) -> { + validTopicIdPartitions = new HashSet<>(); + shareFetchData.forEach(topicIdPartition -> { if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { - validTopicIdPartitions.put(topicIdPartition, sharePartitionData); + validTopicIdPartitions.add(topicIdPartition); } }); } public ErroneousAndValidPartitionData() { this.erroneous = new HashMap<>(); - this.validTopicIdPartitions = new HashMap<>(); + this.validTopicIdPartitions = new HashSet<>(); } public Map erroneous() { return erroneous; } - public Map validTopicIdPartitions() { + public Set validTopicIdPartitions() { return validTopicIdPartitions; } } diff --git a/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java index 18df489af338a..77388b9a32ae6 100644 --- a/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java +++ b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java @@ -23,8 +23,6 @@ import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.ShareFetchRequest; -import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; @@ -36,11 +34,13 @@ import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; 
import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.NoSuchElementException; /** @@ -52,7 +52,7 @@ public class ShareSessionContext extends ShareFetchContext { private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent; - private Map shareFetchData; + private Set shareFetchData; private ShareSession session; /** @@ -62,7 +62,7 @@ public class ShareSessionContext extends ShareFetchContext { * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, - Map shareFetchData) { + Set shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this.isSubsequent = false; @@ -81,7 +81,7 @@ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession sessio } // Visible for testing - public Map shareFetchData() { + public Set shareFetchData() { return shareFetchData; } @@ -229,17 +229,16 @@ public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { return new ErroneousAndValidPartitionData(shareFetchData); } Map erroneous = new HashMap<>(); - Map valid = new HashMap<>(); + Set valid = new HashSet<>(); // Take the session lock and iterate over all the cached partitions. synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); - ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { - valid.put(topicIdPartition, reqData); + valid.add(topicIdPartition); } }); return new ErroneousAndValidPartitionData(erroneous, valid); diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index 42fdaa58cce3f..aa6b3399c3715 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -18,9 +18,8 @@ import org.apache.kafka.common.TopicIdPartition; -import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.Locale; -import java.util.Map; /** * The PartitionRotateStrategy is used to rotate the partitions based on the respective strategy. 
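Only the container changes in the rotation hunks below; the round-robin step itself is preserved. As a self-contained illustration of that rotation (assuming, as in the surrounding code, a split point derived from the session epoch, e.g. epoch modulo partition count; plain strings stand in for TopicIdPartition):

```java
import java.util.LinkedHashSet;

class RotateSketch {
    // Rotate an insertion-ordered set left by rotateAt positions: the first
    // rotateAt elements become the suffix, mirroring rotateRoundRobin's
    // suffixPartitions/rotatedPartitions handling.
    static <T> LinkedHashSet<T> rotate(LinkedHashSet<T> input, int rotateAt) {
        LinkedHashSet<T> suffix = new LinkedHashSet<>(rotateAt);
        LinkedHashSet<T> rotated = new LinkedHashSet<>(input.size());
        int i = 0;
        for (T item : input) {
            if (i < rotateAt) {
                suffix.add(item);  // trails the result
            } else {
                rotated.add(item); // leads the result
            }
            i++;
        }
        rotated.addAll(suffix);
        return rotated;
    }

    public static void main(String[] args) {
        LinkedHashSet<String> partitions = new LinkedHashSet<>();
        partitions.add("t-0");
        partitions.add("t-1");
        partitions.add("t-2");
        // A session epoch of 1 over 3 partitions rotates at 1 % 3 = 1.
        System.out.println(rotate(partitions, 1)); // [t-1, t-2, t-0]
    }
}
```

Each fetch in a session thus starts from a different partition, spreading acquisition across the requested partitions rather than always favouring the first one.
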
@@ -48,7 +47,7 @@ public String toString() { * * @return the rotated topicIdPartitions */ - LinkedHashMap rotate(LinkedHashMap topicIdPartitions, PartitionRotateMetadata metadata); + LinkedHashSet rotate(LinkedHashSet topicIdPartitions, PartitionRotateMetadata metadata); static PartitionRotateStrategy type(StrategyType type) { return switch (type) { @@ -64,8 +63,8 @@ static PartitionRotateStrategy type(StrategyType type) { * * @return the rotated topicIdPartitions */ - static LinkedHashMap rotateRoundRobin( - LinkedHashMap topicIdPartitions, + static LinkedHashSet rotateRoundRobin( + LinkedHashSet topicIdPartitions, PartitionRotateMetadata metadata ) { if (topicIdPartitions.isEmpty() || topicIdPartitions.size() == 1 || metadata.sessionEpoch < 1) { @@ -82,18 +81,18 @@ static LinkedHashMap rotateRoundRobin( // TODO: Once the partition max bytes is removed then the partition will be a linked list and rotation // will be a simple operation. Else consider using ImplicitLinkedHashCollection. - LinkedHashMap suffixPartitions = new LinkedHashMap<>(rotateAt); - LinkedHashMap rotatedPartitions = new LinkedHashMap<>(topicIdPartitions.size()); + LinkedHashSet suffixPartitions = new LinkedHashSet<>(rotateAt); + LinkedHashSet rotatedPartitions = new LinkedHashSet<>(topicIdPartitions.size()); int i = 0; - for (Map.Entry entry : topicIdPartitions.entrySet()) { + for (TopicIdPartition topicIdPartition : topicIdPartitions) { if (i < rotateAt) { - suffixPartitions.put(entry.getKey(), entry.getValue()); + suffixPartitions.add(topicIdPartition); } else { - rotatedPartitions.put(entry.getKey(), entry.getValue()); + rotatedPartitions.add(topicIdPartition); } i++; } - rotatedPartitions.putAll(suffixPartitions); + rotatedPartitions.addAll(suffixPartitions); return rotatedPartitions; } diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java index 521e080726801..f1375b4fe8eb4 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java @@ -26,7 +26,6 @@ import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; @@ -55,9 +54,9 @@ public class ShareFetch { */ private final String memberId; /** - * The maximum number of bytes that can be fetched for each partition. + * The topic partitions to be fetched. */ - private final LinkedHashMap partitionMaxBytes; + private final LinkedHashSet topicIdPartitions; /** * The batch size of the fetch request. 
*/ @@ -81,7 +80,7 @@ public ShareFetch( String groupId, String memberId, CompletableFuture> future, - LinkedHashMap partitionMaxBytes, + LinkedHashSet topicIdPartitions, int batchSize, int maxFetchRecords, BrokerTopicStats brokerTopicStats @@ -90,7 +89,7 @@ public ShareFetch( this.groupId = groupId; this.memberId = memberId; this.future = future; - this.partitionMaxBytes = partitionMaxBytes; + this.topicIdPartitions = topicIdPartitions; this.batchSize = batchSize; this.maxFetchRecords = maxFetchRecords; this.brokerTopicStats = brokerTopicStats; @@ -104,8 +103,8 @@ public String memberId() { return memberId; } - public LinkedHashMap partitionMaxBytes() { - return partitionMaxBytes; + public LinkedHashSet topicIdPartitions() { + return topicIdPartitions; } public FetchParams fetchParams() { @@ -151,7 +150,7 @@ public boolean isCompleted() { * @return true if all the partitions in the request have errored, false otherwise. */ public synchronized boolean errorInAllPartitions() { - return erroneous != null && erroneous.size() == partitionMaxBytes().size(); + return erroneous != null && erroneous.size() == topicIdPartitions().size(); } /** diff --git a/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java index 97e46d6212659..1dbcc485954f6 100644 --- a/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java +++ b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java @@ -18,7 +18,6 @@ package org.apache.kafka.server.share.session; import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.server.share.CachedSharePartition; @@ -27,6 +26,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; public class ShareSession { @@ -110,25 +110,20 @@ public synchronized LastUsedKey lastUsedKey() { return new LastUsedKey(key, lastUsedMs); } - // Visible for testing - public synchronized long creationMs() { - return creationMs; - } - // Update the cached partition data based on the request. 
- public synchronized Map> update(Map shareFetchData, List toForget) { + public synchronized Map> update( + Set shareFetchData, + List toForget) { List added = new ArrayList<>(); List updated = new ArrayList<>(); List removed = new ArrayList<>(); - shareFetchData.forEach((topicIdPartition, sharePartitionData) -> { - CachedSharePartition cachedSharePartitionKey = new CachedSharePartition(topicIdPartition, sharePartitionData, true); + shareFetchData.forEach(topicIdPartition -> { + CachedSharePartition cachedSharePartitionKey = new CachedSharePartition(topicIdPartition, true); CachedSharePartition cachedPart = partitionMap.find(cachedSharePartitionKey); if (cachedPart == null) { partitionMap.mustAdd(cachedSharePartitionKey); added.add(topicIdPartition); } else { - cachedPart.updateRequestParams(sharePartitionData); updated.add(topicIdPartition); } }); diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java index f05490c8747fe..0fc66587107d4 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Test; -import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -35,9 +35,9 @@ public class PartitionRotateStrategyTest { @Test public void testRoundRobinStrategy() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); - LinkedHashMap partitions = createPartitions(3); + LinkedHashSet partitions = createPartitions(3); - LinkedHashMap result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); + LinkedHashSet result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); assertEquals(3, result.size()); validateRotatedMapEquals(partitions, result, 1); @@ -61,8 +61,8 @@ public void testRoundRobinStrategy() { public void testRoundRobinStrategyWithSpecialSessionEpochs() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); - LinkedHashMap partitions = createPartitions(3); - LinkedHashMap result = strategy.rotate( + LinkedHashSet partitions = createPartitions(3); + LinkedHashSet result = strategy.rotate( partitions, new PartitionRotateMetadata(ShareRequestMetadata.INITIAL_EPOCH)); assertEquals(3, result.size()); @@ -79,20 +79,20 @@ public void testRoundRobinStrategyWithSpecialSessionEpochs() { public void testRoundRobinStrategyWithEmptyPartitions() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); // Empty partitions. - LinkedHashMap result = strategy.rotate(new LinkedHashMap<>(), new PartitionRotateMetadata(5)); + LinkedHashSet result = strategy.rotate(new LinkedHashSet<>(), new PartitionRotateMetadata(5)); // The result should be empty. assertTrue(result.isEmpty()); } /** - * Create an ordered map of TopicIdPartition to partition max bytes. + * Create an ordered map of topic partitions. * @param size The number of topic-partitions to create. - * @return The ordered map of TopicIdPartition to partition max bytes. + * @return The ordered set of topic partitions. 
*/ - private LinkedHashMap createPartitions(int size) { - LinkedHashMap partitions = new LinkedHashMap<>(); + private LinkedHashSet createPartitions(int size) { + LinkedHashSet partitions = new LinkedHashSet<>(); for (int i = 0; i < size; i++) { - partitions.put(new TopicIdPartition(Uuid.randomUuid(), i, "foo" + i), 1 /* partition max bytes*/); + partitions.add(new TopicIdPartition(Uuid.randomUuid(), i, "foo" + i)); } return partitions; } diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java index faa01d14938c8..df66550672a26 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java @@ -32,7 +32,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedMap; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -60,7 +60,7 @@ public void tearDown() throws Exception { public void testErrorInAllPartitions() { TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - orderedMap(10, topicIdPartition), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition), BATCH_SIZE, 100, brokerTopicStats); assertFalse(shareFetch.errorInAllPartitions()); shareFetch.addErroneous(topicIdPartition, new RuntimeException()); @@ -72,7 +72,7 @@ public void testErrorInAllPartitionsWithMultipleTopicIdPartitions() { TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - orderedMap(10, topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); assertFalse(shareFetch.errorInAllPartitions()); shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -87,7 +87,7 @@ public void testFilterErroneousTopicPartitions() { TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - orderedMap(10, topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); Set result = shareFetch.filterErroneousTopicPartitions(Set.of(topicIdPartition0, topicIdPartition1)); // No erroneous partitions, hence all partitions should be returned. 
assertEquals(2, result.size()); @@ -113,7 +113,7 @@ public void testMaybeCompleteWithErroneousTopicPartitions() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedMap(10, topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); // Add both erroneous partition and complete request. shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -134,7 +134,7 @@ public void testMaybeCompleteWithPartialErroneousTopicPartitions() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedMap(10, topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); // Add an erroneous partition and complete request. shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -154,7 +154,7 @@ public void testMaybeCompleteWithException() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedMap(10, topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); shareFetch.maybeCompleteWithException(List.of(topicIdPartition0, topicIdPartition1), new RuntimeException()); assertEquals(2, future.join().size()); @@ -173,7 +173,7 @@ public void testMaybeCompleteWithExceptionPartialFailure() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedMap(10, topicIdPartition0, topicIdPartition1, topicIdPartition2), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1, topicIdPartition2), BATCH_SIZE, 100, brokerTopicStats); shareFetch.maybeCompleteWithException(List.of(topicIdPartition0, topicIdPartition2), new RuntimeException()); assertEquals(2, future.join().size()); @@ -191,7 +191,7 @@ public void testMaybeCompleteWithExceptionWithExistingErroneousTopicPartition() CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedMap(10, topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); shareFetch.maybeCompleteWithException(List.of(topicIdPartition1), new RuntimeException()); diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java index db7f15ef4c376..5e8e5ceb6eb01 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java @@ -30,15 +30,14 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.LinkedHashMap; +import java.util.Collections; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import static org.apache.kafka.test.TestUtils.tempFile; 
import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; /** * Helper functions for writing share fetch unit tests. @@ -46,18 +45,15 @@ public class ShareFetchTestUtils { /** - * Create an ordered map of TopicIdPartition to partition max bytes. + * Create an ordered set of topic partitions. * - * @param partitionMaxBytes The maximum number of bytes that can be fetched for each partition. - * @param topicIdPartitions The topic partitions to create the map for. - * @return The ordered map of TopicIdPartition to partition max bytes. + * @param topicIdPartitions The topic partitions to create the set for. + * @return The ordered set of topic partitions. */ - public static LinkedHashMap orderedMap(int partitionMaxBytes, TopicIdPartition... topicIdPartitions) { - LinkedHashMap map = new LinkedHashMap<>(); - for (TopicIdPartition tp : topicIdPartitions) { - map.put(tp, partitionMaxBytes); - } - return map; + public static LinkedHashSet orderedSet(TopicIdPartition... topicIdPartitions) { + LinkedHashSet set = new LinkedHashSet<>(); + Collections.addAll(set, topicIdPartitions); + return set; } /** @@ -68,27 +64,22 @@ public static LinkedHashMap orderedMap(int partitionM * @param rotationAt The position to rotate the keys at. */ public static void validateRotatedMapEquals( - LinkedHashMap original, - LinkedHashMap result, + LinkedHashSet original, + LinkedHashSet result, int rotationAt ) { - Set originalKeys = original.keySet(); - Set resultKeys = result.keySet(); - TopicIdPartition[] originalKeysArray = new TopicIdPartition[originalKeys.size()]; + TopicIdPartition[] originalKeysArray = new TopicIdPartition[original.size()]; int i = 0; - for (TopicIdPartition key : originalKeys) { + for (TopicIdPartition key : original) { if (i < rotationAt) { - originalKeysArray[originalKeys.size() - rotationAt + i] = key; + originalKeysArray[original.size() - rotationAt + i] = key; } else { originalKeysArray[i - rotationAt] = key; } i++; } - assertArrayEquals(originalKeysArray, resultKeys.toArray()); - for (TopicIdPartition key : originalKeys) { - assertEquals(original.get(key), result.get(key)); - } + assertArrayEquals(originalKeysArray, result.toArray()); } /** From cd63101612b6f1d9fdfaf0f6810c869feb659af3 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Fri, 7 Mar 2025 16:25:20 +0530 Subject: [PATCH 02/11] Removed spotless checkstyle failure --- .../apache/kafka/server/share/context/ShareSessionContext.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java index 77388b9a32ae6..61abe38053faa 100644 --- a/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java +++ b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java @@ -40,8 +40,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import java.util.NoSuchElementException; +import java.util.Set; /** * The context for a share session fetch request. 
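For the test-helper change that closed out the first patch: validateRotatedMapEquals now asserts that the result set's iteration order equals the original order rotated left by rotationAt. A self-contained sketch of how that expected ordering is constructed (names here are illustrative):

```java
import java.util.Arrays;
import java.util.LinkedHashSet;

class RotationAssertSketch {
    // Mirrors the expected-array construction in validateRotatedMapEquals:
    // the first rotationAt elements of the original order move to the tail,
    // and every other element shifts forward by rotationAt positions.
    static <T> Object[] expectedAfterRotation(LinkedHashSet<T> original, int rotationAt) {
        Object[] expected = new Object[original.size()];
        int i = 0;
        for (T key : original) {
            if (i < rotationAt) {
                expected[original.size() - rotationAt + i] = key;
            } else {
                expected[i - rotationAt] = key;
            }
            i++;
        }
        return expected;
    }

    public static void main(String[] args) {
        LinkedHashSet<String> original = new LinkedHashSet<>(Arrays.asList("p0", "p1", "p2", "p3"));
        // Rotation at 1 expects [p1, p2, p3, p0].
        System.out.println(Arrays.toString(expectedAfterRotation(original, 1)));
    }
}
```

Since both the original and the rotated result are insertion-ordered sets, a single assertArrayEquals over the iteration orders suffices; the per-key value comparison the old map-based helper needed is gone.
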
From 12c277192de9f99be59b9485f4a9e233c62545ca Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Fri, 7 Mar 2025 16:59:53 +0530 Subject: [PATCH 03/11] Minor refactor --- .../src/main/java/kafka/server/share/SharePartitionManager.java | 2 +- .../test/java/kafka/server/share/SharePartitionManagerTest.java | 2 +- .../kafka/server/share/fetch/PartitionRotateStrategy.java | 2 -- .../kafka/server/share/fetch/PartitionRotateStrategyTest.java | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index fc9cd115b8775..2f2ab11c9495d 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -427,7 +427,7 @@ private CompletableFuture reqData1 = orderedSet(tp0, tp1); String groupId = "grp"; diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index aa6b3399c3715..9c517974e553e 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -79,8 +79,6 @@ static LinkedHashSet rotateRoundRobin( return topicIdPartitions; } - // TODO: Once the partition max bytes is removed then the partition will be a linked list and rotation - // will be a simple operation. Else consider using ImplicitLinkedHashCollection. LinkedHashSet suffixPartitions = new LinkedHashSet<>(rotateAt); LinkedHashSet rotatedPartitions = new LinkedHashSet<>(topicIdPartitions.size()); int i = 0; diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java index 0fc66587107d4..103b7e7785f3e 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java @@ -85,7 +85,7 @@ public void testRoundRobinStrategyWithEmptyPartitions() { } /** - * Create an ordered map of topic partitions. + * Create an ordered set of topic partitions. * @param size The number of topic-partitions to create. * @return The ordered set of topic partitions. 
*/

From 3fd43479bac354cbc1719480236092b118e3385a Mon Sep 17 00:00:00 2001
From: adixitconfluent
Date: Sat, 8 Mar 2025 21:50:46 +0530
Subject: [PATCH 04/11] Addressed Apoorv's round 1 review comment

---
 .../common/requests/ShareFetchRequest.java    |   8 +-
 .../server/share/SharePartitionManager.java   |   7 +-
 .../main/scala/kafka/server/KafkaApis.scala   |   2 +-
 .../server/share/DelayedShareFetchTest.java   |  62 ++++-----
 .../server/share/ShareFetchUtilsTest.java     |  12 +-
 .../share/SharePartitionManagerTest.java      | 121 +++++++++---------
 .../unit/kafka/server/KafkaApisTest.scala     |  26 ++--
 .../kafka/server/ReplicaManagerTest.scala     |   2 +-
 .../share/ErroneousAndValidPartitionData.java |  16 +--
 .../share/context/ShareSessionContext.java    |  11 +-
 .../share/fetch/PartitionRotateStrategy.java  |  12 +-
 .../kafka/server/share/fetch/ShareFetch.java  |   7 +-
 .../server/share/session/ShareSession.java    |   3 +-
 .../fetch/PartitionRotateStrategyTest.java    |  16 +--
 .../server/share/fetch/ShareFetchTest.java    |  18 +--
 .../share/fetch/ShareFetchTestUtils.java      |  20 +--
 16 files changed, 169 insertions(+), 174 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java
index de5f0adcf83f3..9e80ab4e388af 100644
--- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java
+++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java
@@ -28,10 +28,8 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 public class ShareFetchRequest extends AbstractRequest {
 
@@ -151,7 +149,7 @@ public String toString() {
     }
 
     private final ShareFetchRequestData data;
-    private volatile LinkedHashSet shareFetchData = null;
+    private volatile ArrayList shareFetchData = null;
     private volatile List toForget = null;
 
     public ShareFetchRequest(ShareFetchRequestData data, short version) {
@@ -191,13 +189,13 @@ public int maxWait() {
         return data.maxWaitMs();
     }
 
-    public Set shareFetchData(Map topicNames) {
+    public List shareFetchData(Map topicNames) {
        if (shareFetchData == null) {
            synchronized (this) {
                if (shareFetchData == null) {
                    // Assigning the lazy-initialized `shareFetchData` in the last step
                    // to avoid other threads accessing a half-initialized object.
- final LinkedHashSet shareFetchDataTmp = new LinkedHashSet<>(); + final ArrayList shareFetchDataTmp = new ArrayList<>(); data.topics().forEach(shareFetchTopic -> { String name = topicNames.get(shareFetchTopic.topicId()); shareFetchTopic.partitions().forEach(shareFetchPartition -> { diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index 2f2ab11c9495d..2640348783803 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -67,7 +67,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -258,12 +257,12 @@ public CompletableFuture> fetchMessages( FetchParams fetchParams, int sessionEpoch, int batchSize, - LinkedHashSet topicPartitions + ArrayList topicPartitions ) { log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}", topicPartitions, groupId, fetchParams); - LinkedHashSet topicIdPartitions = PartitionRotateStrategy + ArrayList topicIdPartitions = PartitionRotateStrategy .type(PartitionRotateStrategy.StrategyType.ROUND_ROBIN) .rotate(topicPartitions, new PartitionRotateMetadata(sessionEpoch)); @@ -433,7 +432,7 @@ private CompletableFuture shareFetchData, + public ShareFetchContext newContext(String groupId, List shareFetchData, List toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) { ShareFetchContext context; // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 9d8509d2ef8c2..e721d486f4672 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -3135,7 +3135,7 @@ class KafkaApis(val requestChannel: RequestChannel, val erroneous = mutable.Map.empty[TopicIdPartition, ShareFetchResponseData.PartitionData] erroneousAndValidPartitionData.erroneous.forEach { (topicIdPartition, partitionData) => erroneous.put(topicIdPartition, partitionData) } - val interestedTopicPartitions = new util.LinkedHashSet[TopicIdPartition] + val interestedTopicPartitions = new util.ArrayList[TopicIdPartition] erroneousAndValidPartitionData.validTopicIdPartitions.forEach { case topicIdPartition => if (!authorizedTopics.contains(topicIdPartition.topicPartition.topic)) diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index 7c645f43c1026..5e0f3e721fa7c 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -72,8 +72,8 @@ import static kafka.server.share.SharePartitionManagerTest.DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL; import static kafka.server.share.SharePartitionManagerTest.buildLogReadResult; import static kafka.server.share.SharePartitionManagerTest.mockReplicaManagerDelayedShareFetch; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; import static 
org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; @@ -132,7 +132,7 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(false); @@ -179,7 +179,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -196,7 +196,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { LogOffsetMetadata hwmOffsetMetadata = new LogOffsetMetadata(1, 1, 1); mockTopicIdPartitionFetchBytes(replicaManager, tp0, hwmOffsetMetadata); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); BiConsumer exceptionHandler = mockExceptionHandler(); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp0)); @@ -251,7 +251,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -303,14 +303,14 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); 
when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); @@ -363,7 +363,7 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); @@ -416,14 +416,14 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp0)); @@ -472,7 +472,7 @@ public void testToCompleteAnAlreadyCompletedFuture() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); @@ -518,7 +518,7 @@ public void testForceCompleteTriggersDelayedActionsQueue() { TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(topicId, new TopicPartition("foo", 2)); - LinkedHashSet topicIdPartitions1 = orderedSet(tp0, tp1); + ArrayList topicIdPartitions1 = arrayList(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -560,12 +560,12 @@ public void testForceCompleteTriggersDelayedActionsQueue() { assertTrue(delayedShareFetch1.lock().tryLock()); delayedShareFetch1.lock().unlock(); - LinkedHashSet topicIdPartitions2 = orderedSet(tp0, tp1); + ArrayList topicIdPartitions2 = arrayList(tp0, tp1); ShareFetch shareFetch2 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), new CompletableFuture<>(), topicIdPartitions2, BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> 
buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp1)); @@ -620,7 +620,7 @@ public void testCombineLogReadResponse() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, orderedSet(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp1)); @@ -644,7 +644,7 @@ public void testCombineLogReadResponse() { when(logReadResult.info()).thenReturn(fetchDataInfo); logReadResponse.put(tp0, logReadResult); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); LinkedHashMap combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); assertEquals(topicPartitionData.keySet(), combinedLogReadResponse.keySet()); assertEquals(combinedLogReadResponse.get(tp0), logReadResponse.get(tp0)); @@ -676,13 +676,13 @@ public void testExceptionInMinBytesCalculation() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp0.acquire(any(), anyInt(), anyInt(), anyLong(), any())).thenReturn( createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); // Mocking partition object to throw an exception during min bytes calculation while calling fetchOffsetSnapshot Partition partition = mock(Partition.class); @@ -753,10 +753,10 @@ public void testTryCompleteLocksReleasedOnCompleteException() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); PartitionMaxBytesStrategy 
partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp0)); @@ -789,11 +789,11 @@ public void testLocksReleasedForCompletedFetch() { sharePartitions1.put(tp0, sp0); ReplicaManager replicaManager = mock(ReplicaManager.class); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp0)); @@ -827,7 +827,7 @@ public void testLocksReleasedAcquireException() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() @@ -856,7 +856,7 @@ public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, orderedSet(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); // partitionMaxBytesStrategy.maxBytes() function throws an exception PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); @@ -920,7 +920,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab sharePartitions.put(tp4, sp4); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( @@ -935,7 +935,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // All 5 partitions are acquirable. 
- doAnswer(invocation -> buildLogReadResult(sharePartitions.keySet())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(sharePartitions.keySet().stream().toList())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); @@ -1016,7 +1016,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab sharePartitions.put(tp4, sp4); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( @@ -1028,7 +1028,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab Set acquirableTopicPartitions = new LinkedHashSet<>(); acquirableTopicPartitions.add(tp0); acquirableTopicPartitions.add(tp1); - doAnswer(invocation -> buildLogReadResult(acquirableTopicPartitions)).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(acquirableTopicPartitions.stream().toList())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); @@ -1088,7 +1088,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), arrayList(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() @@ -1116,7 +1116,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { fetchableTopicPartitions.add(tp1); fetchableTopicPartitions.add(tp2); // We will be doing replica manager fetch only for tp1 and tp2. 
- doAnswer(invocation -> buildLogReadResult(fetchableTopicPartitions)).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(fetchableTopicPartitions.stream().toList())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); LinkedHashMap combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); assertEquals(topicPartitionData.keySet(), combinedLogReadResponse.keySet()); @@ -1146,7 +1146,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { public void testOnCompleteExecutionOnTimeout() { ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), new LinkedHashSet<>(), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), new ArrayList<>(), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) diff --git a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java index aaedcdd95dd17..0496b659c06f9 100644 --- a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java +++ b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java @@ -59,10 +59,10 @@ import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createFileRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.memoryRecordsBuilder; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -113,7 +113,7 @@ public void testProcessFetchResponse() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -174,7 +174,7 @@ public void testProcessFetchResponseWithEmptyRecords() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); List responseData = List.of( new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, @@ -213,7 +213,7 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -305,7 +305,7 @@ public void 
testProcessFetchResponseWhenNoRecordsAreAcquired() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), orderedSet(tp0), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, 100, BROKER_TOPIC_STATS); ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -373,7 +373,7 @@ public void testProcessFetchResponseWithMaxFetchRecords() throws IOException { Uuid memberId = Uuid.randomUuid(); // Set max fetch records to 10 ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId.toString(), - new CompletableFuture<>(), orderedSet(tp0, tp1), BATCH_SIZE, 10, BROKER_TOPIC_STATS); + new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 10, BROKER_TOPIC_STATS); LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); recordsPerOffset.put(0L, 1); diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index bed43f7069998..5fd25fb415f2a 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -98,7 +98,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -116,7 +115,7 @@ import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.DelayedShareFetchTest.mockTopicIdPartitionToReturnDataEqualToMinBytes; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -200,7 +199,7 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - Set reqData1 = orderedSet(tp0, tp1); + List reqData1 = arrayList(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -208,7 +207,7 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.emptyList(), reqMetadata2, true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @@ -228,7 +227,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - Set reqData1 = orderedSet(tp0, tp1); + List reqData1 = arrayList(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, 
reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -239,7 +238,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. // New context should be created successfully - Set reqData3 = orderedSet(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); + List reqData3 = arrayList(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @@ -260,7 +259,7 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - Set reqData1 = orderedSet(tp0, tp1); + List reqData1 = arrayList(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -270,7 +269,7 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH - Set reqData3 = orderedSet(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); + List reqData3 = arrayList(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); assertInstanceOf(FinalContext.class, sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true)); } @@ -295,7 +294,7 @@ public void testNewContext() { String groupId = "grp"; // Create a new share session with an initial share fetch request - Set reqData2 = orderedSet(tp0, tp1); + List reqData2 = arrayList(tp0, tp1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -325,7 +324,7 @@ public void testNewContext() { new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -347,14 +346,14 @@ public void testNewContext() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // Get the final share session. 
- ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -389,7 +388,7 @@ public void testShareSessionExpiration() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session, session 1 - Set session1req = orderedSet(foo0, foo1); + List session1req = arrayList(foo0, foo1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -412,7 +411,7 @@ public void testShareSessionExpiration() { time.sleep(500); // Create a second new share session - Set session2req = orderedSet(foo0, foo1); + List session2req = arrayList(foo0, foo1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -436,7 +435,7 @@ public void testShareSessionExpiration() { time.sleep(500); // Create a subsequent share fetch context for session 1 - ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, session1context2); @@ -446,7 +445,7 @@ public void testShareSessionExpiration() { // create one final share session to test that the least recently used entry is evicted // the second share session should be evicted because the first share session was incrementally fetched // more recently than the second session was created - Set session3req = orderedSet(foo0, foo1); + List session3req = arrayList(foo0, foo1); ShareRequestMetadata reqMetadata3 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -481,7 +480,7 @@ public void testSubsequentShareSession() { TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); // Create a new share session with foo-0 and foo-1 - Set reqData1 = orderedSet(tp0, tp1); + List reqData1 = arrayList(tp0, tp1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -498,7 +497,7 @@ public void testSubsequentShareSession() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent fetch request that removes foo-0 and adds bar-0 - Set reqData2 = orderedSet(tp2); + List reqData2 = arrayList(tp2); List removed2 = new ArrayList<>(); removed2.add(tp0); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, removed2, @@ -544,7 +543,7 @@ public void testZeroSizeShareSession() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session with foo-0 and foo-1 - Set reqData1 = orderedSet(foo0, foo1); + List reqData1 = arrayList(foo0, foo1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -565,7 +564,7 @@ public void testZeroSizeShareSession() { List removed2 = new ArrayList<>(); removed2.add(foo0); removed2.add(foo1); - 
ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), removed2, + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), removed2, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context2); @@ -590,7 +589,7 @@ public void testToForgetPartitions() { ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - Set reqData1 = orderedSet(foo, bar); + List reqData1 = arrayList(foo, bar); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); assertInstanceOf(ShareSessionContext.class, context1); @@ -598,7 +597,7 @@ public void testToForgetPartitions() { mockUpdateAndGenerateResponseData(context1, groupId, reqMetadata1.memberId()); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.singletonList(foo), + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.singletonList(foo), new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); // So foo is removed but not the others. @@ -606,7 +605,7 @@ public void testToForgetPartitions() { mockUpdateAndGenerateResponseData(context2, groupId, reqMetadata1.memberId()); - ShareFetchContext context3 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.singletonList(bar), + ShareFetchContext context3 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.singletonList(bar), new ShareRequestMetadata(reqMetadata1.memberId(), 2), true); assertPartitionsPresent((ShareSessionContext) context3, Collections.emptyList()); } @@ -631,7 +630,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { topicNames.put(barId, "bar"); // Create a new share session with foo-0 and bar-1 - Set reqData1 = orderedSet(foo, bar); + List reqData1 = arrayList(foo, bar); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -649,7 +648,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share fetch request as though no topics changed. - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context2); @@ -681,7 +680,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { String groupId = "grp"; // Create a new share session with an initial share fetch request - Set reqData2 = orderedSet(tp0, tp1, tpNull1); + List reqData2 = arrayList(tp0, tp1, tpNull1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -713,7 +712,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { new ShareRequestMetadata(Uuid.randomUuid(), 1), true)); // Continue the first share session we created. 
- ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -728,7 +727,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - Set reqData7 = orderedSet(tpNull2); + List reqData7 = arrayList(tpNull2); ShareFetchContext context7 = sharePartitionManager.newContext(groupId, reqData7, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); // Check for throttled response @@ -739,7 +738,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), Arrays.asList(tpNull1, tpNull2), Arrays.asList(tp0, tp1)); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -778,7 +777,7 @@ public void testShareFetchContextResponseSize() { String groupId = "grp"; // Create a new share session with an initial share fetch request - Set reqData2 = orderedSet(tp0, tp1); + List reqData2 = arrayList(tp0, tp1); // For response size expected value calculation ObjectSerializationCache objectSerializationCache = new ObjectSerializationCache(); @@ -813,7 +812,7 @@ public void testShareFetchContextResponseSize() { new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - Set reqData5 = orderedSet(tp2); + List reqData5 = arrayList(tp2); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); @@ -833,7 +832,7 @@ public void testShareFetchContextResponseSize() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); int respSize7 = context7.responseSize(respData2, version); @@ -844,7 +843,7 @@ public void testShareFetchContextResponseSize() { assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Get the final share session. 
- ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -888,7 +887,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { Uuid memberId2 = Uuid.randomUuid(); // Create a new share session with an initial share fetch request. - Set reqData1 = orderedSet(tp0, tp1); + List reqData1 = arrayList(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -909,7 +908,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Create a new share session with an initial share fetch request. - Set reqData2 = orderedSet(tp2); + List reqData2 = arrayList(tp2); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -928,7 +927,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(Collections.singletonList(tp2), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Continue the first share session we created. - Set reqData3 = orderedSet(tp2); + List reqData3 = arrayList(tp2); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, reqData3, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context3); @@ -943,7 +942,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Continue the second session we created. - Set reqData4 = orderedSet(tp3); + List reqData4 = arrayList(tp3); ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, Collections.singletonList(tp2), new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context4); @@ -957,7 +956,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(Collections.singletonList(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Get the final share session. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptySet(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context5.getClass()); @@ -972,7 +971,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertTrue(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Continue the second share session . 
- ShareFetchContext context6 = sharePartitionManager.newContext(groupId, Collections.emptySet(), Collections.singletonList(tp3), + ShareFetchContext context6 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.singletonList(tp3), new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); assertInstanceOf(ShareSessionContext.class, context6); assertTrue(((ShareSessionContext) context6).isSubsequent()); @@ -1020,7 +1019,7 @@ public void testMultipleSequentialShareFetches() { TopicIdPartition tp4 = new TopicIdPartition(fooId, new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(barId, new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(fooId, new TopicPartition("foo", 3)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2, tp3, tp4, tp5, tp6); + ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2, tp3, tp4, tp5, tp6); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1082,7 +1081,7 @@ public void testMultipleConcurrentShareFetches() throws InterruptedException { TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(barId, new TopicPartition("bar", 1)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2, tp3); + ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2, tp3); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1175,7 +1174,7 @@ public void testReplicaManagerFetchShouldNotProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -1218,7 +1217,7 @@ public void testReplicaManagerFetchShouldProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1710,7 +1709,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0)); TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); + ArrayList topicIdPartitions = arrayList(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1814,7 +1813,7 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); + ArrayList topicIdPartitions = arrayList(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1920,7 +1919,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 
0)); - LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); + ArrayList topicIdPartitions = arrayList(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1996,7 +1995,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { // The share session for this share group member returns tp1 and tp3, tp1 is common in both the delayed fetch request and the share session. when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(Arrays.asList(tp1, tp3)); - doAnswer(invocation -> buildLogReadResult(Set.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any())).thenReturn(new ShareAcquiredRecords(Collections.emptyList(), 0)); // Release acquired records on session close request for tp1 and tp3. sharePartitionManager.releaseSession(groupId, memberId); @@ -2020,7 +2019,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp1, tp2); + ArrayList topicIdPartitions = arrayList(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -2122,7 +2121,7 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2186,7 +2185,7 @@ public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1); + ArrayList topicIdPartitions = arrayList(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -2255,7 +2254,7 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2329,7 +2328,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2460,7 +2459,7 @@ public void testSharePartitionInitializationExceptions() 
throws Exception { public void testShareFetchProcessingExceptions() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); Map partitionCacheMap = (Map) mock(Map.class); // Throw the exception for first fetch request. Return share partition for next. @@ -2492,7 +2491,7 @@ public void testShareFetchProcessingExceptions() throws Exception { public void testSharePartitionInitializationFailure() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); // Send map to check no share partition is created. Map partitionCacheMap = new HashMap<>(); @@ -2550,7 +2549,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { TopicIdPartition tp1 = new TopicIdPartition(memberId1, new TopicPartition("foo", 1)); // For tp2, share partition initialization will fail. TopicIdPartition tp2 = new TopicIdPartition(memberId1, new TopicPartition("foo", 2)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2); + ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2); // Mark partition0 as not the leader. Partition partition0 = mock(Partition.class); @@ -2582,7 +2581,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); - doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(replicaManager) @@ -2625,7 +2624,7 @@ public void testReplicaManagerFetchException() { String groupId = "grp"; Uuid memberId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0); + ArrayList topicIdPartitions = arrayList(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -2678,7 +2677,7 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1); + ArrayList topicIdPartitions = arrayList(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -2748,7 +2747,7 @@ public void testListenerRegistration() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1); + ArrayList topicIdPartitions = arrayList(tp0, tp1); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); @@ -2817,7 +2816,7 @@ public void 
testFetchMessagesRotatePartitions() { TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); - LinkedHashSet topicIdPartitions = orderedSet(tp0, tp1, tp2, tp3, tp4, tp5, tp6); + ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2, tp3, tp4, tp5, tp6); sharePartitionManager = Mockito.spy(SharePartitionManagerBuilder.builder().withBrokerTopicStats(brokerTopicStats).build()); // Capture the arguments passed to processShareFetch. @@ -3025,7 +3024,7 @@ private void validateBrokerTopicStatsMetrics( }); } - static Seq<Tuple2<TopicIdPartition, LogReadResult>> buildLogReadResult(Set<TopicIdPartition> topicIdPartitions) { + static Seq<Tuple2<TopicIdPartition, LogReadResult>> buildLogReadResult(List<TopicIdPartition> topicIdPartitions) { List<Tuple2<TopicIdPartition, LogReadResult>> logReadResults = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.EMPTY), diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index ba55c687631ca..ddf937c8a619d 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -3918,7 +3918,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex))).asJava) ) @@ -4093,7 +4093,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenThrow(Errors.INVALID_REQUEST.exception) @@ -4184,7 +4184,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ) @@ -4380,7 +4380,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ) @@ -4449,7 +4449,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenThrow(Errors.SHARE_SESSION_NOT_FOUND.exception) @@ -4541,7 +4541,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new
ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenThrow(Errors.INVALID_SHARE_SESSION_EPOCH.exception) @@ -4682,7 +4682,7 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( @@ -4951,7 +4951,7 @@ class KafkaApisTest extends Logging { )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), @@ -5355,7 +5355,7 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() validPartitions.add(tp1) validPartitions.add(tp2) validPartitions.add(tp3) @@ -5501,7 +5501,7 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) ) - val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() validPartitions.add(tp1) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = @@ -5634,7 +5634,7 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() validPartitions.add(tp1) validPartitions.add(tp2) validPartitions.add(tp3) @@ -5784,7 +5784,7 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.Set[TopicIdPartition] = new util.HashSet() + val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() validPartitions.add(tp1) validPartitions.add(tp2) validPartitions.add(tp3) @@ -5972,7 +5972,7 @@ class KafkaApisTest extends Logging { )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Set( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index f816da4358de0..bd74798c019f9 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ 
b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -6083,7 +6083,7 @@ class ReplicaManagerTest { try { val groupId = "grp" val tp1 = new TopicIdPartition(Uuid.randomUuid, new TopicPartition("foo1", 0)) - val topicPartitions = new util.LinkedHashSet[TopicIdPartition] + val topicPartitions = new util.ArrayList[TopicIdPartition] topicPartitions.add(tp1) val sp1 = mock(classOf[SharePartition]) diff --git a/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java index 90d9fd511cba5..c7810eabfcec9 100644 --- a/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java +++ b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java @@ -22,27 +22,27 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchResponse; +import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; +import java.util.List; import java.util.Map; -import java.util.Set; /** * Helper class to return the erroneous partitions and valid partition data */ public class ErroneousAndValidPartitionData { private final Map erroneous; - private final Set validTopicIdPartitions; + private final List validTopicIdPartitions; public ErroneousAndValidPartitionData(Map erroneous, - Set validTopicIdPartitions) { + List validTopicIdPartitions) { this.erroneous = erroneous; this.validTopicIdPartitions = validTopicIdPartitions; } - public ErroneousAndValidPartitionData(Set shareFetchData) { + public ErroneousAndValidPartitionData(List shareFetchData) { erroneous = new HashMap<>(); - validTopicIdPartitions = new HashSet<>(); + validTopicIdPartitions = new ArrayList<>(); shareFetchData.forEach(topicIdPartition -> { if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); @@ -54,14 +54,14 @@ public ErroneousAndValidPartitionData(Set shareFetchData) { public ErroneousAndValidPartitionData() { this.erroneous = new HashMap<>(); - this.validTopicIdPartitions = new HashSet<>(); + this.validTopicIdPartitions = new ArrayList<>(); } public Map erroneous() { return erroneous; } - public Set validTopicIdPartitions() { + public List validTopicIdPartitions() { return validTopicIdPartitions; } } diff --git a/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java index 61abe38053faa..99eb92a85b94c 100644 --- a/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java +++ b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java @@ -32,16 +32,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; -import java.util.Set; /** * The context for a share session fetch request. 
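The ErroneousAndValidPartitionData change above swaps the containers (List in place of Set) but keeps the classification rule intact: a requested partition whose topic id could not be resolved to a name is reported as an UNKNOWN_TOPIC_ID error, everything else is passed along as valid. A hedged sketch of that split, with `requested` as an assumed input list:

    // Mirrors the constructor above (sketch only, not the patched class).
    Map<TopicIdPartition, ShareFetchResponseData.PartitionData> erroneous = new HashMap<>();
    List<TopicIdPartition> valid = new ArrayList<>();
    for (TopicIdPartition tp : requested) {
        if (tp.topic() == null)   // topic name could not be resolved from the topic id
            erroneous.put(tp, ShareFetchResponse.partitionResponse(tp, Errors.UNKNOWN_TOPIC_ID));
        else
            valid.add(tp);
    }

A List also preserves the request order of the valid partitions, which the order-sensitive rotation logic elsewhere in this patch relies on.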
@@ -52,7 +51,7 @@ public class ShareSessionContext extends ShareFetchContext { private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent; - private Set shareFetchData; + private List shareFetchData; private ShareSession session; /** @@ -62,7 +61,7 @@ public class ShareSessionContext extends ShareFetchContext { * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, - Set shareFetchData) { + List shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this.isSubsequent = false; @@ -81,7 +80,7 @@ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession sessio } // Visible for testing - public Set shareFetchData() { + public List shareFetchData() { return shareFetchData; } @@ -229,7 +228,7 @@ public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { return new ErroneousAndValidPartitionData(shareFetchData); } Map erroneous = new HashMap<>(); - Set valid = new HashSet<>(); + List valid = new ArrayList<>(); // Take the session lock and iterate over all the cached partitions. synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index 9c517974e553e..459223f3e41a9 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -18,7 +18,7 @@ import org.apache.kafka.common.TopicIdPartition; -import java.util.LinkedHashSet; +import java.util.ArrayList; import java.util.Locale; /** @@ -47,7 +47,7 @@ public String toString() { * * @return the rotated topicIdPartitions */ - LinkedHashSet rotate(LinkedHashSet topicIdPartitions, PartitionRotateMetadata metadata); + ArrayList rotate(ArrayList topicIdPartitions, PartitionRotateMetadata metadata); static PartitionRotateStrategy type(StrategyType type) { return switch (type) { @@ -63,8 +63,8 @@ static PartitionRotateStrategy type(StrategyType type) { * * @return the rotated topicIdPartitions */ - static LinkedHashSet rotateRoundRobin( - LinkedHashSet topicIdPartitions, + static ArrayList rotateRoundRobin( + ArrayList topicIdPartitions, PartitionRotateMetadata metadata ) { if (topicIdPartitions.isEmpty() || topicIdPartitions.size() == 1 || metadata.sessionEpoch < 1) { @@ -79,8 +79,8 @@ static LinkedHashSet rotateRoundRobin( return topicIdPartitions; } - LinkedHashSet suffixPartitions = new LinkedHashSet<>(rotateAt); - LinkedHashSet rotatedPartitions = new LinkedHashSet<>(topicIdPartitions.size()); + ArrayList suffixPartitions = new ArrayList<>(rotateAt); + ArrayList rotatedPartitions = new ArrayList<>(topicIdPartitions.size()); int i = 0; for (TopicIdPartition topicIdPartition : topicIdPartitions) { if (i < rotateAt) { diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java index f1375b4fe8eb4..93480a0f9fe3d 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java @@ -23,6 +23,7 @@ import org.apache.kafka.server.storage.log.FetchParams; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; +import java.util.ArrayList; 
import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -56,7 +57,7 @@ public class ShareFetch { /** * The topic partitions to be fetched. */ - private final LinkedHashSet topicIdPartitions; + private final ArrayList topicIdPartitions; /** * The batch size of the fetch request. */ @@ -80,7 +81,7 @@ public ShareFetch( String groupId, String memberId, CompletableFuture<Map<TopicIdPartition, PartitionData>> future, - LinkedHashSet<TopicIdPartition> topicIdPartitions, + ArrayList<TopicIdPartition> topicIdPartitions, int batchSize, int maxFetchRecords, BrokerTopicStats brokerTopicStats @@ -103,7 +104,7 @@ public String memberId() { return memberId; } - public LinkedHashSet topicIdPartitions() { + public ArrayList topicIdPartitions() { return topicIdPartitions; } diff --git a/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java index 1dbcc485954f6..a7ab8e2c507ff 100644 --- a/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java +++ b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java @@ -26,7 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; public class ShareSession { @@ -112,7 +111,7 @@ public synchronized LastUsedKey lastUsedKey() { // Update the cached partition data based on the request. public synchronized Map<ModifiedTopicIdPartitionType, List<TopicIdPartition>> update( - Set<TopicIdPartition> shareFetchData, + List<TopicIdPartition> shareFetchData, List<TopicIdPartition> toForget) { List added = new ArrayList<>(); List updated = new ArrayList<>(); diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java index 103b7e7785f3e..6fe3705303c52 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Test; -import java.util.LinkedHashSet; +import java.util.ArrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -35,9 +35,9 @@ public class PartitionRotateStrategyTest { @Test public void testRoundRobinStrategy() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); - LinkedHashSet partitions = createPartitions(3); + ArrayList partitions = createPartitions(3); - LinkedHashSet result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); + ArrayList result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); assertEquals(3, result.size()); validateRotatedMapEquals(partitions, result, 1); @@ -61,8 +61,8 @@ public void testRoundRobinStrategy() { public void testRoundRobinStrategyWithSpecialSessionEpochs() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); - LinkedHashSet partitions = createPartitions(3); - LinkedHashSet result = strategy.rotate( + ArrayList partitions = createPartitions(3); + ArrayList result = strategy.rotate( partitions, new PartitionRotateMetadata(ShareRequestMetadata.INITIAL_EPOCH)); assertEquals(3, result.size()); @@ -79,7 +79,7 @@ public void testRoundRobinStrategyWithEmptyPartitions() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); // Empty partitions.
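Before the empty-partitions case below, a worked example makes the rotation concrete. Assuming rotateAt is computed as the session epoch modulo the partition count (a hedged reading of the rotateRoundRobin code above), three partitions [foo0, foo1, foo2] and session epoch 1 give rotateAt = 1, so the first element moves to the tail:

    // Sketch using the helpers from this test class.
    ArrayList<TopicIdPartition> partitions = createPartitions(3);      // [foo0, foo1, foo2]
    ArrayList<TopicIdPartition> rotated = PartitionRotateStrategy
        .type(StrategyType.ROUND_ROBIN)
        .rotate(partitions, new PartitionRotateMetadata(1));           // [foo1, foo2, foo0]
    // Epochs below 1 (INITIAL_EPOCH and FINAL_EPOCH) return the input unchanged.

An empty input short-circuits in the same way, which is what the next assertion verifies.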
- LinkedHashSet result = strategy.rotate(new LinkedHashSet<>(), new PartitionRotateMetadata(5)); + ArrayList result = strategy.rotate(new ArrayList<>(), new PartitionRotateMetadata(5)); // The result should be empty. assertTrue(result.isEmpty()); } @@ -89,8 +89,8 @@ public void testRoundRobinStrategyWithEmptyPartitions() { * @param size The number of topic-partitions to create. * @return The ordered set of topic partitions. */ - private LinkedHashSet createPartitions(int size) { - LinkedHashSet partitions = new LinkedHashSet<>(); + private ArrayList createPartitions(int size) { + ArrayList partitions = new ArrayList<>(); for (int i = 0; i < size; i++) { partitions.add(new TopicIdPartition(Uuid.randomUuid(), i, "foo" + i)); } diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java index df66550672a26..e80eeb4d326a4 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java @@ -32,7 +32,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.orderedSet; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -60,7 +60,7 @@ public void tearDown() throws Exception { public void testErrorInAllPartitions() { TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - orderedSet(topicIdPartition), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition), BATCH_SIZE, 100, brokerTopicStats); assertFalse(shareFetch.errorInAllPartitions()); shareFetch.addErroneous(topicIdPartition, new RuntimeException()); @@ -72,7 +72,7 @@ public void testErrorInAllPartitionsWithMultipleTopicIdPartitions() { TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); assertFalse(shareFetch.errorInAllPartitions()); shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -87,7 +87,7 @@ public void testFilterErroneousTopicPartitions() { TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); Set result = shareFetch.filterErroneousTopicPartitions(Set.of(topicIdPartition0, topicIdPartition1)); // No erroneous partitions, hence all partitions should be 
returned. assertEquals(2, result.size()); @@ -113,7 +113,7 @@ public void testMaybeCompleteWithErroneousTopicPartitions() { CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); // Add both erroneous partition and complete request. shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -134,7 +134,7 @@ public void testMaybeCompleteWithPartialErroneousTopicPartitions() { CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); // Add an erroneous partition and complete request. shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -154,7 +154,7 @@ public void testMaybeCompleteWithException() { CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); shareFetch.maybeCompleteWithException(List.of(topicIdPartition0, topicIdPartition1), new RuntimeException()); assertEquals(2, future.join().size()); @@ -173,7 +173,7 @@ public void testMaybeCompleteWithExceptionPartialFailure() { CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedSet(topicIdPartition0, topicIdPartition1, topicIdPartition2), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1, topicIdPartition2), BATCH_SIZE, 100, brokerTopicStats); shareFetch.maybeCompleteWithException(List.of(topicIdPartition0, topicIdPartition2), new RuntimeException()); assertEquals(2, future.join().size()); @@ -191,7 +191,7 @@ public void testMaybeCompleteWithExceptionWithExistingErroneousTopicPartition() CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - orderedSet(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); shareFetch.maybeCompleteWithException(List.of(topicIdPartition1), new RuntimeException()); diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java index 5e8e5ceb6eb01..57461fe63c5ee 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java @@ -30,8 +30,8 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -45,15 +45,15 @@ public class ShareFetchTestUtils { /** - * Create an ordered set of topic partitions.
+ * Create an ArrayList of topic partitions. * - * @param topicIdPartitions The topic partitions to create the set for. - * @return The ordered set of topic partitions. + * @param topicIdPartitions The topic partitions to create the list for. + * @return The list of topic partitions. */ - public static LinkedHashSet orderedSet(TopicIdPartition... topicIdPartitions) { - LinkedHashSet set = new LinkedHashSet<>(); - Collections.addAll(set, topicIdPartitions); - return set; + public static ArrayList arrayList(TopicIdPartition... topicIdPartitions) { + ArrayList list = new ArrayList<>(); + Collections.addAll(list, topicIdPartitions); + return list; } /** @@ -64,8 +64,8 @@ public static LinkedHashSet orderedSet(TopicIdPartition... top * @param rotationAt The position to rotate the keys at. */ public static void validateRotatedMapEquals( - LinkedHashSet original, - LinkedHashSet result, + ArrayList original, + ArrayList result, int rotationAt ) { From 3dae2095756dbc5d6f3d7ea445e0af0a317ee0a9 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Mon, 10 Mar 2025 13:03:46 +0530 Subject: [PATCH 05/11] Addressed Apoorv's round 1 review comments --- .../common/requests/ShareFetchRequest.java | 4 +- .../server/share/SharePartitionManager.java | 4 +- .../server/share/DelayedShareFetchTest.java | 38 ++++----- .../server/share/ShareFetchUtilsTest.java | 11 ++- .../share/SharePartitionManagerTest.java | 83 +++++++++---------- .../share/fetch/PartitionRotateStrategy.java | 7 +- .../kafka/server/share/fetch/ShareFetch.java | 8 +- .../server/share/session/ShareSession.java | 3 +- .../fetch/PartitionRotateStrategyTest.java | 7 +- .../server/share/fetch/ShareFetchTest.java | 17 ++-- .../share/fetch/ShareFetchTestUtils.java | 18 +--- 11 files changed, 92 insertions(+), 108 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java index 9e80ab4e388af..7db1bf86c8d71 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java @@ -149,7 +149,7 @@ public String toString() { } private final ShareFetchRequestData data; - private volatile ArrayList shareFetchData = null; + private volatile List shareFetchData = null; private volatile List toForget = null; public ShareFetchRequest(ShareFetchRequestData data, short version) { @@ -195,7 +195,7 @@ public List shareFetchData(Map topicNames) { if (shareFetchData == null) { // Assigning the lazy-initialized `shareFetchData` in the last step // to avoid other threads accessing a half-initialized object. 
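The two comments above describe the double-checked locking idiom: the volatile shareFetchData field is read once without the lock, re-checked under the lock, and only assigned after the temporary collection has been fully built, so a concurrent reader can never observe a half-populated list. A minimal standalone sketch of the same idiom (hypothetical holder class, assuming the usual java.util and java.util.function.Supplier imports; not the patched request class):

    class LazyPartitions {
        private volatile List<TopicIdPartition> cached;

        List<TopicIdPartition> get(Supplier<List<TopicIdPartition>> loader) {
            if (cached == null) {                      // first check, lock-free fast path
                synchronized (this) {
                    if (cached == null) {              // second check, under the lock
                        List<TopicIdPartition> tmp = new ArrayList<>(loader.get());
                        cached = tmp;                  // publish the fully built list last
                    }
                }
            }
            return cached;
        }
    }

The patched shareFetchData() below follows the same shape: shareFetchDataTmp is populated first and assigned to the volatile field only as the final step.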
- final ArrayList shareFetchDataTmp = new ArrayList<>(); + final List shareFetchDataTmp = new ArrayList<>(); data.topics().forEach(shareFetchTopic -> { String name = topicNames.get(shareFetchTopic.topicId()); shareFetchTopic.partitions().forEach(shareFetchPartition -> { diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index 2640348783803..a240e98f418cd 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -257,12 +257,12 @@ public CompletableFuture<Map<TopicIdPartition, PartitionData>> fetchMessages( FetchParams fetchParams, int sessionEpoch, int batchSize, - ArrayList<TopicIdPartition> topicPartitions + List<TopicIdPartition> topicPartitions ) { log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}", topicPartitions, groupId, fetchParams); - ArrayList topicIdPartitions = PartitionRotateStrategy + List topicIdPartitions = PartitionRotateStrategy .type(PartitionRotateStrategy.StrategyType.ROUND_ROBIN) .rotate(topicPartitions, new PartitionRotateMetadata(sessionEpoch)); diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index 5e0f3e721fa7c..fc7fc872afccc 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -72,7 +72,6 @@ import static kafka.server.share.SharePartitionManagerTest.DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL; import static kafka.server.share.SharePartitionManagerTest.buildLogReadResult; import static kafka.server.share.SharePartitionManagerTest.mockReplicaManagerDelayedShareFetch; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -132,7 +131,7 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(false); @@ -179,7 +178,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -251,7 +250,7 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0,
tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -303,7 +302,7 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -363,7 +362,7 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); @@ -416,7 +415,7 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -472,7 +471,7 @@ public void testToCompleteAnAlreadyCompletedFuture() { CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); @@ -518,7 +517,7 @@ public void testForceCompleteTriggersDelayedActionsQueue() { TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(topicId, new TopicPartition("foo", 2)); - ArrayList topicIdPartitions1 = arrayList(tp0, tp1); + List topicIdPartitions1 = List.of(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -560,9 +559,8 @@ public void testForceCompleteTriggersDelayedActionsQueue() { assertTrue(delayedShareFetch1.lock().tryLock()); delayedShareFetch1.lock().unlock(); - ArrayList topicIdPartitions2 = arrayList(tp0, tp1); ShareFetch shareFetch2 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), topicIdPartitions2, BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -620,7 +618,7 @@ public void testCombineLogReadResponse() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, arrayList(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, List.of(tp0, tp1), BATCH_SIZE,
MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp1)); @@ -676,7 +674,7 @@ public void testExceptionInMinBytesCalculation() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.canAcquireRecords()).thenReturn(true); @@ -753,7 +751,7 @@ public void testTryCompleteLocksReleasedOnCompleteException() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -793,7 +791,7 @@ public void testLocksReleasedForCompletedFetch() { mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Collections.singleton(tp0)); @@ -827,7 +825,7 @@ public void testLocksReleasedAcquireException() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() @@ -856,7 +854,7 @@ public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, arrayList(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); // partitionMaxBytesStrategy.maxBytes() function throws an exception PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); @@ -920,7 +918,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab sharePartitions.put(tp4, sp4); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( @@ -1016,7 +1014,7 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab sharePartitions.put(tp4, sp4); ShareFetch shareFetch = new 
ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class))).thenReturn( @@ -1088,7 +1086,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() diff --git a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java index 0496b659c06f9..cf66d5522e753 100644 --- a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java +++ b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java @@ -59,7 +59,6 @@ import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createFileRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.memoryRecordsBuilder; @@ -113,7 +112,7 @@ public void testProcessFetchResponse() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -174,7 +173,7 @@ public void testProcessFetchResponseWithEmptyRecords() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); List responseData = List.of( new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, @@ -213,7 +212,7 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -305,7 +304,7 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), arrayList(tp0), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, 100, BROKER_TOPIC_STATS); ReplicaManager replicaManager = 
mock(ReplicaManager.class); @@ -373,7 +372,7 @@ public void testProcessFetchResponseWithMaxFetchRecords() throws IOException { Uuid memberId = Uuid.randomUuid(); // Set max fetch records to 10 ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId.toString(), - new CompletableFuture<>(), arrayList(tp0, tp1), BATCH_SIZE, 10, BROKER_TOPIC_STATS); + new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 10, BROKER_TOPIC_STATS); LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); recordsPerOffset.put(0L, 1); diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index 5fd25fb415f2a..b77a147261fa0 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -115,7 +115,6 @@ import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.DelayedShareFetchTest.mockTopicIdPartitionToReturnDataEqualToMinBytes; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -199,7 +198,7 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - List reqData1 = arrayList(tp0, tp1); + List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -227,7 +226,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - List reqData1 = arrayList(tp0, tp1); + List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -238,7 +237,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. 
// New context should be created successfully - List reqData3 = arrayList(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); + List reqData3 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @@ -259,7 +258,7 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - List reqData1 = arrayList(tp0, tp1); + List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -269,7 +268,7 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH - List reqData3 = arrayList(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); + List reqData3 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); assertInstanceOf(FinalContext.class, sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true)); } @@ -294,7 +293,7 @@ public void testNewContext() { String groupId = "grp"; // Create a new share session with an initial share fetch request - List reqData2 = arrayList(tp0, tp1); + List reqData2 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -388,7 +387,7 @@ public void testShareSessionExpiration() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session, session 1 - List session1req = arrayList(foo0, foo1); + List session1req = List.of(foo0, foo1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -411,7 +410,7 @@ public void testShareSessionExpiration() { time.sleep(500); // Create a second new share session - List session2req = arrayList(foo0, foo1); + List session2req = List.of(foo0, foo1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -445,7 +444,7 @@ public void testShareSessionExpiration() { // create one final share session to test that the least recently used entry is evicted // the second share session should be evicted because the first share session was incrementally fetched // more recently than the second session was created - List session3req = arrayList(foo0, foo1); + List session3req = List.of(foo0, foo1); ShareRequestMetadata reqMetadata3 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -480,7 +479,7 @@ public void testSubsequentShareSession() { TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); // Create a new share session with foo-0 and foo-1 - List reqData1 = arrayList(tp0, tp1); + List reqData1 = List.of(tp0, tp1); String groupId = "grp"; 
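// A note on the arrayList(...) -> List.of(...) swap running through these tests:
// List.of produces an immutable list, which is only safe to hand to fetchMessages
// because the rotation path builds a new list rather than mutating its input
// (calling a mutator such as Collections.rotate directly on a List.of(...) result
// would throw UnsupportedOperationException). A fuller sketch of the copy-then-rotate
// idiom appears just before the PartitionRotateStrategy diff further down.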
ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -497,7 +496,7 @@ public void testSubsequentShareSession() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent fetch request that removes foo-0 and adds bar-0 - List reqData2 = arrayList(tp2); + List reqData2 = List.of(tp2); List removed2 = new ArrayList<>(); removed2.add(tp0); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, removed2, @@ -543,7 +542,7 @@ public void testZeroSizeShareSession() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session with foo-0 and foo-1 - List reqData1 = arrayList(foo0, foo1); + List reqData1 = List.of(foo0, foo1); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); @@ -589,7 +588,7 @@ public void testToForgetPartitions() { ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - List reqData1 = arrayList(foo, bar); + List reqData1 = List.of(foo, bar); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); assertInstanceOf(ShareSessionContext.class, context1); @@ -630,7 +629,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { topicNames.put(barId, "bar"); // Create a new share session with foo-0 and bar-1 - List reqData1 = arrayList(foo, bar); + List reqData1 = List.of(foo, bar); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -680,7 +679,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { String groupId = "grp"; // Create a new share session with an initial share fetch request - List reqData2 = arrayList(tp0, tp1, tpNull1); + List reqData2 = List.of(tp0, tp1, tpNull1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -727,7 +726,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - List reqData7 = arrayList(tpNull2); + List reqData7 = List.of(tpNull2); ShareFetchContext context7 = sharePartitionManager.newContext(groupId, reqData7, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); // Check for throttled response @@ -777,7 +776,7 @@ public void testShareFetchContextResponseSize() { String groupId = "grp"; // Create a new share session with an initial share fetch request - List reqData2 = arrayList(tp0, tp1); + List reqData2 = List.of(tp0, tp1); // For response size expected value calculation ObjectSerializationCache objectSerializationCache = new ObjectSerializationCache(); @@ -812,7 +811,7 @@ public void testShareFetchContextResponseSize() { new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. 
- List reqData5 = arrayList(tp2); + List reqData5 = List.of(tp2); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); @@ -887,7 +886,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { Uuid memberId2 = Uuid.randomUuid(); // Create a new share session with an initial share fetch request. - List reqData1 = arrayList(tp0, tp1); + List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); @@ -908,7 +907,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Create a new share session with an initial share fetch request. - List reqData2 = arrayList(tp2); + List reqData2 = List.of(tp2); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); @@ -927,7 +926,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(Collections.singletonList(tp2), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Continue the first share session we created. - List reqData3 = arrayList(tp2); + List reqData3 = List.of(tp2); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, reqData3, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context3); @@ -942,7 +941,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Continue the second session we created. 
- List reqData4 = arrayList(tp3); + List reqData4 = List.of(tp3); ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, Collections.singletonList(tp2), new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context4); @@ -1019,7 +1018,7 @@ public void testMultipleSequentialShareFetches() { TopicIdPartition tp4 = new TopicIdPartition(fooId, new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(barId, new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(fooId, new TopicPartition("foo", 3)); - ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2, tp3, tp4, tp5, tp6); + List topicIdPartitions = List.of(tp0, tp1, tp2, tp3, tp4, tp5, tp6); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1081,7 +1080,7 @@ public void testMultipleConcurrentShareFetches() throws InterruptedException { TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(barId, new TopicPartition("bar", 1)); - ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2, tp3); + List topicIdPartitions = List.of(tp0, tp1, tp2, tp3); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1174,7 +1173,7 @@ public void testReplicaManagerFetchShouldNotProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -1217,7 +1216,7 @@ public void testReplicaManagerFetchShouldProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); mockFetchOffsetForTimestamp(mockReplicaManager); @@ -1709,7 +1708,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0)); TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); - ArrayList topicIdPartitions = arrayList(tp1, tp2); + List topicIdPartitions = List.of(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1813,7 +1812,7 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - ArrayList topicIdPartitions = arrayList(tp1, tp2); + List topicIdPartitions = List.of(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1919,7 +1918,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - ArrayList topicIdPartitions = arrayList(tp1, tp2); + List topicIdPartitions = List.of(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -2019,7 
+2018,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - ArrayList topicIdPartitions = arrayList(tp1, tp2); + List topicIdPartitions = List.of(tp1, tp2); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -2121,7 +2120,7 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2185,7 +2184,7 @@ public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - ArrayList topicIdPartitions = arrayList(tp0, tp1); + List topicIdPartitions = List.of(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -2254,7 +2253,7 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2328,7 +2327,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); @@ -2459,7 +2458,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { public void testShareFetchProcessingExceptions() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); Map partitionCacheMap = (Map) mock(Map.class); // Throw the exception for first fetch request. Return share partition for next. @@ -2491,7 +2490,7 @@ public void testShareFetchProcessingExceptions() throws Exception { public void testSharePartitionInitializationFailure() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); // Send map to check no share partition is created. Map partitionCacheMap = new HashMap<>(); @@ -2549,7 +2548,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { TopicIdPartition tp1 = new TopicIdPartition(memberId1, new TopicPartition("foo", 1)); // For tp2, share partition initialization will fail. 
TopicIdPartition tp2 = new TopicIdPartition(memberId1, new TopicPartition("foo", 2)); - ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2); + List topicIdPartitions = List.of(tp0, tp1, tp2); // Mark partition0 as not the leader. Partition partition0 = mock(Partition.class); @@ -2624,7 +2623,7 @@ public void testReplicaManagerFetchException() { String groupId = "grp"; Uuid memberId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - ArrayList topicIdPartitions = arrayList(tp0); + List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -2677,7 +2676,7 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - ArrayList topicIdPartitions = arrayList(tp0, tp1); + List topicIdPartitions = List.of(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock()).thenReturn(true); @@ -2747,7 +2746,7 @@ public void testListenerRegistration() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - ArrayList topicIdPartitions = arrayList(tp0, tp1); + List topicIdPartitions = List.of(tp0, tp1); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); @@ -2816,7 +2815,7 @@ public void testFetchMessagesRotatePartitions() { TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); - ArrayList topicIdPartitions = arrayList(tp0, tp1, tp2, tp3, tp4, tp5, tp6); + List topicIdPartitions = List.of(tp0, tp1, tp2, tp3, tp4, tp5, tp6); sharePartitionManager = Mockito.spy(SharePartitionManagerBuilder.builder().withBrokerTopicStats(brokerTopicStats).build()); // Capture the arguments passed to processShareFetch. 
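The PartitionRotateStrategy diff below widens the rotate contract from ArrayList to List. Combined with the rotation checks later in this test (rotate by 1, by 3, by 5 for epoch 12, and by 1 for Integer.MAX_VALUE over seven partitions), the round-robin strategy reduces the session epoch modulo the partition count and left-rotates by that amount. A minimal self-contained sketch under that assumption (the rotateAt derivation is inferred from the test comments; the emptiness/size/epoch guard matches the one visible in rotateRoundRobin below, and the class and type-parameter names here are illustrative):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public final class RoundRobinRotationSketch {
        // With 7 partitions and session epoch 12: rotateAt = 12 % 7 = 5,
        // so [p0..p6] becomes [p5, p6, p0, p1, p2, p3, p4].
        static <T> List<T> rotateRoundRobin(List<T> partitions, int sessionEpoch) {
            if (partitions.size() <= 1 || sessionEpoch < 1) {
                return partitions; // nothing to rotate for initial/final epochs
            }
            int rotateAt = sessionEpoch % partitions.size();
            List<T> rotated = new ArrayList<>(partitions); // copy: callers may pass List.of(...)
            Collections.rotate(rotated, -rotateAt);        // negative distance rotates left
            return rotated;
        }
    }

Returning the input unchanged for epochs below 1 keeps the initial epoch stable, which is why the tests expect no rotation on the first fetch of a session.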
diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index 459223f3e41a9..9c7f1673aa268 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.TopicIdPartition; import java.util.ArrayList; +import java.util.List; import java.util.Locale; /** @@ -47,7 +48,7 @@ public String toString() { * * @return the rotated topicIdPartitions */ - ArrayList rotate(ArrayList topicIdPartitions, PartitionRotateMetadata metadata); + List rotate(List topicIdPartitions, PartitionRotateMetadata metadata); static PartitionRotateStrategy type(StrategyType type) { return switch (type) { @@ -63,8 +64,8 @@ static PartitionRotateStrategy type(StrategyType type) { * * @return the rotated topicIdPartitions */ - static ArrayList rotateRoundRobin( - ArrayList topicIdPartitions, + static List rotateRoundRobin( + List topicIdPartitions, PartitionRotateMetadata metadata ) { if (topicIdPartitions.isEmpty() || topicIdPartitions.size() == 1 || metadata.sessionEpoch < 1) { diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java index 93480a0f9fe3d..8406f9efa91c3 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java @@ -23,11 +23,11 @@ import org.apache.kafka.server.storage.log.FetchParams; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; -import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -57,7 +57,7 @@ public class ShareFetch { /** * The topic partitions to be fetched. */ - private final ArrayList topicIdPartitions; + private final List topicIdPartitions; /** * The batch size of the fetch request. */ @@ -81,7 +81,7 @@ public ShareFetch( String groupId, String memberId, CompletableFuture> future, - ArrayList topicIdPartitions, + List topicIdPartitions, int batchSize, int maxFetchRecords, BrokerTopicStats brokerTopicStats @@ -104,7 +104,7 @@ public String memberId() { return memberId; } - public ArrayList topicIdPartitions() { + public List topicIdPartitions() { return topicIdPartitions; } diff --git a/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java index a7ab8e2c507ff..362f32e61975e 100644 --- a/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java +++ b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java @@ -112,7 +112,8 @@ public synchronized LastUsedKey lastUsedKey() { // Update the cached partition data based on the request. 
public synchronized Map> update( List shareFetchData, - List toForget) { + List toForget + ) { List added = new ArrayList<>(); List updated = new ArrayList<>(); List removed = new ArrayList<>(); diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java index 6fe3705303c52..7dc0a8f299ea7 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java @@ -25,6 +25,7 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.List; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -37,7 +38,7 @@ public void testRoundRobinStrategy() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); ArrayList partitions = createPartitions(3); - ArrayList result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); + List result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); assertEquals(3, result.size()); validateRotatedMapEquals(partitions, result, 1); @@ -62,7 +63,7 @@ public void testRoundRobinStrategyWithSpecialSessionEpochs() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); ArrayList partitions = createPartitions(3); - ArrayList result = strategy.rotate( + List result = strategy.rotate( partitions, new PartitionRotateMetadata(ShareRequestMetadata.INITIAL_EPOCH)); assertEquals(3, result.size()); @@ -79,7 +80,7 @@ public void testRoundRobinStrategyWithSpecialSessionEpochs() { public void testRoundRobinStrategyWithEmptyPartitions() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); // Empty partitions. - ArrayList result = strategy.rotate(new ArrayList<>(), new PartitionRotateMetadata(5)); + List result = strategy.rotate(new ArrayList<>(), new PartitionRotateMetadata(5)); // The result should be empty. 
assertTrue(result.isEmpty()); } diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java index e80eeb4d326a4..f7e29e2484f5f 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java @@ -32,7 +32,6 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.arrayList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -60,7 +59,7 @@ public void tearDown() throws Exception { public void testErrorInAllPartitions() { TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - arrayList(topicIdPartition), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition), BATCH_SIZE, 100, brokerTopicStats); assertFalse(shareFetch.errorInAllPartitions()); shareFetch.addErroneous(topicIdPartition, new RuntimeException()); @@ -72,7 +71,7 @@ public void testErrorInAllPartitionsWithMultipleTopicIdPartitions() { TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); assertFalse(shareFetch.errorInAllPartitions()); shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -87,7 +86,7 @@ public void testFilterErroneousTopicPartitions() { TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), - arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); Set result = shareFetch.filterErroneousTopicPartitions(Set.of(topicIdPartition0, topicIdPartition1)); // No erroneous partitions, hence all partitions should be returned. assertEquals(2, result.size()); @@ -113,7 +112,7 @@ public void testMaybeCompleteWithErroneousTopicPartitions() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); // Add both erroneous partition and complete request. 
shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -134,7 +133,7 @@ public void testMaybeCompleteWithPartialErroneousTopicPartitions() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); // Add an erroneous partition and complete request. shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); @@ -154,7 +153,7 @@ public void testMaybeCompleteWithException() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); shareFetch.maybeCompleteWithException(List.of(topicIdPartition0, topicIdPartition1), new RuntimeException()); assertEquals(2, future.join().size()); @@ -173,7 +172,7 @@ public void testMaybeCompleteWithExceptionPartialFailure() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - arrayList(topicIdPartition0, topicIdPartition1, topicIdPartition2), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1, topicIdPartition2), BATCH_SIZE, 100, brokerTopicStats); shareFetch.maybeCompleteWithException(List.of(topicIdPartition0, topicIdPartition2), new RuntimeException()); assertEquals(2, future.join().size()); @@ -191,7 +190,7 @@ public void testMaybeCompleteWithExceptionWithExistingErroneousTopicPartition() CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, future, - arrayList(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); + List.of(topicIdPartition0, topicIdPartition1), BATCH_SIZE, 100, brokerTopicStats); shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); shareFetch.maybeCompleteWithException(List.of(topicIdPartition1), new RuntimeException()); diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java index 57461fe63c5ee..e9fc9d4bd6a05 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java @@ -30,8 +30,6 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -44,18 +42,6 @@ */ public class ShareFetchTestUtils { - /** - * Create an ArrayList of topic partitions. - * - * @param topicIdPartitions The topic partitions to create the list for. - * @return The list of topic partitions. - */ - public static ArrayList arrayList(TopicIdPartition... topicIdPartitions) { - ArrayList list = new ArrayList<>(); - Collections.addAll(list, topicIdPartitions); - return list; - } - /** * Validate that the rotated map is equal to the original map with the keys rotated by the given position. * @@ -64,8 +50,8 @@ public static ArrayList arrayList(TopicIdPartition... 
topicIdP * @param rotationAt The position to rotate the keys at. */ public static void validateRotatedMapEquals( - ArrayList original, - ArrayList result, + List original, + List result, int rotationAt ) { From 0d30e13eb59482aea3aea27e5b6ab4990dc2e4fd Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Mon, 10 Mar 2025 13:20:50 +0530 Subject: [PATCH 06/11] Addressed Apoorv's round 1 review comments --- .../share/fetch/PartitionRotateStrategy.java | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index 9c7f1673aa268..820ee0fbd4d7c 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.TopicIdPartition; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Locale; @@ -80,18 +81,8 @@ static List rotateRoundRobin( return topicIdPartitions; } - ArrayList suffixPartitions = new ArrayList<>(rotateAt); - ArrayList rotatedPartitions = new ArrayList<>(topicIdPartitions.size()); - int i = 0; - for (TopicIdPartition topicIdPartition : topicIdPartitions) { - if (i < rotateAt) { - suffixPartitions.add(topicIdPartition); - } else { - rotatedPartitions.add(topicIdPartition); - } - i++; - } - rotatedPartitions.addAll(suffixPartitions); + List rotatedPartitions = new ArrayList<>(topicIdPartitions); + Collections.rotate(rotatedPartitions, -1 * rotateAt); return rotatedPartitions; } From c90f8488efc26b0701529a38a09f50f99309fe78 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Mon, 10 Mar 2025 15:45:04 +0530 Subject: [PATCH 07/11] Minor refactor --- .../kafka/server/share/SharePartitionManager.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index a240e98f418cd..4a8c373fa695b 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -247,7 +247,7 @@ private SharePartitionManager( * @param memberId The member id, generated by the group-coordinator, this is used to identify the client. * @param fetchParams The fetch parameters from the share fetch request. * @param batchSize The number of records per acquired records batch. - * @param topicPartitions The topic partitions to fetch for. + * @param topicIdPartitions The topic partitions to fetch for. * * @return A future that will be completed with the fetched messages. 
*/ @@ -257,17 +257,17 @@ public CompletableFuture> fetchMessages( FetchParams fetchParams, int sessionEpoch, int batchSize, - List topicPartitions + List topicIdPartitions ) { log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}", - topicPartitions, groupId, fetchParams); + topicIdPartitions, groupId, fetchParams); - List topicIdPartitions = PartitionRotateStrategy + List rotatedTopicIdPartitions = PartitionRotateStrategy .type(PartitionRotateStrategy.StrategyType.ROUND_ROBIN) - .rotate(topicPartitions, new PartitionRotateMetadata(sessionEpoch)); + .rotate(topicIdPartitions, new PartitionRotateMetadata(sessionEpoch)); CompletableFuture> future = new CompletableFuture<>(); - processShareFetch(new ShareFetch(fetchParams, groupId, memberId, future, topicIdPartitions, batchSize, maxFetchRecords, brokerTopicStats)); + processShareFetch(new ShareFetch(fetchParams, groupId, memberId, future, rotatedTopicIdPartitions, batchSize, maxFetchRecords, brokerTopicStats)); return future; } From 4c0433f0ee7686521edfcf490320751ecb584e26 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Mon, 10 Mar 2025 19:12:10 +0530 Subject: [PATCH 08/11] Addresses Andrew's comments --- .../internals/ShareSessionHandler.java | 2 +- .../common/requests/ShareFetchRequest.java | 12 +- .../common/message/ShareFetchRequest.json | 2 - .../share/SharePartitionManagerTest.java | 12 +- .../unit/kafka/server/KafkaApisTest.scala | 132 ++++++------------ .../ShareFetchAcknowledgeRequestTest.scala | 101 +++++++------- .../fetch/PartitionRotateStrategyTest.java | 14 +- .../share/fetch/ShareFetchTestUtils.java | 10 +- 8 files changed, 112 insertions(+), 173 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java index b7fefbcaadbaf..e88318217ad4c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java @@ -179,7 +179,7 @@ public ShareFetchRequest.Builder newShareFetchBuilder(String groupId, FetchConfi return ShareFetchRequest.Builder.forConsumer( groupId, nextMetadata, fetchConfig.maxWaitMs, - fetchConfig.minBytes, fetchConfig.maxBytes, fetchConfig.fetchSize, fetchConfig.maxPollRecords, + fetchConfig.minBytes, fetchConfig.maxBytes, fetchConfig.maxPollRecords, added, removed, acknowledgementBatches); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java index 7db1bf86c8d71..ff395ea7eb397 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java @@ -47,19 +47,17 @@ public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { } public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, - int maxWait, int minBytes, int maxBytes, int fetchSize, int batchSize, + int maxWait, int minBytes, int maxBytes, int batchSize, List send, List forget, Map> acknowledgementsMap) { ShareFetchRequestData data = new ShareFetchRequestData(); data.setGroupId(groupId); - int ackOnlyPartitionMaxBytes = fetchSize; boolean isClosingShareSession = false; if (metadata != null) { data.setMemberId(metadata.memberId().toString()); 
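// Downstream of this builder change, an acknowledgement-only partition no longer
// carries the PartitionMaxBytes=0 sentinel (the ShareFetchRequest.json hunk below
// drops the field outright). In the acknowledgementsMap loop later in this method,
// such an entry is now just the partition index plus its batches. Illustrative
// shape only -- `tip` matches the loop variable used below, while `batches` is a
// hypothetical stand-in for acknowledgeEntry.getValue():
//
//     ShareFetchRequestData.FetchPartition ackOnly = new ShareFetchRequestData.FetchPartition()
//         .setPartitionIndex(tip.partition())
//         .setAcknowledgementBatches(batches);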
data.setShareSessionEpoch(metadata.epoch()); if (metadata.isFinalEpoch()) { isClosingShareSession = true; - ackOnlyPartitionMaxBytes = 0; } } data.setMaxWaitMs(maxWait); @@ -75,8 +73,7 @@ public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, for (TopicIdPartition tip : send) { Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); ShareFetchRequestData.FetchPartition fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()) - .setPartitionMaxBytes(fetchSize); + .setPartitionIndex(tip.partition()); partMap.put(tip.partition(), fetchPartition); } } @@ -89,8 +86,7 @@ public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, ShareFetchRequestData.FetchPartition fetchPartition = partMap.get(tip.partition()); if (fetchPartition == null) { fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()) - .setPartitionMaxBytes(ackOnlyPartitionMaxBytes); + .setPartitionIndex(tip.partition()); partMap.put(tip.partition(), fetchPartition); } fetchPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); @@ -200,7 +196,7 @@ public List shareFetchData(Map topicNames) { String name = topicNames.get(shareFetchTopic.topicId()); shareFetchTopic.partitions().forEach(shareFetchPartition -> { // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. - shareFetchDataTmp.add(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex()))); + shareFetchDataTmp.add(new TopicIdPartition(shareFetchTopic.topicId(), shareFetchPartition.partitionIndex(), name)); }); }); shareFetchData = shareFetchDataTmp; diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json index b0b91b82228a3..e85fc0958610d 100644 --- a/clients/src/main/resources/common/message/ShareFetchRequest.json +++ b/clients/src/main/resources/common/message/ShareFetchRequest.json @@ -46,8 +46,6 @@ "about": "The partitions to fetch.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", - "about": "TO BE REMOVED. The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." 
}, { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", "about": "Record batches to acknowledge.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index b77a147261fa0..b2894c0f2b460 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -115,7 +115,7 @@ import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.DelayedShareFetchTest.mockTopicIdPartitionToReturnDataEqualToMinBytes; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedListEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -2826,7 +2826,7 @@ public void testFetchMessagesRotatePartitions() { verify(sharePartitionManager, times(1)).processShareFetch(captor.capture()); // Verify the partitions rotation, no rotation. ShareFetch resultShareFetch = captor.getValue(); - validateRotatedMapEquals(resultShareFetch.topicIdPartitions(), topicIdPartitions, 0); + validateRotatedListEquals(resultShareFetch.topicIdPartitions(), topicIdPartitions, 0); // Single rotation. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 1, BATCH_SIZE, @@ -2834,7 +2834,7 @@ public void testFetchMessagesRotatePartitions() { verify(sharePartitionManager, times(2)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 1. resultShareFetch = captor.getValue(); - validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); + validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); // Rotation by 3, less that the number of partitions. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 3, BATCH_SIZE, @@ -2842,7 +2842,7 @@ public void testFetchMessagesRotatePartitions() { verify(sharePartitionManager, times(3)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 3. resultShareFetch = captor.getValue(); - validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 3); + validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 3); // Rotation by 12, more than the number of partitions. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 12, BATCH_SIZE, @@ -2850,14 +2850,14 @@ public void testFetchMessagesRotatePartitions() { verify(sharePartitionManager, times(4)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 5 (12 % 7). resultShareFetch = captor.getValue(); - validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 5); + validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 5); // Rotation by Integer.MAX_VALUE, boundary test. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, Integer.MAX_VALUE, BATCH_SIZE, topicIdPartitions); verify(sharePartitionManager, times(5)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 1 (2147483647 % 7). 
resultShareFetch = captor.getValue(); - validateRotatedMapEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); + validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); } private Timer systemTimerReaper() { diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index ddf937c8a619d..a4ffc53f40da5 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -148,7 +148,6 @@ class KafkaApisTest extends Logging { private val time = new MockTime private val clientId = "" private var kafkaApis: KafkaApis = _ - private val partitionMaxBytes = 40000 @AfterEach def tearDown(): Unit = { @@ -3919,7 +3918,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex))).asJava) + new TopicIdPartition(topicId, partitionIndex, topicName)).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -3933,8 +3932,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(partitionIndex)).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -3989,7 +3987,7 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false)) + new TopicIdPartition(topicId, partitionIndex, topicName), false)) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenThrow( Errors.INVALID_REQUEST.exception() @@ -4009,8 +4007,7 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) - .setPartitionMaxBytes(partitionMaxBytes) - setAcknowledgementBatches(List( + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) @@ -4042,7 +4039,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava) ).asJava) @@ -4094,7 +4090,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ).thenThrow(Errors.INVALID_REQUEST.exception) @@ -4118,8 +4114,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). 
setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4151,7 +4146,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) // partitionMaxBytes are set even on the final fetch request, this is an invalid request .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -4185,7 +4179,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ) @@ -4200,8 +4194,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4251,7 +4244,7 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false)) + new TopicIdPartition(topicId, partitionIndex, topicName), false)) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( @@ -4270,7 +4263,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -4315,7 +4307,7 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false)) + new TopicIdPartition(topicId, partitionIndex, topicName), false)) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( @@ -4334,7 +4326,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -4381,7 +4372,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ) @@ -4396,8 +4387,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). 
setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4435,7 +4425,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new TopicIdPartition(topicId, partitionIndex, topicName) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) @@ -4450,7 +4440,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ).thenThrow(Errors.SHARE_SESSION_NOT_FOUND.exception) @@ -4465,8 +4455,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4501,8 +4490,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4542,7 +4530,7 @@ class KafkaApisTest extends Logging { when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ).thenThrow(Errors.INVALID_SHARE_SESSION_EPOCH.exception) @@ -4557,8 +4545,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4590,8 +4577,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). 
setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4678,12 +4664,12 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), false) + new TopicIdPartition(topicId, partitionIndex, topicName), false) ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) @@ -4702,8 +4688,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex) - .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) + .setPartitionIndex(partitionIndex)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4742,7 +4727,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). - setPartitionMaxBytes(partitionMaxBytes). setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(0). @@ -4780,7 +4764,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). - setPartitionMaxBytes(partitionMaxBytes). setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(10). 
@@ -4927,27 +4910,27 @@ class KafkaApisTest extends Logging { val cachedSharePartitions1 = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), false + new TopicIdPartition(topicId1, 0, topicName1), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), false + new TopicIdPartition(topicId1, 1, topicName1), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), false + new TopicIdPartition(topicId2, 0, topicName2), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), false + new TopicIdPartition(topicId2, 1, topicName2), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), false + new TopicIdPartition(topicId3, 0, topicName3), false )) val cachedSharePartitions2 = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions2.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), false + new TopicIdPartition(topicId3, 0, topicName3), false )) cachedSharePartitions2.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), false + new TopicIdPartition(topicId4, 0, topicName4), false )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( @@ -5017,21 +5000,17 @@ class KafkaApisTest extends Logging { setTopicId(topicId1). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava) ).asJava) @@ -5120,7 +5099,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), ).asJava) @@ -5155,7 +5133,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), ).asJava) .setForgottenTopicsData(List( @@ -5215,7 +5192,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(0) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -5224,7 +5200,6 @@ class KafkaApisTest extends Logging { ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(0) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(10) @@ -5237,7 +5212,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(0) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(43) @@ -5246,7 +5220,6 @@ class KafkaApisTest extends Logging { ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(0) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(17) @@ -5259,7 +5232,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(0) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(54) @@ -5272,7 +5244,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(0) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(10) @@ -5377,17 +5348,14 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), ).asJava) @@ -5520,18 +5488,15 @@ class KafkaApisTest extends Logging { setTopicId(topicId1). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), ).asJava), ).asJava) @@ -5656,17 +5621,14 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), ).asJava) @@ -5808,24 +5770,20 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes), + .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(partitionMaxBytes) ).asJava), ).asJava) @@ -5968,12 +5926,12 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), false + new TopicIdPartition(topicId, 0, topicName), false )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) + new TopicIdPartition(topicId, partitionIndex, topicName) ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) @@ -5999,8 +5957,7 @@ class KafkaApisTest extends Logging { setTopicId(topicId). 
setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0) - .setPartitionMaxBytes(40000)).asJava)).asJava) + .setPartitionIndex(0)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -6032,7 +5989,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -6077,7 +6033,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -6121,7 +6076,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) @@ -6168,7 +6122,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) @@ -6925,7 +6878,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) @@ -6938,7 +6890,6 @@ class KafkaApisTest extends Logging { ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) @@ -6951,7 +6902,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(24) @@ -6960,7 +6910,6 @@ class KafkaApisTest extends Logging { ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(40000) ).asJava) ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) @@ -7002,7 +6951,6 @@ class KafkaApisTest extends Logging { setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) @@ -7011,7 +6959,6 @@ class KafkaApisTest extends Logging { ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) @@ -7024,7 +6971,6 @@ class KafkaApisTest extends Logging { .setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setPartitionMaxBytes(40000) .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(24) diff --git a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala index 843a2b4c3ba64..910e0317b1360 
100644 --- a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala @@ -59,7 +59,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 1)) ) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) assertEquals(Errors.UNSUPPORTED_VERSION.code(), shareFetchResponse.data().errorCode()) @@ -127,7 +127,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the share fetch request to the non-replica and verify the error code - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest, nonReplicaId) val partitionData = shareFetchResponse.responseData(topicNames).get(topicIdPartition) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, partitionData.errorCode) @@ -181,7 +181,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() @@ -251,7 +251,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) // For the multi partition fetch request, the response may not be available in the first attempt // as the share partitions might not be initialized yet. So, we retry until we get the response. 
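The retry that the comment above describes is, at bottom, a bounded polling loop. A minimal Java sketch of that pattern follows; it is illustrative only, and sendFetch/hasResponse are assumed stand-ins for connectAndReceive and the per-partition response check, not APIs from this patch:

    import java.util.function.Predicate;
    import java.util.function.Supplier;

    // Sketch: re-send the share fetch until the share partitions have
    // initialized and a response with data comes back, or attempts run out.
    final class RetrySketch {
        static <R> R fetchWithRetry(Supplier<R> sendFetch,     // assumed stand-in for connectAndReceive
                                    Predicate<R> hasResponse,  // assumed response/data check
                                    int maxAttempts,
                                    long backoffMs) throws InterruptedException {
            R response = null;
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                response = sendFetch.get();
                if (hasResponse.test(response)) {
                    return response;             // partitions initialized, data returned
                }
                Thread.sleep(backoffMs);         // back off before the next attempt
            }
            return response;                     // last response after exhausting retries
        }
    }

The tests themselves wrap the same idea in TestUtils.waitUntilTrue, which is why a single empty first attempt does not fail the assertion.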
@@ -352,9 +352,9 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the first share fetch request to initialize the share partitions // Create different share fetch requests for different partitions as they may have leaders on separate brokers - var shareFetchRequest1 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send1, Seq.empty, acknowledgementsMap) - var shareFetchRequest2 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send2, Seq.empty, acknowledgementsMap) - var shareFetchRequest3 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send3, Seq.empty, acknowledgementsMap) + var shareFetchRequest1 = createShareFetchRequest(groupId, metadata, send1, Seq.empty, acknowledgementsMap) + var shareFetchRequest2 = createShareFetchRequest(groupId, metadata, send2, Seq.empty, acknowledgementsMap) + var shareFetchRequest3 = createShareFetchRequest(groupId, metadata, send3, Seq.empty, acknowledgementsMap) var shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1, destination = leader1) var shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2, destination = leader2) @@ -368,9 +368,9 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above // Create different share fetch requests for different partitions as they may have leaders on separate brokers - shareFetchRequest1 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send1, Seq.empty, acknowledgementsMap) - shareFetchRequest2 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send2, Seq.empty, acknowledgementsMap) - shareFetchRequest3 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send3, Seq.empty, acknowledgementsMap) + shareFetchRequest1 = createShareFetchRequest(groupId, metadata, send1, Seq.empty, acknowledgementsMap) + shareFetchRequest2 = createShareFetchRequest(groupId, metadata, send2, Seq.empty, acknowledgementsMap) + shareFetchRequest3 = createShareFetchRequest(groupId, metadata, send3, Seq.empty, acknowledgementsMap) shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1, destination = leader1) shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2, destination = leader2) @@ -469,7 +469,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -517,7 +517,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, 
metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -585,7 +585,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch: Int = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -613,7 +613,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setFirstOffset(0) .setLastOffset(9) .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -637,7 +637,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to confirm if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -705,7 +705,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -750,7 +750,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest 
= createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -818,7 +818,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -862,7 +862,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records releaseAcknowledgementSent = true } - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -935,7 +935,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -983,7 +983,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -1051,7 +1051,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = 
createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -1079,7 +1079,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setFirstOffset(0) .setLastOffset(9) .setAcknowledgeTypes(Collections.singletonList(3.toByte))).asJava) // Reject the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -1103,7 +1103,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to confirm if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -1173,7 +1173,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -1218,7 +1218,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -1265,7 +1265,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) 
shareFetchResponseData = shareFetchResponse.data() @@ -1335,7 +1335,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() @@ -1410,15 +1410,15 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // mocking the behaviour of multiple share consumers from the same share group val metadata1: ShareRequestMetadata = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH) val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest1 = createShareFetchRequest(groupId, metadata1, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap1, minBytes = 100, maxBytes = 1500) + val shareFetchRequest1 = createShareFetchRequest(groupId, metadata1, send, Seq.empty, acknowledgementsMap1, minBytes = 100, maxBytes = 1500) val metadata2: ShareRequestMetadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) val acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest2 = createShareFetchRequest(groupId, metadata2, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap2, minBytes = 100, maxBytes = 1500) + val shareFetchRequest2 = createShareFetchRequest(groupId, metadata2, send, Seq.empty, acknowledgementsMap2, minBytes = 100, maxBytes = 1500) val metadata3: ShareRequestMetadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest3 = createShareFetchRequest(groupId, metadata3, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap3, minBytes = 100, maxBytes = 1500) + val shareFetchRequest3 = createShareFetchRequest(groupId, metadata3, send, Seq.empty, acknowledgementsMap3, minBytes = 100, maxBytes = 1500) val shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1) val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) @@ -1510,15 +1510,15 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // mocking the behaviour of 3 different share groups val metadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest1 = createShareFetchRequest(groupId1, metadata1, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap1) + val shareFetchRequest1 = createShareFetchRequest(groupId1, metadata1, send, Seq.empty, acknowledgementsMap1) val metadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val 
acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest2 = createShareFetchRequest(groupId2, metadata2, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap2) + val shareFetchRequest2 = createShareFetchRequest(groupId2, metadata2, send, Seq.empty, acknowledgementsMap2) val metadata3 = new ShareRequestMetadata(memberId3, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest3 = createShareFetchRequest(groupId3, metadata3, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap3) + val shareFetchRequest3 = createShareFetchRequest(groupId3, metadata3, send, Seq.empty, acknowledgementsMap3) val shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1) val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) @@ -1604,7 +1604,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -1632,7 +1632,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setFirstOffset(0) .setLastOffset(9) .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -1657,7 +1657,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setFirstOffset(10) .setLastOffset(19) .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, 0, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -1714,7 +1714,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, 
acknowledgementsMapForFetch)
 var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest)
 var shareFetchResponseData = shareFetchResponse.data()
@@ -1742,7 +1742,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo
 .setFirstOffset(0)
 .setLastOffset(9)
 .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records
- shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch)
+ shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch)
 shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest)
 shareFetchResponseData = shareFetchResponse.data()
@@ -1833,7 +1833,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo
 .setFirstOffset(0)
 .setLastOffset(9)
 .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Acknowledgements in the Initial Fetch Request
- val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap)
+ val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap)
 val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest)
 val shareFetchResponseData = shareFetchResponse.data()
@@ -1940,7 +1940,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo
 // Send the second share fetch request to fetch the records produced above
 var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)
 var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch)
- var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty)
+ var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty)
 var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest)
 var shareFetchResponseData = shareFetchResponse.data()
@@ -1961,7 +1961,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo
 // Sending a third Share Fetch request with invalid share session epoch
 shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(shareSessionEpoch))
 metadata = new ShareRequestMetadata(memberId, shareSessionEpoch)
- shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty)
+ shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty)
 shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest)
 shareFetchResponseData = shareFetchResponse.data()
@@ -2016,7 +2016,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo
 // Send the second share fetch request to fetch the records produced above
 var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)
 var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch)
- val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty)
+ val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty)
 val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest)
 val shareFetchResponseData = shareFetchResponse.data()
@@ -2098,7 +2098,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends
GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() @@ -2119,7 +2119,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third Share Fetch request with wrong member Id shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(wrongMemberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() @@ -2175,7 +2175,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() @@ -2260,7 +2260,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) // For the multi partition fetch request, the response may not be available in the first attempt // as the share partitions might not be initialized yet. So, we retry until we get the response. 
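With maxPartitionBytes gone from createShareFetchRequest, every call site above collapses to the same request shape. A minimal Java sketch of that shape, built against ShareFetchRequestData as the hunks in this patch use it (the group id and topic id values are illustrative, and the fluent setters are assumed from Kafka's generated message classes):

    import java.util.List;

    import org.apache.kafka.common.Uuid;
    import org.apache.kafka.common.message.ShareFetchRequestData;

    // Sketch: after this patch a fetch partition carries only its index
    // (plus optional acknowledgement batches); there is no per-partition
    // max bytes field left to set.
    final class RequestShapeSketch {
        static ShareFetchRequestData build(String groupId, Uuid topicId) {
            return new ShareFetchRequestData()
                .setGroupId(groupId)
                .setTopics(List.of(
                    new ShareFetchRequestData.FetchTopic()
                        .setTopicId(topicId)
                        .setPartitions(List.of(
                            new ShareFetchRequestData.FetchPartition()
                                .setPartitionIndex(0)))));
        }
    }

Fetch sizing is now governed only by the request-level minBytes/maxBytes (and batchSize), which is what the updated ShareFetchRequest.Builder.forConsumer signature below reflects.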
@@ -2290,7 +2290,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val forget: Seq[TopicIdPartition] = Seq(topicIdPartition1) - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, Seq.empty, forget, acknowledgementsMap) + shareFetchRequest = createShareFetchRequest(groupId, metadata, Seq.empty, forget, acknowledgementsMap) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() @@ -2315,7 +2315,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo val partitions: util.Set[Integer] = new util.HashSet() TestUtils.waitUntilTrue(() => { val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, topicIdPartitions, Seq.empty, Map.empty) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, topicIdPartitions, Seq.empty, Map.empty) val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() @@ -2358,7 +2358,6 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo private def createShareFetchRequest(groupId: String, metadata: ShareRequestMetadata, - maxPartitionBytes: Int, send: Seq[TopicIdPartition], forget: Seq[TopicIdPartition], acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]], @@ -2366,7 +2365,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo minBytes: Int = 0, maxBytes: Int = Int.MaxValue, batchSize: Int = 500): ShareFetchRequest = { - ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxPartitionBytes, batchSize, send.asJava, forget.asJava, acknowledgementsMap.asJava) + ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, batchSize, send.asJava, forget.asJava, acknowledgementsMap.asJava) .build() } diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java index 7dc0a8f299ea7..d2929ce81162d 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java @@ -27,7 +27,7 @@ import java.util.ArrayList; import java.util.List; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedMapEquals; +import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedListEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -40,22 +40,22 @@ public void testRoundRobinStrategy() { List result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); assertEquals(3, result.size()); - validateRotatedMapEquals(partitions, result, 1); + validateRotatedListEquals(partitions, result, 1); // Session epoch is greater than the number of partitions. 
result = strategy.rotate(partitions, new PartitionRotateMetadata(5)); assertEquals(3, result.size()); - validateRotatedMapEquals(partitions, result, 2); + validateRotatedListEquals(partitions, result, 2); // Session epoch is at Integer.MAX_VALUE. result = strategy.rotate(partitions, new PartitionRotateMetadata(Integer.MAX_VALUE)); assertEquals(3, result.size()); - validateRotatedMapEquals(partitions, result, 1); + validateRotatedListEquals(partitions, result, 1); // No rotation at same size as epoch. result = strategy.rotate(partitions, new PartitionRotateMetadata(3)); assertEquals(3, result.size()); - validateRotatedMapEquals(partitions, result, 0); + validateRotatedListEquals(partitions, result, 0); } @Test @@ -67,13 +67,13 @@ public void testRoundRobinStrategyWithSpecialSessionEpochs() { partitions, new PartitionRotateMetadata(ShareRequestMetadata.INITIAL_EPOCH)); assertEquals(3, result.size()); - validateRotatedMapEquals(partitions, result, 0); + validateRotatedListEquals(partitions, result, 0); result = strategy.rotate( partitions, new PartitionRotateMetadata(ShareRequestMetadata.FINAL_EPOCH)); assertEquals(3, result.size()); - validateRotatedMapEquals(partitions, result, 0); + validateRotatedListEquals(partitions, result, 0); } @Test diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java index e9fc9d4bd6a05..db3aa45d6876a 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTestUtils.java @@ -43,13 +43,13 @@ public class ShareFetchTestUtils { /** - * Validate that the rotated map is equal to the original map with the keys rotated by the given position. + * Validate that the rotated list is equal to the original list rotated by the given position. * - * @param original The original map. - * @param result The rotated map. - * @param rotationAt The position to rotate the keys at. + * @param original The original list. + * @param result The rotated list. + * @param rotationAt The position to rotate the elements at. 
*/ - public static void validateRotatedMapEquals( + public static void validateRotatedListEquals( List original, List result, int rotationAt From 24ac28cd6623542bdb5731b753e603f959138e84 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Wed, 12 Mar 2025 11:32:19 +0530 Subject: [PATCH 09/11] Addressed Apoorv's review comments round 2 - part 1 --- .../server/share/DelayedShareFetchTest.java | 2 +- .../share/SharePartitionManagerTest.java | 44 +++++------ .../kafka/server/ReplicaManagerTest.scala | 3 +- .../ShareFetchAcknowledgeRequestTest.scala | 76 +------------------ .../share/ErroneousAndValidPartitionData.java | 4 +- .../share/fetch/PartitionRotateStrategy.java | 3 + .../fetch/PartitionRotateStrategyTest.java | 8 +- 7 files changed, 34 insertions(+), 106 deletions(-) diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index fc7fc872afccc..187b602183195 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -1144,7 +1144,7 @@ public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { public void testOnCompleteExecutionOnTimeout() { ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), new ArrayList<>(), BATCH_SIZE, MAX_FETCH_RECORDS, + new CompletableFuture<>(), List.of(), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index b2894c0f2b460..c4a531ae3e99a 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -206,7 +206,7 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.emptyList(), reqMetadata2, true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), List.of(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @@ -238,7 +238,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. 
// New context should be created successfully List reqData3 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, List.of(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @@ -270,7 +270,7 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH List reqData3 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); assertInstanceOf(FinalContext.class, - sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true)); + sharePartitionManager.newContext(groupId, reqData3, List.of(), reqMetadata2, true)); } @Test @@ -323,7 +323,7 @@ public void testNewContext() { new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -345,14 +345,14 @@ public void testNewContext() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // Get the final share session. 
- ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -434,7 +434,7 @@ public void testShareSessionExpiration() { time.sleep(500); // Create a subsequent share fetch context for session 1 - ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, session1context2); @@ -563,7 +563,7 @@ public void testZeroSizeShareSession() { List removed2 = new ArrayList<>(); removed2.add(foo0); removed2.add(foo1); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), removed2, + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), removed2, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context2); @@ -596,7 +596,7 @@ public void testToForgetPartitions() { mockUpdateAndGenerateResponseData(context1, groupId, reqMetadata1.memberId()); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.singletonList(foo), + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), Collections.singletonList(foo), new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); // So foo is removed but not the others. @@ -604,9 +604,9 @@ public void testToForgetPartitions() { mockUpdateAndGenerateResponseData(context2, groupId, reqMetadata1.memberId()); - ShareFetchContext context3 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.singletonList(bar), + ShareFetchContext context3 = sharePartitionManager.newContext(groupId, List.of(), Collections.singletonList(bar), new ShareRequestMetadata(reqMetadata1.memberId(), 2), true); - assertPartitionsPresent((ShareSessionContext) context3, Collections.emptyList()); + assertPartitionsPresent((ShareSessionContext) context3, List.of()); } // This test simulates a share session where the topic ID changes broker side (the one handling the request) in both the metadata cache and the log @@ -647,7 +647,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share fetch request as though no topics changed. - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context2); @@ -711,7 +711,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { new ShareRequestMetadata(Uuid.randomUuid(), 1), true)); // Continue the first share session we created. 
- ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -737,12 +737,12 @@ public void testGetErroneousAndValidTopicIdPartitions() { assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), Arrays.asList(tpNull1, tpNull2), Arrays.asList(tp0, tp1)); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); - assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), Collections.emptyList(), Collections.emptyList()); + assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), List.of(), List.of()); // Check for throttled response ShareFetchResponse resp8 = context8.throttleResponse(100); assertEquals(Errors.NONE, resp8.error()); @@ -831,7 +831,7 @@ public void testShareFetchContextResponseSize() { new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); int respSize7 = context7.responseSize(respData2, version); @@ -842,7 +842,7 @@ public void testShareFetchContextResponseSize() { assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -955,7 +955,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(Collections.singletonList(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Get the final share session. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyList(), EMPTY_PART_LIST, + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context5.getClass()); @@ -970,7 +970,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertTrue(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Continue the second share session . 
- ShareFetchContext context6 = sharePartitionManager.newContext(groupId, Collections.emptyList(), Collections.singletonList(tp3), + ShareFetchContext context6 = sharePartitionManager.newContext(groupId, List.of(), Collections.singletonList(tp3), new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); assertInstanceOf(ShareSessionContext.class, context6); assertTrue(((ShareSessionContext) context6).isSubsequent()); @@ -979,7 +979,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp6 = context6.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData6); assertEquals(Errors.NONE, resp6.error()); - assertEquals(Collections.emptyList(), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); + assertEquals(List.of(), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); } @Test @@ -1995,7 +1995,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(Arrays.asList(tp1, tp3)); doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any())).thenReturn(new ShareAcquiredRecords(Collections.emptyList(), 0)); + when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any())).thenReturn(new ShareAcquiredRecords(List.of(), 0)); // Release acquired records on session close request for tp1 and tp3. sharePartitionManager.releaseSession(groupId, memberId); @@ -2565,7 +2565,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any())).thenReturn(new ShareAcquiredRecords(Collections.emptyList(), 0)); + when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any())).thenReturn(new ShareAcquiredRecords(List.of(), 0)); // Fail initialization for tp2. 
SharePartition sp2 = mock(SharePartition.class); diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index bd74798c019f9..cba5f9ea5daf5 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -6083,8 +6083,7 @@ class ReplicaManagerTest { try { val groupId = "grp" val tp1 = new TopicIdPartition(Uuid.randomUuid, new TopicPartition("foo1", 0)) - val topicPartitions = new util.ArrayList[TopicIdPartition] - topicPartitions.add(tp1) + val topicPartitions = util.List.of(tp1) val sp1 = mock(classOf[SharePartition]) val sharePartitions = new util.LinkedHashMap[TopicIdPartition, SharePartition] diff --git a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala index 910e0317b1360..1e5775e7b6f84 100644 --- a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala @@ -37,8 +37,7 @@ import scala.jdk.CollectionConverters._ )) @Tag("integration") class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster){ - - private final val MAX_PARTITION_BYTES = 10000 + private final val MAX_WAIT_MS = 5000 @AfterEach @@ -1284,79 +1283,6 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } - @ClusterTests( - Array( - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") - ) - ), - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") - ) - ), - ) - ) - def testShareFetchBrokerDoesNotRespectPartitionsSizeLimit(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() - - val topic = "topic" - val partition = 0 - - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) - - // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) - - initProducer() - // Producing 3 large messages to the topic 
created above - produceData(topicIdPartition, 10) - produceData(topicIdPartition, "large message 1", new String(new Array[Byte](MAX_PARTITION_BYTES/3))) - produceData(topicIdPartition, "large message 2", new String(new Array[Byte](MAX_PARTITION_BYTES/3))) - produceData(topicIdPartition, "large message 3", new String(new Array[Byte](MAX_PARTITION_BYTES/3))) - - // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) - - val shareFetchResponseData = shareFetchResponse.data() - assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) - - val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) - .setErrorCode(Errors.NONE.code()) - .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(12), Collections.singletonList(1))) - // The first 10 records will be consumed as it is. For the last 3 records, each of size MAX_PARTITION_BYTES/3, - // all 3 of then will be consumed (offsets 10, 11 and 12) because even though the inclusion of the third last record will exceed - // the max partition bytes limit. We should only consider the request level maxBytes as the hard limit. - - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) - compareFetchResponsePartitions(expectedPartitionData, partitionData) - } - @ClusterTests( Array( new ClusterTest( diff --git a/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java index c7810eabfcec9..d2a1e68a11c6f 100644 --- a/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java +++ b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java @@ -53,8 +53,8 @@ public ErroneousAndValidPartitionData(List shareFetchData) { } public ErroneousAndValidPartitionData() { - this.erroneous = new HashMap<>(); - this.validTopicIdPartitions = new ArrayList<>(); + this.erroneous = Map.of(); + this.validTopicIdPartitions = List.of(); } public Map erroneous() { diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index 820ee0fbd4d7c..c7d7118263b5f 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -81,7 +81,10 @@ static List rotateRoundRobin( return topicIdPartitions; } + // We don't want to modify the original list, hence created a copy. List rotatedPartitions = new ArrayList<>(topicIdPartitions); + // We want the elements from the end of the list to move left by the distance provided i.e. 
if the original list is [1,2,3], + // and we want to rotate it by 1, we want the output as [2,3,1] and not [3,1,2]. Hence, we need negation of distance here. Collections.rotate(rotatedPartitions, -1 * rotateAt); return rotatedPartitions; } diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java index d2929ce81162d..7055a21b74836 100644 --- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java @@ -36,7 +36,7 @@ public class PartitionRotateStrategyTest { @Test public void testRoundRobinStrategy() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); - ArrayList partitions = createPartitions(3); + List partitions = createPartitions(3); List result = strategy.rotate(partitions, new PartitionRotateMetadata(1)); assertEquals(3, result.size()); @@ -62,7 +62,7 @@ public void testRoundRobinStrategy() { public void testRoundRobinStrategyWithSpecialSessionEpochs() { PartitionRotateStrategy strategy = PartitionRotateStrategy.type(StrategyType.ROUND_ROBIN); - ArrayList partitions = createPartitions(3); + List partitions = createPartitions(3); List result = strategy.rotate( partitions, new PartitionRotateMetadata(ShareRequestMetadata.INITIAL_EPOCH)); @@ -90,8 +90,8 @@ public void testRoundRobinStrategyWithEmptyPartitions() { * @param size The number of topic-partitions to create. * @return The ordered set of topic partitions. */ - private ArrayList createPartitions(int size) { - ArrayList partitions = new ArrayList<>(); + private List createPartitions(int size) { + List partitions = new ArrayList<>(); for (int i = 0; i < size; i++) { partitions.add(new TopicIdPartition(Uuid.randomUuid(), i, "foo" + i)); } From aed4e9f5df08d02ff092eaa970bef1a27e1be741 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Wed, 12 Mar 2025 12:20:14 +0530 Subject: [PATCH 10/11] Addressed Apoorv's review comments round 2 - part 2 --- .../unit/kafka/server/KafkaApisTest.scala | 443 +++++++++--------- 1 file changed, 216 insertions(+), 227 deletions(-) diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index a4ffc53f40da5..c62926c82c7bc 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -3917,8 +3917,8 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), List( - new TopicIdPartition(topicId, partitionIndex, topicName)).asJava) + new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), util.List.of( + new TopicIdPartition(topicId, partitionIndex, topicName))) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -3928,11 +3928,11 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex)).asJava)).asJava) + .setPartitionIndex(partitionIndex))))) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4002,19 +4002,19 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4034,13 +4034,13 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava) - ).asJava) + )) + )) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4089,9 +4089,9 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ).thenThrow(Errors.INVALID_REQUEST.exception) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( @@ -4110,11 +4110,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4178,9 +4178,9 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -4190,11 +4190,11 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). 
+ setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4258,19 +4258,19 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4321,19 +4321,19 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4371,9 +4371,9 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -4383,11 +4383,11 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -4439,9 +4439,9 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ).thenThrow(Errors.SHARE_SESSION_NOT_FOUND.exception) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -4451,11 +4451,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4486,11 +4486,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId2.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4529,9 +4529,9 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ).thenThrow(Errors.INVALID_SHARE_SESSION_EPOCH.exception) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( @@ -4541,11 +4541,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4573,11 +4573,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). // Invalid share session epoch, should have 1 for the second request. - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4668,9 +4668,9 @@ class KafkaApisTest extends Logging { ) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 2), new ShareSession( @@ -4684,11 +4684,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex)).asJava)).asJava) + .setPartitionIndex(partitionIndex))))) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -4722,16 +4722,16 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). - setAcknowledgementBatches(List( + setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(0). setLastOffset(9). - setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava)).asJava)).asJava)).asJava) + setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava))))))) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4759,16 +4759,16 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). - setAcknowledgementBatches(List( + setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(10). setLastOffset(19). 
- setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava)).asJava)).asJava)).asJava) + setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava))))))) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4934,12 +4934,12 @@ class KafkaApisTest extends Logging { )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) - ).asJava) + )) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions1, 0L, 0L, 2)) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 2), new ShareSession( @@ -4995,24 +4995,24 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava) - ).asJava) + )) + )) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -5093,14 +5093,14 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava), - ).asJava) + )), + )) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5127,22 +5127,22 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId4). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava), - ).asJava) - .setForgottenTopicsData(List( + )), + )) + .setForgottenTopicsData(util.List.of( new ForgottenTopic() .setTopicId(topicId1) - .setPartitions(List(Integer.valueOf(0), Integer.valueOf(1)).asJava), + .setPartitions(util.List.of(Integer.valueOf(0), Integer.valueOf(1))), new ForgottenTopic() .setTopicId(topicId2) - .setPartitions(List(Integer.valueOf(0), Integer.valueOf(1)).asJava) - ).asJava) + .setPartitions(util.List.of(Integer.valueOf(0), Integer.valueOf(1))) + )) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5186,72 +5186,72 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(-1). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)), + )), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(19) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)), + )) + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(43) .setLastOffset(52) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)), + )), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(17) .setLastOffset(26) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)), + )) + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(54) .setLastOffset(93) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), - ).asJava), - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)), + )), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId4). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(24) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), - ).asJava), - ).asJava), - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)), + )), + )), + )) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5326,10 +5326,7 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() - validPartitions.add(tp1) - validPartitions.add(tp2) - validPartitions.add(tp3) + val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1, tp2, tp3) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5342,22 +5339,22 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava), - ).asJava) + )), + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -5469,8 +5466,7 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) ) - val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() - validPartitions.add(tp1) + val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5483,22 +5479,22 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), - ).asJava), - ).asJava) + )), + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -5599,10 +5595,7 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() - validPartitions.add(tp1) - validPartitions.add(tp2) - validPartitions.add(tp3) + val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1, tp2, tp3) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5615,22 +5608,22 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava), - ).asJava) + )), + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -5746,11 +5739,7 @@ class KafkaApisTest extends Logging { val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.List[TopicIdPartition] = new util.ArrayList() - validPartitions.add(tp1) - validPartitions.add(tp2) - validPartitions.add(tp3) - validPartitions.add(tp4) + val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1, tp2, tp3, tp4) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5764,28 +5753,28 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava), + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).asJava), - ).asJava) + )), + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -5930,9 +5919,9 @@ class KafkaApisTest extends Logging { )) when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), List( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) - ).asJava) + )) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) ) @@ -5953,11 +5942,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).asJava)).asJava) + .setPartitionIndex(0))))) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) @@ -5984,19 +5973,19 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -6028,19 +6017,19 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -6071,19 +6060,19 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) @@ -6117,19 +6106,19 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + )) + )) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any(), any())).thenReturn(List[AuthorizationResult]( @@ -6872,46 +6861,46 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + .setAcknowledgeTypes(util.List.of(1.toByte)), new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(17) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)) + )), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte)) - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of(2.toByte)) + )) + )), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(Collections.singletonList(3.toByte)) - ).asJava), + .setAcknowledgeTypes(util.List.of(3.toByte)) + )), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).asJava) - ).asJava) + )) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val topicNames = new util.HashMap[Uuid, String] topicNames.put(topicId1, "foo1") @@ -6945,40 +6934,40 @@ class KafkaApisTest extends Logging { setGroupId("group"). 
setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(List( + setTopics(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - ).asJava), + .setAcknowledgeTypes(util.List.of(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + )), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.emptyList()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + )) + )), new ShareFetchRequestData.FetchTopic() .setTopicId(topicId2) - .setPartitions(List( + .setPartitions(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(Collections.singletonList(3.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(3.toByte)) + )) + )) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val topicIdNames = new util.HashMap[Uuid, String] topicIdNames.put(topicId1, "foo1") // topicId2 is not present in topicIdNames From 465c2d22f289e22c62450d907eded062a040b7c2 Mon Sep 17 00:00:00 2001 From: adixitconfluent Date: Wed, 12 Mar 2025 15:31:39 +0530 Subject: [PATCH 11/11] Addressed Apoorv's review comments round 3 --- .../kafka/server/share/fetch/PartitionRotateStrategy.java | 6 +++--- .../server/share/fetch/PartitionRotateStrategyTest.java | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java index c7d7118263b5f..600f18165b033 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategy.java @@ -81,10 +81,10 @@ static List rotateRoundRobin( return topicIdPartitions; } - // We don't want to modify the original list, hence created a copy. + // Avoid modifying the original list, create copy. List rotatedPartitions = new ArrayList<>(topicIdPartitions); - // We want the elements from the end of the list to move left by the distance provided i.e. if the original list is [1,2,3], - // and we want to rotate it by 1, we want the output as [2,3,1] and not [3,1,2]. Hence, we need negation of distance here. + // Elements from the list should move left by the distance provided i.e. if the original list is [1,2,3], + // and rotation is by 1, then output should be [2,3,1] and not [3,1,2]. Hence, negate the distance here. 
        Collections.rotate(rotatedPartitions, -1 * rotateAt);
         return rotatedPartitions;
     }
 
diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java
index 7055a21b74836..da53b0af6a879 100644
--- a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java
+++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionRotateStrategyTest.java
@@ -86,9 +86,9 @@ public void testRoundRobinStrategyWithEmptyPartitions() {
     }
 
     /**
-     * Create an ordered set of topic partitions.
+     * Create a list of topic partitions.
      * @param size The number of topic-partitions to create.
-     * @return The ordered set of topic partitions.
+     * @return The list of topic partitions.
      */
     private List<TopicIdPartition> createPartitions(int size) {
         List<TopicIdPartition> partitions = new ArrayList<>();
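
For context on the negated distance in rotateRoundRobin above: java.util.Collections.rotate shifts elements toward higher indices when the distance is positive, so a left rotation needs a negative distance. A minimal standalone sketch (illustrative only; the class name RotateDemo and the integer payload are invented for the example, not part of this patch):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class RotateDemo {
        public static void main(String[] args) {
            // A positive distance rotates right: [1, 2, 3] -> [3, 1, 2].
            List<Integer> right = new ArrayList<>(List.of(1, 2, 3));
            Collections.rotate(right, 1);
            System.out.println(right); // prints [3, 1, 2]

            // Negating the distance rotates left, which is what the
            // round-robin strategy intends: [1, 2, 3] -> [2, 3, 1].
            List<Integer> left = new ArrayList<>(List.of(1, 2, 3));
            Collections.rotate(left, -1);
            System.out.println(left); // prints [2, 3, 1]
        }
    }

With a rotation distance of 1 derived from the share session epoch, successive fetches start from successive partitions, which is the round-robin behaviour the strategy name describes.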