
Commit 78f38d4

removed partition_max_bytes from delayed share fetch
1 parent 615c8c0 commit 78f38d4

2 files changed: +27 −8 lines


clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java

+14 −1
@@ -59,9 +59,18 @@ public static final class PartitionData {
         public final Uuid topicId;
         public final long fetchOffset;
         public final long logStartOffset;
-        public final int maxBytes;
         public final Optional<Integer> currentLeaderEpoch;
         public final Optional<Integer> lastFetchedEpoch;
+        public int maxBytes;
+
+        public PartitionData(
+            Uuid topicId,
+            long fetchOffset,
+            long logStartOffset,
+            Optional<Integer> currentLeaderEpoch
+        ) {
+            this(topicId, fetchOffset, logStartOffset, 0, currentLeaderEpoch, Optional.empty());
+        }

         public PartitionData(
             Uuid topicId,
@@ -89,6 +98,10 @@ public PartitionData(
             this.lastFetchedEpoch = lastFetchedEpoch;
         }

+        public void updateMaxBytes(int maxBytes) {
+            this.maxBytes = maxBytes;
+        }
+
         @Override
         public boolean equals(Object o) {
             if (this == o) return true;
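As a minimal usage sketch (not part of this commit): the new overload builds a PartitionData with maxBytes defaulting to 0, and updateMaxBytes assigns the per-partition budget once it is known. The Uuid, offsets, byte values, and partition count below are illustrative assumptions.

import java.util.Optional;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.requests.FetchRequest;

public class PartitionDataSketch {
    public static void main(String[] args) {
        // Build the request data without a per-partition max bytes value;
        // the new overload delegates with maxBytes = 0.
        FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(
            Uuid.randomUuid(),   // topicId (illustrative)
            100L,                // fetchOffset (illustrative)
            0L,                  // logStartOffset
            Optional.empty()     // currentLeaderEpoch
        );

        // Assign the budget later, once the number of acquirable partitions is known.
        int requestMaxBytes = 1_048_576;   // assumption: request-level max bytes
        int acquirablePartitions = 4;      // assumption: partitions acquired for this fetch
        partitionData.updateMaxBytes(requestMaxBytes / acquirablePartitions);
    }
}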

core/src/main/java/kafka/server/share/DelayedShareFetch.java

+13 −7
@@ -41,6 +41,7 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.stream.Collectors;

@@ -124,7 +125,7 @@ private void completeShareFetchRequest(LinkedHashMap<TopicIdPartition, FetchRequ
         try {
             LinkedHashMap<TopicIdPartition, LogReadResult> responseData;
             if (partitionsAlreadyFetched.isEmpty())
-                responseData = readFromLog(topicPartitionData);
+                responseData = readFromLog(topicPartitionData, shareFetch.fetchParams().maxBytes / topicPartitionData.size());
             else
                 // There shouldn't be a case when we have a partitionsAlreadyFetched value here and this variable is getting
                 // updated in a different tryComplete thread.
@@ -206,7 +207,6 @@ LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> acquirablePartitions
         LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData = new LinkedHashMap<>();

         sharePartitions.forEach((topicIdPartition, sharePartition) -> {
-            int partitionMaxBytes = shareFetch.partitionMaxBytes().getOrDefault(topicIdPartition, 0);
             // Add the share partition to the list of partitions to be fetched only if we can
             // acquire the fetch lock on it.
             if (sharePartition.maybeAcquireFetchLock()) {
@@ -219,7 +219,6 @@ LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> acquirablePartitions
                         topicIdPartition.topicId(),
                         sharePartition.nextFetchOffset(),
                         0,
-                        partitionMaxBytes,
                         Optional.empty()
                     )
                 );
@@ -250,7 +249,7 @@ private LinkedHashMap<TopicIdPartition, LogReadResult> maybeReadFromLog(LinkedHa
             return new LinkedHashMap<>();
         }
         // We fetch data from replica manager corresponding to the topic partitions that have missing fetch offset metadata.
-        return readFromLog(partitionsNotMatchingFetchOffsetMetadata);
+        return readFromLog(partitionsNotMatchingFetchOffsetMetadata, shareFetch.fetchParams().maxBytes / topicPartitionData.size());
     }

     private void maybeUpdateFetchOffsetMetadata(
@@ -311,7 +310,7 @@ private boolean isMinBytesSatisfied(LinkedHashMap<TopicIdPartition, FetchRequest
                 return true;
             } else if (fetchOffsetMetadata.onSameSegment(endOffsetMetadata)) {
                 // we take the partition fetch size as upper bound when accumulating the bytes.
-                long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), partitionData.maxBytes);
+                long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), shareFetch.fetchParams().maxBytes / topicPartitionData.size());
                 accumulatedSize += bytesAvailable;
             }
         }
@@ -334,12 +333,13 @@ else if (isolationType == FetchIsolation.HIGH_WATERMARK)

     }

-    private LinkedHashMap<TopicIdPartition, LogReadResult> readFromLog(LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
+    private LinkedHashMap<TopicIdPartition, LogReadResult> readFromLog(LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData, int partitionMaxBytes) {
         // Filter if there already exists any erroneous topic partition.
         Set<TopicIdPartition> partitionsToFetch = shareFetch.filterErroneousTopicPartitions(topicPartitionData.keySet());
         if (partitionsToFetch.isEmpty()) {
             return new LinkedHashMap<>();
         }
+        topicPartitionData.values().forEach(partitionData -> partitionData.updateMaxBytes(partitionMaxBytes));

         Seq<Tuple2<TopicIdPartition, LogReadResult>> responseLogResult = replicaManager.readFromLog(
             shareFetch.fetchParams(),
@@ -392,15 +392,21 @@ private void handleFetchException(
     LinkedHashMap<TopicIdPartition, LogReadResult> combineLogReadResponse(LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData,
                                                                           LinkedHashMap<TopicIdPartition, LogReadResult> existingFetchedData) {
         LinkedHashMap<TopicIdPartition, FetchRequest.PartitionData> missingLogReadTopicPartitions = new LinkedHashMap<>();
+        AtomicInteger totalPartitionMaxBytesUsed = new AtomicInteger();
         topicPartitionData.forEach((topicIdPartition, partitionData) -> {
             if (!existingFetchedData.containsKey(topicIdPartition)) {
                 missingLogReadTopicPartitions.put(topicIdPartition, partitionData);
+            } else {
+                totalPartitionMaxBytesUsed.addAndGet(partitionData.maxBytes);
             }
         });
         if (missingLogReadTopicPartitions.isEmpty()) {
             return existingFetchedData;
         }
-        LinkedHashMap<TopicIdPartition, LogReadResult> missingTopicPartitionsLogReadResponse = readFromLog(missingLogReadTopicPartitions);
+        LinkedHashMap<TopicIdPartition, LogReadResult> missingTopicPartitionsLogReadResponse = readFromLog(
+            missingLogReadTopicPartitions,
+            (shareFetch.fetchParams().maxBytes - totalPartitionMaxBytesUsed.get()) / missingLogReadTopicPartitions.size()
+        );
         missingTopicPartitionsLogReadResponse.putAll(existingFetchedData);
         return missingTopicPartitionsLogReadResponse;
     }
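The net effect: readFromLog now receives a single per-partition byte budget derived from the request-level maxBytes, and combineLogReadResponse splits only the remaining budget across partitions that still need a log read. Below is a standalone sketch of that arithmetic; the partition names and byte values are illustrative assumptions, and plain maps stand in for the actual TopicIdPartition/PartitionData types.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class PartitionMaxBytesSketch {
    public static void main(String[] args) {
        int requestMaxBytes = 1_048_576; // assumption: request-level max bytes (fetchParams().maxBytes)

        // Initial read: every acquirable partition gets an equal share of the request budget.
        Map<String, Integer> topicPartitionData = new LinkedHashMap<>();
        topicPartitionData.put("topicA-0", 0);
        topicPartitionData.put("topicA-1", 0);
        topicPartitionData.put("topicB-0", 0);
        int perPartitionMaxBytes = requestMaxBytes / topicPartitionData.size();
        topicPartitionData.replaceAll((tp, unused) -> perPartitionMaxBytes);

        // Combining with already-fetched data: budget consumed by fetched partitions is
        // subtracted, and the remainder is split across partitions still missing a log read.
        AtomicInteger totalPartitionMaxBytesUsed = new AtomicInteger();
        Map<String, Integer> missingLogReadTopicPartitions = new LinkedHashMap<>();
        topicPartitionData.forEach((tp, maxBytes) -> {
            boolean alreadyFetched = tp.equals("topicA-0"); // assumption: one partition already read
            if (alreadyFetched) {
                totalPartitionMaxBytesUsed.addAndGet(maxBytes);
            } else {
                missingLogReadTopicPartitions.put(tp, maxBytes);
            }
        });
        int remainingPerPartition =
            (requestMaxBytes - totalPartitionMaxBytesUsed.get()) / missingLogReadTopicPartitions.size();

        System.out.println("initial per-partition budget: " + perPartitionMaxBytes);
        System.out.println("budget per missing partition: " + remainingPerPartition);
    }
}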
