@@ -17,7 +17,6 @@
 
 package kafka.log
 
-import kafka.common._
 import kafka.server.KafkaConfig
 import kafka.utils.{CoreUtils, Logging, Pool, TestUtils}
 import org.apache.kafka.common.TopicPartition
@@ -29,7 +28,7 @@ import org.apache.kafka.common.utils.Utils
 import org.apache.kafka.coordinator.transaction.TransactionLogConfig
 import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics}
 import org.apache.kafka.server.util.MockTime
-import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig}
+import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogCleaningAbortedException, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig}
 import org.apache.kafka.storage.internals.utils.Throttler
 import org.apache.kafka.storage.log.metrics.BrokerTopicStats
 import org.junit.jupiter.api.Assertions._
@@ -1217,18 +1216,18 @@ class LogCleanerTest extends Logging {
 
     def distinctValuesBySegment = log.logSegments.asScala.map(s => s.log.records.asScala.map(record => TestUtils.readString(record.value)).toSet.size).toSeq
 
-    val disctinctValuesBySegmentBeforeClean = distinctValuesBySegment
+    val distinctValuesBySegmentBeforeClean = distinctValuesBySegment
     assertTrue(distinctValuesBySegment.reverse.tail.forall(_ > N),
       "Test is not effective unless each segment contains duplicates. Increase segment size or decrease number of keys.")
 
     cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, firstUncleanableOffset))
 
     val distinctValuesBySegmentAfterClean = distinctValuesBySegment
 
-    assertTrue(disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean)
+    assertTrue(distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean)
       .take(numCleanableSegments).forall { case (before, after) => after < before },
       "The cleanable segments should have fewer number of values after cleaning")
-    assertTrue(disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean)
+    assertTrue(distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean)
       .slice(numCleanableSegments, numTotalSegments).forall { x => x._1 == x._2 }, "The uncleanable segments should have the same number of values after cleaning")
   }
 
@@ -1240,9 +1239,9 @@ class LogCleanerTest extends Logging {
     val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
 
     // create 6 segments with only one message in each segment
-    def createRecorcs = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes)
+    def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes)
     for (_ <- 0 until 6)
-      log.appendAsLeader(createRecorcs, leaderEpoch = 0)
+      log.appendAsLeader(createRecords, leaderEpoch = 0)
 
     val logToClean = LogToClean(new TopicPartition("test", 0), log, log.activeSegment.baseOffset, log.activeSegment.baseOffset)
 
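Aside from the two typo renames (disctinctValuesBySegmentBeforeClean -> distinctValuesBySegmentBeforeClean, createRecorcs -> createRecords), the only functional change in this diff is the added LogCleaningAbortedException import, which suggests a test elsewhere in the file now expects an aborted clean. As an illustration only, not part of this diff, here is a minimal sketch of how such an expectation is typically written in this file's style (assertThrows comes from the existing Assertions._ wildcard import; cleaner, log, and firstUncleanableOffset stand in for whatever the real test sets up):

    // Sketch under the assumptions above, not the actual test added by this
    // change: a clean that is aborted mid-flight is expected to surface as a
    // LogCleaningAbortedException.
    assertThrows(classOf[LogCleaningAbortedException],
      () => cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, firstUncleanableOffset)))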