From 67c49eb8f80e22e049891881174b3621e9be2105 Mon Sep 17 00:00:00 2001
From: Kryesh
Date: Fri, 18 Apr 2025 21:24:34 +1000
Subject: [PATCH 1/5] Update `LogMergePolicy` to skip to a target number of
 documents per segment when possible

---
 src/indexer/index_writer.rs     |   4 +-
 src/indexer/log_merge_policy.rs | 140 +++++++++++++++++++++++++++-----
 2 files changed, 123 insertions(+), 21 deletions(-)

diff --git a/src/indexer/index_writer.rs b/src/indexer/index_writer.rs
index c15d6d456b..5340c9cadf 100644
--- a/src/indexer/index_writer.rs
+++ b/src/indexer/index_writer.rs
@@ -1002,8 +1002,8 @@ mod tests {
         let index_writer: IndexWriter = index.writer_for_tests().unwrap();
         assert_eq!(
             format!("{:?}", index_writer.get_merge_policy()),
-            "LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, \
-             min_layer_size: 10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
+            "LogMergePolicy { min_num_segments: 8, target_segment_size: 10000000, min_layer_size: \
+             10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
         );
         let merge_policy = Box::<LogMergePolicy>::default();
         index_writer.set_merge_policy(merge_policy);
diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs
index 86a8fd3417..48df82a236 100644
--- a/src/indexer/log_merge_policy.rs
+++ b/src/indexer/log_merge_policy.rs
@@ -19,7 +19,7 @@ const DEFAULT_DEL_DOCS_RATIO_BEFORE_MERGE: f32 = 1.0f32;
 #[derive(Debug, Clone)]
 pub struct LogMergePolicy {
     min_num_segments: usize,
-    max_docs_before_merge: usize,
+    target_segment_size: usize,
     min_layer_size: u32,
     level_log_size: f64,
     del_docs_ratio_before_merge: f32,
@@ -38,8 +38,8 @@ impl LogMergePolicy {
     /// Set the maximum number docs in a segment for it to be considered for
     /// merging. A segment can still reach more than max_docs, by merging many
     /// smaller ones.
-    pub fn set_max_docs_before_merge(&mut self, max_docs_merge_size: usize) {
-        self.max_docs_before_merge = max_docs_merge_size;
+    pub fn set_target_segment_size(&mut self, max_docs_merge_size: usize) {
+        self.target_segment_size = max_docs_merge_size;
     }
 
     /// Set the minimum segment size under which all segment belong
@@ -92,38 +92,87 @@ fn deletes_ratio(segment: &SegmentMeta) -> f32 {
 
 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
+        // Get segments that are small enough to be merged and sort them by size in descending
+        // order.
         let size_sorted_segments = segments
             .iter()
-            .filter(|seg| seg.num_docs() <= (self.max_docs_before_merge as u32))
+            .filter(|seg| (seg.num_docs() as usize) < self.target_segment_size)
             .sorted_by_key(|seg| std::cmp::Reverse(seg.max_doc()))
-            .collect::<Vec<&SegmentMeta>>();
+            .collect_vec();
 
+        // If there are no small enough segments, return an empty vector.
         if size_sorted_segments.is_empty() {
-            return vec![];
+            return Vec::new();
         }
 
+        let mut candidates = Vec::new();
+        let mut levels = Vec::new();
+        let mut unmerged_docs = 0usize;
         let mut current_max_log_size = f64::MAX;
-        let mut levels = vec![];
         for (_, merge_group) in &size_sorted_segments.into_iter().chunk_by(|segment| {
             let segment_log_size = f64::from(self.clip_min_size(segment.num_docs())).log2();
             if segment_log_size < (current_max_log_size - self.level_log_size) {
                 // update current_max_log_size to create a new group
                 current_max_log_size = segment_log_size;
             }
+            // accumulate the number of documents
+            unmerged_docs += segment.num_docs() as usize;
             // return current_max_log_size to be grouped to the current group
             current_max_log_size
         }) {
             levels.push(merge_group.collect::<Vec<&SegmentMeta>>());
         }
 
+        // If the total number of unmerged documents is large enough to reach the target size,
+        // then start collecting segments in ascending size until we reach the target size.
+        if unmerged_docs >= self.target_segment_size {
+            let mut batch_docs = 0usize;
+            let mut batch = Vec::new();
+            // Pop segments from levels, smallest first due to sort at start
+            while let Some(segments) = levels.pop() {
+                for s in segments {
+                    batch_docs += s.num_docs() as usize;
+                    batch.push(s);
+
+                    // If the current batch has enough documents to be merged, create a merge
+                    // candidate and push it to candidates
+                    if batch_docs >= self.target_segment_size {
+                        unmerged_docs -= batch_docs;
+                        batch_docs = 0;
+                        candidates.push(MergeCandidate(
+                            // drain to reuse the buffer
+                            batch.drain(..).map(|seg| seg.id()).collect(),
+                        ));
+                    }
+                }
+
+                // If there are no longer enough documents to create a skip merge, break the loop
+                // unmerged_docs is only updated when a batch is created so this won't trigger
+                // before we have enough docs collected
+                if unmerged_docs <= self.target_segment_size {
+                    break;
+                }
+            }
+            // If there are any remaining segments in the batch, push them as a level to be
+            // processed by the standard merge policy
+            if !batch.is_empty() {
+                levels.push(batch);
+            }
+        }
+
         levels
-            .iter()
+            .into_iter()
             .filter(|level| {
                 level.len() >= self.min_num_segments
                     || self.has_segment_above_deletes_threshold(level)
             })
-            .map(|segments| MergeCandidate(segments.iter().map(|&seg| seg.id()).collect()))
-            .collect()
+            .for_each(|level| {
+                candidates.push(MergeCandidate(
+                    level.into_iter().map(|seg| seg.id()).collect(),
+                ))
+            });
+
+        candidates
     }
 }
 
@@ -131,7 +180,7 @@ impl Default for LogMergePolicy {
     fn default() -> LogMergePolicy {
         LogMergePolicy {
             min_num_segments: DEFAULT_MIN_NUM_SEGMENTS_IN_MERGE,
-            max_docs_before_merge: DEFAULT_MAX_DOCS_BEFORE_MERGE,
+            target_segment_size: DEFAULT_MAX_DOCS_BEFORE_MERGE,
             min_layer_size: DEFAULT_MIN_LAYER_SIZE,
             level_log_size: DEFAULT_LEVEL_LOG_SIZE,
             del_docs_ratio_before_merge: DEFAULT_DEL_DOCS_RATIO_BEFORE_MERGE,
@@ -163,7 +212,7 @@ mod tests {
     {
         let mut log_merge_policy = LogMergePolicy::default();
         log_merge_policy.set_min_num_segments(1);
-        log_merge_policy.set_max_docs_before_merge(1);
+        log_merge_policy.set_target_segment_size(1);
         log_merge_policy.set_min_layer_size(0);
 
         let mut index_writer = index.writer_for_tests()?;
@@ -214,7 +263,7 @@ mod tests {
     fn test_merge_policy() -> LogMergePolicy {
         let mut log_merge_policy = LogMergePolicy::default();
         log_merge_policy.set_min_num_segments(3);
-        log_merge_policy.set_max_docs_before_merge(100_000);
+        log_merge_policy.set_target_segment_size(100_000);
         log_merge_policy.set_min_layer_size(2);
         log_merge_policy
     }
@@ -318,14 +367,67 @@ mod tests {
             create_random_segment_meta(1_500_000),
         ];
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
-        // Do not include large segments
-        assert_eq!(result_list.len(), 1);
-        assert_eq!(result_list[0].0.len(), 3);
+        // All segments at or above target size, so we expect nothing
+        assert!(result_list.is_empty());
+    }
+
+    #[test]
+    fn test_skip_merge_large_segments() {
+        // Test that we skip log merges if there are enough unmerged documents to reach the target
+        // size
+        let test_input = vec![
+            create_random_segment_meta(50_000),
+            create_random_segment_meta(50_000),
+            create_random_segment_meta(49_999),
+            create_random_segment_meta(49_999),
+            create_random_segment_meta(49_999),
+            create_random_segment_meta(49_999),
+        ];
 
-        // Making sure merge policy points to the correct index of the original input
+        let result_list = test_merge_policy().compute_merge_candidates(&test_input);
+
+        assert_eq!(result_list.len(), 2);
+        // First result should be the first 2 segments being merged into a single 100k segment
+        assert_eq!(result_list[0].0.len(), 2);
+        assert_eq!(result_list[0].0[0], test_input[0].id());
+        assert_eq!(result_list[0].0[1], test_input[1].id());
+
+        // Second result should be the next 3 segments, excluding the final segment as it will have
+        // already hit the target
+        assert_eq!(result_list[1].0.len(), 3);
+        assert_eq!(result_list[1].0[0], test_input[2].id());
+        assert_eq!(result_list[1].0[1], test_input[3].id());
+        assert_eq!(result_list[1].0[2], test_input[4].id());
+    }
+
+    #[test]
+    fn test_skip_merge_small_segments() {
+        // Test that we skip log merges if there are enough unmerged documents to reach the target
+        // size
+        let test_input = vec![
+            create_random_segment_meta(75_000),
+            create_random_segment_meta(75_000),
+            create_random_segment_meta(5_000),
+            create_random_segment_meta(5_000),
+            create_random_segment_meta(5_000),
+            create_random_segment_meta(5_000),
+            create_random_segment_meta(5_000),
+        ];
+
+        let result_list = test_merge_policy().compute_merge_candidates(&test_input);
+
+        // Should have a single merge with all of the small segments and only one of the large
+        // segments
+        assert_eq!(result_list.len(), 1);
+        assert_eq!(result_list[0].0.len(), 6);
         assert_eq!(result_list[0].0[0], test_input[2].id());
-        assert_eq!(result_list[0].0[1], test_input[4].id());
-        assert_eq!(result_list[0].0[2], test_input[5].id());
+        assert_eq!(result_list[0].0[1], test_input[3].id());
+        assert_eq!(result_list[0].0[2], test_input[4].id());
+        assert_eq!(result_list[0].0[3], test_input[5].id());
+        assert_eq!(result_list[0].0[4], test_input[6].id());
+        assert!(
+            result_list[0].0[5] == test_input[0].id() || result_list[0].0[5] == test_input[1].id()
+        );
     }
 
     #[test]

From c44b585dd22229e273402ce6078f1301717a1608 Mon Sep 17 00:00:00 2001
From: Kryesh
Date: Fri, 18 Apr 2025 21:39:18 +1000
Subject: [PATCH 2/5] Update comment for `set_target_segment_size`

---
 src/indexer/log_merge_policy.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs
index 48df82a236..652de2da1e 100644
--- a/src/indexer/log_merge_policy.rs
+++ b/src/indexer/log_merge_policy.rs
@@ -35,9 +35,9 @@ impl LogMergePolicy {
         self.min_num_segments = min_num_segments;
     }
 
-    /// Set the maximum number docs in a segment for it to be considered for
-    /// merging. A segment can still reach more than max_docs, by merging many
-    /// smaller ones.
+    /// Set the target number of documents to have in a segment. A segment can have up to
+    /// `(target_segment_size * 2) - 2` documents, but the policy will try to keep them as close as
+    /// possible to `target_segment_size`.
     pub fn set_target_segment_size(&mut self, max_docs_merge_size: usize) {
         self.target_segment_size = max_docs_merge_size;
     }

From 14c164ae3c97cb854c1b877a4020981d33006986 Mon Sep 17 00:00:00 2001
From: Kryesh
Date: Fri, 18 Apr 2025 21:43:01 +1000
Subject: [PATCH 3/5] Rename `DEFAULT_MAX_DOCS_BEFORE_MERGE` to
 `DEFAULT_TARGET_SEGMENT_SIZE`

---
 src/indexer/log_merge_policy.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs
index 652de2da1e..8ee33c3145 100644
--- a/src/indexer/log_merge_policy.rs
+++ b/src/indexer/log_merge_policy.rs
@@ -8,7 +8,7 @@ use crate::index::SegmentMeta;
 const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
 const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
 const DEFAULT_MIN_NUM_SEGMENTS_IN_MERGE: usize = 8;
-const DEFAULT_MAX_DOCS_BEFORE_MERGE: usize = 10_000_000;
+const DEFAULT_TARGET_SEGMENT_SIZE: usize = 10_000_000;
 // The default value of 1 means that deletes are not taken in account when
 // identifying merge candidates. This is not a very sensible default: it was
 // set like that for backward compatibility and might change in the near future.
@@ -180,7 +180,7 @@ impl Default for LogMergePolicy {
     fn default() -> LogMergePolicy {
         LogMergePolicy {
             min_num_segments: DEFAULT_MIN_NUM_SEGMENTS_IN_MERGE,
-            target_segment_size: DEFAULT_MAX_DOCS_BEFORE_MERGE,
+            target_segment_size: DEFAULT_TARGET_SEGMENT_SIZE,
             min_layer_size: DEFAULT_MIN_LAYER_SIZE,
             level_log_size: DEFAULT_LEVEL_LOG_SIZE,
             del_docs_ratio_before_merge: DEFAULT_DEL_DOCS_RATIO_BEFORE_MERGE,

From 96ff2eb883a5ab2dcad469841849b7b8792c1c3c Mon Sep 17 00:00:00 2001
From: Kryesh
Date: Mon, 21 Apr 2025 19:56:18 +1000
Subject: [PATCH 4/5] Merge perf_2 branch

---
 src/indexer/log_merge_policy.rs | 180 ++++++++++++++------------------
 1 file changed, 78 insertions(+), 102 deletions(-)

diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs
index 8ee33c3145..c234213583 100644
--- a/src/indexer/log_merge_policy.rs
+++ b/src/indexer/log_merge_policy.rs
@@ -38,8 +38,8 @@ impl LogMergePolicy {
     /// Set the target number of documents to have in a segment. A segment can have up to
     /// `(target_segment_size * 2) - 2` documents, but the policy will try to keep them as close as
     /// possible to `target_segment_size`.
-    pub fn set_target_segment_size(&mut self, max_docs_merge_size: usize) {
-        self.target_segment_size = max_docs_merge_size;
+    pub fn set_target_segment_size(&mut self, target_segment_size: usize) {
+        self.target_segment_size = target_segment_size;
     }
 
     /// Set the minimum segment size under which all segment belong
@@ -76,100 +76,81 @@ impl LogMergePolicy {
         self.del_docs_ratio_before_merge = del_docs_ratio_before_merge;
     }
 
-    fn has_segment_above_deletes_threshold(&self, level: &[&SegmentMeta]) -> bool {
-        level
-            .iter()
-            .any(|segment| deletes_ratio(segment) > self.del_docs_ratio_before_merge)
-    }
-}
-
-fn deletes_ratio(segment: &SegmentMeta) -> f32 {
-    if segment.max_doc() == 0 {
-        return 0f32;
+    fn segment_above_deletes_threshold(&self, segment: &SegmentMeta) -> bool {
+        match segment.max_doc() {
+            0 => false,
+            _ => {
+                (segment.num_deleted_docs() as f32 / segment.max_doc() as f32)
+                    > self.del_docs_ratio_before_merge
+            }
+        }
     }
-    segment.num_deleted_docs() as f32 / segment.max_doc() as f32
 }
 
 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
-        // Get segments that are small enough to be merged and sort them by size in descending
-        // order.
-        let size_sorted_segments = segments
+        let mut unmerged_docs = 0;
+        let mut levels = segments
             .iter()
-            .filter(|seg| (seg.num_docs() as usize) < self.target_segment_size)
-            .sorted_by_key(|seg| std::cmp::Reverse(seg.max_doc()))
+            .map(|seg| (seg.num_docs() as usize, seg))
+            .filter(|(docs, _)| *docs < self.target_segment_size)
+            .inspect(|(docs, _)| unmerged_docs += docs)
+            .sorted_by(|(a, _), (b, _)| b.cmp(a))
             .collect_vec();
 
-        // If there are no small enough segments, return an empty vector.
-        if size_sorted_segments.is_empty() {
-            return Vec::new();
-        }
-
-        let mut candidates = Vec::new();
-        let mut levels = Vec::new();
-        let mut unmerged_docs = 0usize;
-        let mut current_max_log_size = f64::MAX;
-        for (_, merge_group) in &size_sorted_segments.into_iter().chunk_by(|segment| {
-            let segment_log_size = f64::from(self.clip_min_size(segment.num_docs())).log2();
-            if segment_log_size < (current_max_log_size - self.level_log_size) {
-                // update current_max_log_size to create a new group
-                current_max_log_size = segment_log_size;
-            }
-            // accumulate the number of documents
-            unmerged_docs += segment.num_docs() as usize;
-            // return current_max_log_size to be grouped to the current group
-            current_max_log_size
-        }) {
-            levels.push(merge_group.collect::<Vec<&SegmentMeta>>());
-        }
-
-        // If the total number of unmerged documents is large enough to reach the target size,
-        // then start collecting segments in ascending size until we reach the target size.
+        let mut candidates = Vec::new();
         if unmerged_docs >= self.target_segment_size {
-            let mut batch_docs = 0usize;
+            let mut batch_docs = 0;
             let mut batch = Vec::new();
             // Pop segments from levels, smallest first due to sort at start
-            while let Some(segments) = levels.pop() {
-                for s in segments {
-                    batch_docs += s.num_docs() as usize;
-                    batch.push(s);
-
+            while let Some((docs, seg)) = levels.pop() {
+                batch_docs += docs;
+                batch.push(seg);
+
-                    // If the current batch has enough documents to be merged, create a merge
-                    // candidate and push it to candidates
-                    if batch_docs >= self.target_segment_size {
-                        unmerged_docs -= batch_docs;
-                        batch_docs = 0;
-                        candidates.push(MergeCandidate(
-                            // drain to reuse the buffer
-                            batch.drain(..).map(|seg| seg.id()).collect(),
-                        ));
+                // If the current batch has enough documents to be merged, create a merge
+                // candidate and push it to candidates
+                if batch_docs >= self.target_segment_size {
+                    unmerged_docs -= batch_docs;
+                    batch_docs = 0;
+                    candidates.push(MergeCandidate(
+                        // drain to reuse the buffer
+                        batch.drain(..).map(|seg| seg.id()).collect(),
+                    ));
+                    if unmerged_docs <= self.target_segment_size {
+                        break;
                     }
                 }
-
-                // If there are no longer enough documents to create a skip merge, break the loop
-                // unmerged_docs is only updated when a batch is created so this won't trigger
-                // before we have enough docs collected
-                if unmerged_docs <= self.target_segment_size {
-                    break;
-                }
             }
-            // If there are any remaining segments in the batch, push them as a level to be
-            // processed by the standard merge policy
-            if !batch.is_empty() {
-                levels.push(batch);
-            }
         }
 
+        let mut current_max_log_size = f64::MAX;
+        let mut batch = Vec::new();
         levels
-            .into_iter()
-            .filter(|level| {
-                level.len() >= self.min_num_segments
-                    || self.has_segment_above_deletes_threshold(level)
+            .iter()
+            .map(|(docs, seg)| {
+                let segment_log_size = f64::from(self.clip_min_size(*docs as u32)).log2();
+                if segment_log_size < (current_max_log_size - self.level_log_size) {
+                    // update current_max_log_size to create a new group
+                    current_max_log_size = segment_log_size;
+                }
+                (current_max_log_size, seg)
             })
-            .for_each(|level| {
-                candidates.push(MergeCandidate(
-                    level.into_iter().map(|seg| seg.id()).collect(),
-                ))
+            .chunk_by(|(level, _)| *level)
+            .into_iter()
+            .for_each(|(_, group)| {
+                let mut hit_delete_threshold = false;
+                group.into_iter().for_each(|(_, seg)| {
+                    batch.push(seg.id());
+                    if !hit_delete_threshold && self.segment_above_deletes_threshold(seg) {
+                        hit_delete_threshold = true;
+                    }
+                });
+
+                if batch.len() >= self.min_num_segments || hit_delete_threshold {
+                    candidates.push(MergeCandidate(std::mem::take(&mut batch)));
+                } else {
+                    batch.clear();
+                }
             });
 
         candidates
     }
 }
@@ -373,31 +354,25 @@ mod tests {
 
     #[test]
     fn test_skip_merge_large_segments() {
-        // Test that we skip log merges if there are enough unmerged documents to reach the target
-        // size
-        let test_input = vec![
-            create_random_segment_meta(50_000),
-            create_random_segment_meta(50_000),
-            create_random_segment_meta(49_999),
+        let test_input_merge_all = vec![
             create_random_segment_meta(49_999),
             create_random_segment_meta(49_999),
             create_random_segment_meta(49_999),
         ];
 
-        let result_list = test_merge_policy().compute_merge_candidates(&test_input);
+        let test_input_merge_two = vec![
+            create_random_segment_meta(50_000),
+            create_random_segment_meta(50_000),
+            create_random_segment_meta(50_000),
+        ];
 
-        assert_eq!(result_list.len(), 2);
-        // First result should be the first 2 segments being merged into a single 100k segment
-        assert_eq!(result_list[0].0.len(), 2);
-        assert_eq!(result_list[0].0[0], test_input[0].id());
-        assert_eq!(result_list[0].0[1], test_input[1].id());
-
-        // Second result should be the next 3 segments, excluding the final segment as it will have
-        // already hit the target
-        assert_eq!(result_list[1].0.len(), 3);
-        assert_eq!(result_list[1].0[0], test_input[2].id());
-        assert_eq!(result_list[1].0[1], test_input[3].id());
-        assert_eq!(result_list[1].0[2], test_input[4].id());
+        let result_list_merge_all =
+            test_merge_policy().compute_merge_candidates(&test_input_merge_all);
+        let result_list_merge_two =
+            test_merge_policy().compute_merge_candidates(&test_input_merge_two);
+
+        assert_eq!(result_list_merge_two[0].0.len(), 2);
+        assert_eq!(result_list_merge_all[0].0.len(), 3);
     }
 
     #[test]
@@ -420,13 +395,14 @@ mod tests {
         // segments
         assert_eq!(result_list.len(), 1);
         assert_eq!(result_list[0].0.len(), 6);
-        assert_eq!(result_list[0].0[0], test_input[2].id());
-        assert_eq!(result_list[0].0[1], test_input[3].id());
-        assert_eq!(result_list[0].0[2], test_input[4].id());
-        assert_eq!(result_list[0].0[3], test_input[5].id());
-        assert_eq!(result_list[0].0[4], test_input[6].id());
+        assert!(result_list[0].0.contains(&test_input[2].id()));
+        assert!(result_list[0].0.contains(&test_input[3].id()));
+        assert!(result_list[0].0.contains(&test_input[4].id()));
+        assert!(result_list[0].0.contains(&test_input[5].id()));
+        assert!(result_list[0].0.contains(&test_input[6].id()));
         assert!(
-            result_list[0].0[5] == test_input[0].id() || result_list[0].0[5] == test_input[1].id()
+            result_list[0].0.contains(&test_input[0].id())
+                || result_list[0].0.contains(&test_input[1].id())
         );
     }
 
From f42abec326495030b90e3cb3418ab6dafc42610a Mon Sep 17 00:00:00 2001
From: Kryesh
Date: Mon, 21 Apr 2025 20:17:51 +1000
Subject: [PATCH 5/5] Clean up and add comments

---
 src/indexer/log_merge_policy.rs | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs
index c234213583..d02d3b9ccf 100644
--- a/src/indexer/log_merge_policy.rs
+++ b/src/indexer/log_merge_policy.rs
@@ -89,6 +89,8 @@
 
 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
+        // Filter for segments that have less than the target number of docs, count total unmerged
+        // docs, and sort in descending order
         let mut unmerged_docs = 0;
         let mut levels = segments
             .iter()
@@ -98,11 +100,13 @@ impl MergePolicy for LogMergePolicy {
             .sorted_by(|(a, _), (b, _)| b.cmp(a))
             .collect_vec();
 
+        // If there are enough unmerged documents to create a new segment of the target size,
+        // then create a merge candidate for them.
         let mut candidates = Vec::new();
         if unmerged_docs >= self.target_segment_size {
             let mut batch_docs = 0;
             let mut batch = Vec::new();
-            // Pop segments from levels, smallest first due to sort at start
+            // Start with the smallest segments and add them to the batch until we reach the target
             while let Some((docs, seg)) = levels.pop() {
                 batch_docs += docs;
                 batch.push(seg);
@@ -116,6 +120,8 @@ impl MergePolicy for LogMergePolicy {
                         // drain to reuse the buffer
                         batch.drain(..).map(|seg| seg.id()).collect(),
                     ));
+                    // If there aren't enough documents to create another segment of the target size,
+                    // then break
                     if unmerged_docs <= self.target_segment_size {
                         break;
                     }
@@ -127,19 +133,18 @@ impl MergePolicy for LogMergePolicy {
         let mut current_max_log_size = f64::MAX;
         let mut batch = Vec::new();
         levels
             .iter()
-            .map(|(docs, seg)| {
+            .chunk_by(|(docs, _)| {
                 let segment_log_size = f64::from(self.clip_min_size(*docs as u32)).log2();
                 if segment_log_size < (current_max_log_size - self.level_log_size) {
                     // update current_max_log_size to create a new group
                     current_max_log_size = segment_log_size;
                 }
-                (current_max_log_size, seg)
+                current_max_log_size
             })
-            .chunk_by(|(level, _)| *level)
             .into_iter()
             .for_each(|(_, group)| {
                 let mut hit_delete_threshold = false;
-                group.into_iter().for_each(|(_, seg)| {
+                group.for_each(|(_, seg)| {
                     batch.push(seg.id());
                     if !hit_delete_threshold && self.segment_above_deletes_threshold(seg) {
                         hit_delete_threshold = true;
                     }
@@ -354,12 +359,14 @@ mod tests {
     #[test]
     fn test_skip_merge_large_segments() {
+        // All of these should be merged into a single segment since 2 * 49_999 < 100_000
         let test_input_merge_all = vec![
             create_random_segment_meta(49_999),
             create_random_segment_meta(49_999),
             create_random_segment_meta(49_999),
         ];
 
+        // Only two of these should be merged since 2 * 50_000 >= 100_000, leaving the third
        let test_input_merge_two = vec![
             create_random_segment_meta(50_000),
             create_random_segment_meta(50_000),
             create_random_segment_meta(50_000),
         ];
@@ -371,8 +378,8 @@ mod tests {
         let result_list_merge_two =
             test_merge_policy().compute_merge_candidates(&test_input_merge_two);
 
-        assert_eq!(result_list_merge_two[0].0.len(), 2);
         assert_eq!(result_list_merge_all[0].0.len(), 3);
+        assert_eq!(result_list_merge_two[0].0.len(), 2);
     }
 
     #[test]
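
Usage note (editor's addition, not part of the patch series): a minimal sketch of how the setter renamed in PATCH 1/5 might be wired up, assuming a tantivy 0.22-style API where `LogMergePolicy` is re-exported from `tantivy::merge_policy` (the exact re-export path varies between versions) and `add_document` returns a `Result`. The schema, heap budget, and document are illustrative only.

use tantivy::merge_policy::LogMergePolicy; // re-export path assumed
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    // Build a tiny schema and an in-memory index for demonstration.
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // 50 MB indexing heap; any valid budget works for this sketch.
    let mut writer: IndexWriter = index.writer(50_000_000)?;

    // Configure the policy: aim for ~10M-doc segments, merging the smallest
    // segments first once enough unmerged documents have accumulated.
    let mut policy = LogMergePolicy::default();
    policy.set_target_segment_size(10_000_000); // setter introduced in PATCH 1/5
    policy.set_min_num_segments(8);
    writer.set_merge_policy(Box::new(policy));

    writer.add_document(doc!(body => "hello merge policy"))?;
    writer.commit()?;
    Ok(())
}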
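To make the `(target_segment_size * 2) - 2` worst case from PATCH 2/5 concrete: the filter only admits segments with strictly fewer than `target_segment_size` docs, and a batch only closes once it reaches the target, so the largest possible candidate is a batch holding `target - 1` docs plus one final segment of `target - 1` docs. Below is a self-contained sketch of the same smallest-first batching, with plain `usize` doc counts standing in for `SegmentMeta` and hypothetical names; it mirrors the skip-merge loop from PATCH 4/5 but is not the library code itself.

/// Greedily batch segment doc counts (smallest first) into merge candidates.
fn batch_smallest_first(mut sizes: Vec<usize>, target: usize) -> Vec<Vec<usize>> {
    sizes.retain(|&docs| docs < target); // only segments below the target qualify
    sizes.sort_unstable_by(|a, b| b.cmp(a)); // descending, so pop() yields the smallest
    let mut unmerged: usize = sizes.iter().sum();
    let mut candidates = Vec::new();
    let mut batch = Vec::new();
    let mut batch_docs = 0;
    if unmerged >= target {
        while let Some(docs) = sizes.pop() {
            batch_docs += docs;
            batch.push(docs);
            if batch_docs >= target {
                unmerged -= batch_docs;
                batch_docs = 0;
                candidates.push(std::mem::take(&mut batch));
                if unmerged <= target {
                    break; // not enough left for another target-sized merge
                }
            }
        }
    }
    candidates
}

fn main() {
    // Worst case: a batch already holding 99_999 docs closes only after a second
    // 99_999-doc segment joins it: 199_998 = (100_000 * 2) - 2 docs in one candidate.
    let candidates = batch_smallest_first(vec![99_999, 99_999], 100_000);
    assert_eq!(candidates[0].iter().sum::<usize>(), 199_998);
    println!("{candidates:?}");
}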