diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/read/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/read/ValuesSourceReaderOperatorTests.java
index c9b46eb764580..0c227b5411e25 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/read/ValuesSourceReaderOperatorTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/read/ValuesSourceReaderOperatorTests.java
@@ -18,6 +18,7 @@
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
@@ -390,11 +391,11 @@ private IndexReader initIndex(Directory directory, int size, int commitEvery) th
         return DirectoryReader.open(directory);
     }
 
-    private IndexReader initIndexLongField(Directory directory, int size, int commitEvery) throws IOException {
+    private IndexReader initIndexLongField(Directory directory, int size, int commitEvery, boolean forceMerge) throws IOException {
         try (
             IndexWriter writer = new IndexWriter(
                 directory,
-                newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+                newIndexWriterConfig().setMergePolicy(new TieredMergePolicy()).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
             )
         ) {
             for (int d = 0; d < size; d++) {
@@ -413,6 +414,10 @@ private IndexReader initIndexLongField(Directory directory, int size, int commit
                     writer.commit();
                 }
             }
+
+            if (forceMerge) {
+                writer.forceMerge(1);
+            }
         }
         return DirectoryReader.open(directory);
     }
@@ -924,7 +929,7 @@ private void testLoadLong(boolean shuffle, boolean manySegments) throws IOExcept
         int numDocs = between(10, 500);
         initMapping();
         keyToTags.clear();
-        reader = initIndexLongField(directory, numDocs, manySegments ? commitEvery(numDocs) : numDocs);
+        reader = initIndexLongField(directory, numDocs, manySegments ? commitEvery(numDocs) : numDocs, manySegments == false);
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(sourceOperator(driverContext, numDocs));