Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add a dynamic setting to change skip_cache_factor and min_frequency for querycache ([#18351](https://github.com/opensearch-project/OpenSearch/issues/18351))
- Add overload constructor for Translog to accept Channel Factory as a parameter ([#18918](https://github.com/opensearch-project/OpenSearch/pull/18918))
- Add subdirectory-aware store module with recovery support ([#19132](https://github.com/opensearch-project/OpenSearch/pull/19132))
- Add an `item_count` metric for field data cache API ([#19174](https://github.com/opensearch-project/OpenSearch/pull/19174))

### Changed
- Add CompletionStage variants to methods in the Client Interface and default to ActionListener impl ([#18998](https://github.com/opensearch-project/OpenSearch/pull/18998))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@

import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs;
import org.opensearch.action.DocWriteResponse;
import org.opensearch.action.admin.cluster.node.stats.NodeStats;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
Expand Down Expand Up @@ -70,6 +72,7 @@
import org.opensearch.index.VersionType;
import org.opensearch.index.cache.query.QueryCacheStats;
import org.opensearch.index.engine.VersionConflictEngineException;
import org.opensearch.index.fielddata.FieldDataStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.index.remote.RemoteSegmentStats;
import org.opensearch.index.shard.IndexShard;
Expand Down Expand Up @@ -116,7 +119,6 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

Expand Down Expand Up @@ -165,7 +167,7 @@ private Settings.Builder settingsBuilder() {
return Settings.builder().put(indexSettings());
}

public void testFieldDataStats() throws InterruptedException {
public void testFieldDataStats() throws Exception {
assertAcked(
client().admin()
.indices()
Expand All @@ -175,117 +177,86 @@ public void testFieldDataStats() throws InterruptedException {
.get()
);
ensureGreen();
client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet();
client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet();
// Index enough docs to be sure neither primary shard is empty
for (int i = 0; i < 100; i++) {
client().prepareIndex("test")
.setId(Integer.toString(i))
.setSource("field", "value" + i, "field2", "value" + i)
.execute()
.actionGet();
}
refreshAndWaitForReplication();
indexRandomForConcurrentSearch("test");
// Force merge to 1 segment so we can predict counts
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
refreshAndWaitForReplication();

NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
equalTo(0L)
);
IndicesStatsResponse indicesStats = client().admin()
.indices()
.prepareStats("test")
.clear()
.setFieldData(true)
.execute()
.actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L));
for (FieldDataStats totalStats : List.of(getTotalFieldDataStats(false), getIndicesFieldDataStats(false))) {
assertEquals(0, totalStats.getMemorySizeInBytes());
assertEquals(0, totalStats.getItemCount());
}

// sort to load it to field data...
client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();

nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
greaterThan(0L)
);
indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
for (FieldDataStats totalStats : List.of(getTotalFieldDataStats(false), getIndicesFieldDataStats(false))) {
assertTrue(totalStats.getMemorySizeInBytes() > 0);
// The search should have hit 2 shards of the total 4 shards, each of which has 1 segment. So we expect 2 entries.
assertEquals(2, totalStats.getItemCount());
}

// sort to load it to field data...
client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();

// now check the per field stats
nodesStats = client().admin()
.cluster()
.prepareNodesStats("data:true")
.setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*"))
.execute()
.actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
greaterThan(0L)
);
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getFields().get("field") + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getFields()
.get("field"),
greaterThan(0L)
);
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getFields().get("field") + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getFields()
.get("field"),
lessThan(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes()
)
);
// Now we expect 4 total entries, one per searched segment per field
assertEquals(4, getTotalFieldDataStats(false).getItemCount());

indicesStats = client().admin()
.indices()
.prepareStats("test")
.clear()
.setFieldData(true)
.setFieldDataFields("*")
.execute()
.actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0L));
assertThat(
indicesStats.getTotal().getFieldData().getFields().get("field"),
lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes())
);
// now check the per field stats
for (FieldDataStats totalStats : List.of(getTotalFieldDataStats(true), getIndicesFieldDataStats(true))) {
assertTrue(totalStats.getMemorySizeInBytes() > 0);
for (String fieldName : List.of("field", "field2")) {
assertTrue(totalStats.getFields().get(fieldName) > 0);
assertEquals(2, totalStats.getFieldItemCounts().get(fieldName));
assertTrue(totalStats.getFields().get(fieldName) < totalStats.getMemorySizeInBytes());
}
}

client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
equalTo(0L)
);
indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L));
assertBusy(() -> {
for (FieldDataStats postClearStats : List.of(getTotalFieldDataStats(true), getIndicesFieldDataStats(true))) {
assertEquals(0, postClearStats.getMemorySizeInBytes());
assertEquals(0, postClearStats.getItemCount());
for (long fieldMemorySize : postClearStats.getFields().getStats().values()) {
assertEquals(0, fieldMemorySize);
}
for (long fieldItemCount : postClearStats.getFieldItemCounts().getStats().values()) {
assertEquals(0, fieldItemCount);
}
}
});
}

/**
 * Aggregates the field data stats reported by every data node into a single
 * {@link FieldDataStats} instance via the nodes stats API.
 *
 * @param setFieldDataFields whether to also request per-field stats (using the {@code "*"} wildcard)
 * @return the summed field data stats across all data nodes
 */
private FieldDataStats getTotalFieldDataStats(boolean setFieldDataFields) {
    final NodesStatsRequestBuilder statsRequest = client().admin().cluster().prepareNodesStats("data:true");
    if (setFieldDataFields == false) {
        statsRequest.setIndices(true);
    } else {
        // Request per-field breakdowns for every field in addition to the totals.
        statsRequest.setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*"));
    }
    final FieldDataStats aggregated = new FieldDataStats();
    for (NodeStats nodeStats : statsRequest.execute().actionGet().getNodes()) {
        aggregated.add(nodeStats.getIndices().getFieldData());
    }
    return aggregated;
}

/**
 * Fetches the total field data stats for the {@code "test"} index via the indices stats API.
 *
 * @param setFieldDataFields whether to also request per-field stats (using the {@code "*"} wildcard)
 * @return the aggregated field data stats for the {@code "test"} index
 */
private FieldDataStats getIndicesFieldDataStats(boolean setFieldDataFields) {
    final IndicesStatsRequestBuilder statsRequest = client().admin().indices().prepareStats("test").clear().setFieldData(true);
    if (setFieldDataFields) {
        statsRequest.setFieldDataFields("*");
    }
    final IndicesStatsResponse response = statsRequest.execute().actionGet();
    return response.getTotal().getFieldData();
}

public void testClearAllCaches() throws Exception {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@
*/
@PublicApi(since = "1.0.0")
public final class FieldMemoryStats implements Writeable, Iterable<Map.Entry<String, Long>> {

private final Map<String, Long> stats;

/**
Expand Down Expand Up @@ -101,6 +100,10 @@ public void toXContent(XContentBuilder builder, String key, String rawKey, Strin
builder.endObject();
}

/**
 * Returns an immutable snapshot of the per-field values held by this instance.
 * <p>
 * A defensive copy is returned rather than the internal map, so callers cannot
 * mutate the state of this stats object through the returned reference.
 *
 * @return an unmodifiable map of field name to value
 */
public Map<String, Long> getStats() {
    // Map.copyOf produces an unmodifiable copy; it is a no-op if the map is already immutable.
    return Map.copyOf(stats);
}

/**
* Creates a deep copy of this stats instance
*/
Expand Down
Loading
Loading