From ed65482b8ae9b3285271e56cf0dca04786fb2d4a Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Thu, 22 Aug 2024 16:53:13 -0700 Subject: [PATCH 01/21] Add Delete QueryGroup API Logic (#14735) * Add Delete QueryGroup API Logic Signed-off-by: Ruirui Zhang * modify changelog Signed-off-by: Ruirui Zhang * include comments from create pr Signed-off-by: Ruirui Zhang * remove delete all Signed-off-by: Ruirui Zhang * rebase and address comments Signed-off-by: Ruirui Zhang * rebase Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang * add UT coverage Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../plugin/wlm/WorkloadManagementPlugin.java | 15 ++- .../wlm/WorkloadManagementPluginModule.java | 31 +++++++ .../wlm/action/DeleteQueryGroupAction.java | 38 ++++++++ .../wlm/action/DeleteQueryGroupRequest.java | 65 +++++++++++++ .../TransportCreateQueryGroupAction.java | 8 +- .../TransportDeleteQueryGroupAction.java | 91 +++++++++++++++++++ .../wlm/rest/RestDeleteQueryGroupAction.java | 57 ++++++++++++ .../service/QueryGroupPersistenceService.java | 52 +++++++++++ .../action/CreateQueryGroupResponseTests.java | 4 +- .../action/DeleteQueryGroupRequestTests.java | 42 +++++++++ .../TransportDeleteQueryGroupActionTests.java | 63 +++++++++++++ .../rest/RestDeleteQueryGroupActionTests.java | 85 +++++++++++++++++ .../QueryGroupPersistenceServiceTests.java | 58 ++++++++++++ .../api/delete_query_group_context.json | 22 +++++ .../rest-api-spec/test/wlm/10_query_group.yml | 6 ++ .../opensearch/cluster/metadata/Metadata.java | 13 ++- 17 files changed, 636 insertions(+), 15 deletions(-) create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json diff --git a/CHANGELOG.md b/CHANGELOG.md index a1f3d9287e4a8..cd02af4f625b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Workload Management] Add Delete 
QueryGroup API Logic ([#14735](https://github.com/opensearch-project/OpenSearch/pull/14735)) - [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680))- [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680)) diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java index 6b4496af76dc3..64f510fa1db67 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.inject.Module; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -18,10 +19,13 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.core.action.ActionResponse; import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.GetQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction; +import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.plugins.ActionPlugin; @@ -29,6 +33,7 @@ import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; +import java.util.Collection; import java.util.List; import java.util.function.Supplier; @@ -46,7 +51,8 @@ public WorkloadManagementPlugin() {} public List> getActions() { return List.of( new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class) + new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class), + new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class) ); } @@ -60,11 +66,16 @@ public List getRestHandlers( IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster ) { - return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction()); + return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction(), new RestDeleteQueryGroupAction()); } @Override public List> getSettings() { return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); } + + @Override + public Collection createGuiceModules() { + return List.of(new 
WorkloadManagementPluginModule()); + } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java new file mode 100644 index 0000000000000..b7c7805639eb2 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.common.inject.AbstractModule; +import org.opensearch.common.inject.Singleton; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; + +/** + * Guice Module to manage WorkloadManagement related objects + */ +public class WorkloadManagementPluginModule extends AbstractModule { + + /** + * Constructor for WorkloadManagementPluginModule + */ + public WorkloadManagementPluginModule() {} + + @Override + protected void configure() { + // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used, + // preventing multiple throttling key registrations in the constructor. + bind(QueryGroupPersistenceService.class).in(Singleton.class); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java new file mode 100644 index 0000000000000..c78952a2f89ad --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.master.AcknowledgedResponse; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupAction extends ActionType { + + /** + /** + * An instance of DeleteQueryGroupAction + */ + public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction(); + + /** + * Name for DeleteQueryGroupAction + */ + public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete"; + + /** + * Default constructor + */ + private DeleteQueryGroupAction() { + super(NAME, AcknowledgedResponse::new); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java new file mode 100644 index 0000000000000..e514943c2c7e9 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupRequest extends AcknowledgedRequest { + private final String name; + + /** + * Default constructor for DeleteQueryGroupRequest + * @param name - name for the QueryGroup to get + */ + public DeleteQueryGroupRequest(String name) { + this.name = name; + } + + /** + * Constructor for DeleteQueryGroupRequest + * @param in - A {@link StreamInput} object + */ + public DeleteQueryGroupRequest(StreamInput in) throws IOException { + super(in); + name = in.readOptionalString(); + } + + @Override + public ActionRequestValidationException validate() { + if (name == null) { + ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException(); + actionRequestValidationException.addValidationError("QueryGroup name is missing"); + return actionRequestValidationException; + } + return null; + } + + /** + * Name getter + */ + public String getName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(name); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java index 01aa8cfb5e610..190ff17261bb4 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java @@ -14,7 +14,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.tasks.Task; -import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; /** @@ -24,7 +23,6 @@ */ public class TransportCreateQueryGroupAction extends HandledTransportAction { - private final ThreadPool threadPool; private final QueryGroupPersistenceService queryGroupPersistenceService; /** @@ -33,7 +31,6 @@ public class TransportCreateQueryGroupAction extends HandledTransportAction listener) { - threadPool.executor(ThreadPool.Names.SAME) - .execute(() -> queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener)); + queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..e4d3908d4a208 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction { + + private final QueryGroupPersistenceService queryGroupPersistenceService; + + /** + * Constructor for TransportDeleteQueryGroupAction + * + * @param clusterService - a {@link ClusterService} object + * @param transportService - a {@link TransportService} object + * @param actionFilters - a {@link ActionFilters} object + * @param threadPool - a {@link ThreadPool} object + * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object + * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + */ + @Inject + public TransportDeleteQueryGroupAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + ThreadPool threadPool, + IndexNameExpressionResolver indexNameExpressionResolver, + QueryGroupPersistenceService queryGroupPersistenceService + ) { + super( + DeleteQueryGroupAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteQueryGroupRequest::new, + indexNameExpressionResolver + ); + this.queryGroupPersistenceService = queryGroupPersistenceService; + } + + @Override + protected void clusterManagerOperation( + DeleteQueryGroupRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..8ad621cf8a1e4 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action to delete a QueryGroup + * + * @opensearch.experimental + */ +public class RestDeleteQueryGroupAction extends BaseRestHandler { + + /** + * Constructor for RestDeleteQueryGroupAction + */ + public RestDeleteQueryGroupAction() {} + + @Override + public String getName() { + return "delete_query_group"; + } + + /** + * The list of {@link Route}s that this RestHandler is responsible for handling. + */ + @Override + public List routes() { + return List.of(new Route(DELETE, "_wlm/query_group/{name}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name")); + deleteQueryGroupRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout()) + ); + deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout())); + return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java index fe7080da78bbe..ba5161a2c855e 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java @@ -10,10 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -24,6 +28,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import java.util.Collection; @@ -38,6 +43,7 @@ public class QueryGroupPersistenceService { static final String SOURCE = "query-group-persistence-service"; private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group"; + private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group"; private static final Logger logger = 
LogManager.getLogger(QueryGroupPersistenceService.class); /** * max QueryGroup count setting name @@ -65,6 +71,7 @@ public class QueryGroupPersistenceService { private final ClusterService clusterService; private volatile int maxQueryGroupCount; final ThrottlingKey createQueryGroupThrottlingKey; + final ThrottlingKey deleteQueryGroupThrottlingKey; /** * Constructor for QueryGroupPersistenceService @@ -81,6 +88,7 @@ public QueryGroupPersistenceService( ) { this.clusterService = clusterService; this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); + this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); } @@ -212,6 +220,50 @@ public static Collection getFromClusterStateMetadata(String name, Cl .collect(Collectors.toList()); } + /** + * Modify cluster state to delete the QueryGroup + * @param deleteQueryGroupRequest - request to delete a QueryGroup + * @param listener - ActionListener for AcknowledgedResponse + */ + public void deleteInClusterStateMetadata( + DeleteQueryGroupRequest deleteQueryGroupRequest, + ActionListener listener + ) { + clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState); + } + + @Override + public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() { + return deleteQueryGroupThrottlingKey; + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + /** + * Modify cluster state to delete the QueryGroup, and return the new cluster state + * @param name - the name for QueryGroup to be deleted + * @param currentClusterState - current cluster state + */ + ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) { + final Metadata metadata = currentClusterState.metadata(); + final QueryGroup queryGroupToRemove = metadata.queryGroups() + .values() + .stream() + .filter(queryGroup -> queryGroup.getName().equals(name)) + .findAny() + .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name)); + + return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build(); + } + /** * maxQueryGroupCount getter */ diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java index 038f015713c5b..ecb9a6b2dc0d2 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -27,7 +27,7 @@ public class CreateQueryGroupResponseTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of CreateQueryGroupResponse. + * Test case to verify serialization and deserialization of CreateQueryGroupResponse. 
*/ public void testSerialization() throws IOException { CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); @@ -46,7 +46,7 @@ public void testSerialization() throws IOException { } /** - * Test case to verify the toXContent method of CreateQueryGroupResponse. + * Test case to validate the toXContent method of CreateQueryGroupResponse. */ public void testToXContentCreateQueryGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java new file mode 100644 index 0000000000000..bc2e4f0faca4c --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest. + */ + public void testSerialization() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); + assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to validate a DeleteQueryGroupRequest. + */ + public void testSerializationWithNull() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertFalse(actionRequestValidationException.getMessage().isEmpty()); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..253d65f8da80f --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { + + ClusterService clusterService = mock(ClusterService.class); + TransportService transportService = mock(TransportService.class); + ActionFilters actionFilters = mock(ActionFilters.class); + ThreadPool threadPool = mock(ThreadPool.class); + IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + + TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + clusterService, + transportService, + actionFilters, + threadPool, + indexNameExpressionResolver, + queryGroupPersistenceService + ); + + /** + * Test case to validate the construction for TransportDeleteQueryGroupAction + */ + public void testConstruction() { + assertNotNull(action); + assertEquals(ThreadPool.Names.SAME, action.executor()); + } + + /** + * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + */ + public void testClusterManagerOperation() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + ClusterState clusterState = mock(ClusterState.class); + action.clusterManagerOperation(request, clusterState, listener); + verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..72191e076bb87 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.List; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase { + /** + * Test case to validate the construction for RestDeleteQueryGroupAction + */ + public void testConstruction() { + RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction(); + assertNotNull(action); + assertEquals("delete_query_group", action.getName()); + List routes = action.routes(); + assertEquals(1, routes.size()); + RestHandler.Route route = routes.get(0); + assertEquals(DELETE, route.getMethod()); + assertEquals("_wlm/query_group/{name}", route.getPath()); + } + + /** + * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction + */ + @SuppressWarnings("unchecked") + public void testPrepareRequest() throws Exception { + RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction(); + NodeClient nodeClient = mock(NodeClient.class); + RestRequest realRequest = new FakeRestRequest(); + realRequest.params().put("name", NAME_ONE); + ; + RestRequest spyRequest = spy(realRequest); + + doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class)); + doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class)); + + CheckedConsumer consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient); + assertNotNull(consumer); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class); + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class); + doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); + + consumer.accept(mock(RestChannel.class)); + DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue(); + assertEquals(NAME_ONE, capturedRequest.getName()); + assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout()); + assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout()); + verify(nodeClient).execute( + eq(DeleteQueryGroupAction.INSTANCE), + any(DeleteQueryGroupRequest.class), + any(RestToXContentListener.class) + ); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java index 2aa3b9e168852..a516ffdde839e 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -8,6 +8,9 @@ package org.opensearch.plugin.wlm.service; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -20,6 +23,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.QueryGroupTestUtils; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -39,6 +43,7 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; @@ -48,12 +53,14 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -298,4 +305,55 @@ public void testMaxQueryGroupCount() { queryGroupPersistenceService.setMaxQueryGroupCount(50); assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); } + + /** + * Tests delete a single QueryGroup + */ + public void testDeleteSingleQueryGroup() { + ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState()); + Map afterDeletionGroups = newClusterState.getMetadata().queryGroups(); + assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); + assertEquals(1, afterDeletionGroups.size()); + List oldQueryGroups = new ArrayList<>(); + oldQueryGroups.add(queryGroupOne); + assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups); + } + + /** + * Tests delete a QueryGroup with invalid name + */ + public void testDeleteNonExistedQueryGroup() { + assertThrows( + ResourceNotFoundException.class, + 
() -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState()) + ); + } + + /** + * Tests DeleteInClusterStateMetadata function + */ + @SuppressWarnings("unchecked") + public void testDeleteInClusterStateMetadata() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE); + ClusterService clusterService = mock(ClusterService.class); + + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + AckedClusterStateUpdateTask task = invocation.getArgument(1); + ClusterState initialState = clusterState(); + ClusterState newState = task.execute(initialState); + assertNotNull(newState); + assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey()); + task.onAllNodesAcked(null); + verify(listener).onResponse(argThat(response -> response.isAcknowledged())); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); + } } diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json new file mode 100644 index 0000000000000..16930427fc2fe --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json @@ -0,0 +1,22 @@ +{ + "delete_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group/{name}", + "methods": [ + "DELETE" + ], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml index a22dfa2f4477e..a00314986a5cf 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml @@ -106,3 +106,9 @@ - match: { query_groups.0.resiliency_mode: "monitor" } - match: { query_groups.0.resource_limits.cpu: 0.35 } - match: { query_groups.0.resource_limits.memory: 0.25 } + + - do: + delete_query_group_context: + name: "analytics2" + + - match: { acknowledged: true } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 4da6c68b40733..6163fd624c838 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -1397,7 +1397,14 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } - public Map getQueryGroups() { + public Builder remove(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, "queryGroup should not be null"); + Map existing = new HashMap<>(getQueryGroups()); + existing.remove(queryGroup.get_id()); + return queryGroups(existing); + } + + private Map 
getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) .map(QueryGroupMetadata::queryGroups) @@ -1830,9 +1837,7 @@ static void validateDataStreams(SortedMap indicesLooku if (dsMetadata != null) { for (DataStream ds : dsMetadata.dataStreams().values()) { String prefix = DataStream.BACKING_INDEX_PREFIX + ds.getName() + "-"; - Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") // '.' is the - // char after - // '-' + Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") .keySet() .stream() .filter(s -> NUMBER_PATTERN.matcher(s.substring(prefix.length())).matches()) From 9e5604b924fc3ad9f8fb61b8782d67f2fc720eb2 Mon Sep 17 00:00:00 2001 From: Sarthak Aggarwal Date: Fri, 23 Aug 2024 17:05:21 +0530 Subject: [PATCH 02/21] [Star Tree] Lucene Abstractions for Star Tree File Formats (#15278) --------- Signed-off-by: Sarthak Aggarwal --- .../Lucene90DocValuesConsumerWrapper.java | 46 +++++++ .../Lucene90DocValuesProducerWrapper.java | 46 +++++++ .../SortedNumericDocValuesWriterWrapper.java | 53 ++++++++ .../composite/CompositeCodecFactory.java | 5 + .../LuceneDocValuesConsumerFactory.java | 50 +++++++ .../LuceneDocValuesProducerFactory.java | 60 +++++++++ .../{ => composite99}/Composite99Codec.java | 2 +- .../Composite99DocValuesFormat.java | 2 +- .../Composite99DocValuesReader.java | 5 +- .../Composite99DocValuesWriter.java | 5 +- .../composite/composite99/package-info.java | 12 ++ .../services/org.apache.lucene.codecs.Codec | 2 +- .../opensearch/index/codec/CodecTests.java | 2 +- .../LuceneDocValuesConsumerFactoryTests.java | 85 ++++++++++++ .../LuceneDocValuesProducerFactoryTests.java | 124 ++++++++++++++++++ ...tedNumericDocValuesWriterWrapperTests.java | 94 +++++++++++++ .../StarTreeDocValuesFormatTests.java | 2 +- 17 files changed, 588 insertions(+), 7 deletions(-) create mode 100644 server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99Codec.java (97%) rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99DocValuesFormat.java (97%) rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99DocValuesReader.java (91%) rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99DocValuesWriter.java (97%) create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java 
b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java new file mode 100644 index 0000000000000..67ee45f4c9306 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is an abstraction of the {@link DocValuesConsumer} for the Star Tree index structure. + * It is responsible to consume various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. + * + * @opensearch.experimental + */ +public class Lucene90DocValuesConsumerWrapper implements Closeable { + + private final Lucene90DocValuesConsumer lucene90DocValuesConsumer; + + public Lucene90DocValuesConsumerWrapper( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesConsumer = new Lucene90DocValuesConsumer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public Lucene90DocValuesConsumer getLucene90DocValuesConsumer() { + return lucene90DocValuesConsumer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesConsumer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java new file mode 100644 index 0000000000000..a213852c59094 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentReadState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is a custom abstraction of the {@link DocValuesProducer} for the Star Tree index structure. + * It is responsible for providing access to various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. 
+ * + * @opensearch.experimental + */ +public class Lucene90DocValuesProducerWrapper implements Closeable { + + private final Lucene90DocValuesProducer lucene90DocValuesProducer; + + public Lucene90DocValuesProducerWrapper( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesProducer = new Lucene90DocValuesProducer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public DocValuesProducer getLucene90DocValuesProducer() { + return lucene90DocValuesProducer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesProducer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..f7759fcced284 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted numeric doc values. + *
<p>
+ * This class provides a convenient way to add sorted numeric doc values to a field + * and retrieve the corresponding {@link SortedNumericDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedNumericDocValuesWriterWrapper { + + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + + /** + * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + */ + public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { + sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + } + + /** + * Adds a value to the sorted numeric doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, long value) { + sortedNumericDocValuesWriter.addValue(docID, value); + } + + /** + * Returns the {@link SortedNumericDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedNumericDocValues} instance + */ + public SortedNumericDocValues getDocValues() { + return sortedNumericDocValuesWriter.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java index 3acedc6a27d7f..99691d7061ac9 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java @@ -12,6 +12,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import java.util.HashMap; @@ -29,6 +30,10 @@ */ @ExperimentalApi public class CompositeCodecFactory { + + // we can use this to track the latest composite codec + public static final String COMPOSITE_CODEC = Composite99Codec.COMPOSITE_INDEX_CODEC_NAME; + public CompositeCodecFactory() {} public Map getCompositeIndexCodecs(MapperService mapperService, Logger logger) { diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java new file mode 100644 index 0000000000000..1ed672870337e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumerWrapper; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesConsumer} instances + * for the latest composite codec. + *
<p>
+ * The segments are written using the latest composite codec. The codec + * internally manages calling the appropriate consumer factory for its abstractions. + *
<p>
+ * This design ensures forward compatibility for writing operations + * + * @opensearch.experimental + */ +public class LuceneDocValuesConsumerFactory { + + public static DocValuesConsumer getDocValuesConsumerForCompositeCodec( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + try ( + Lucene90DocValuesConsumerWrapper lucene90DocValuesConsumerWrapper = new Lucene90DocValuesConsumerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ) + ) { + return lucene90DocValuesConsumerWrapper.getLucene90DocValuesConsumer(); + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java new file mode 100644 index 0000000000000..611a97ffeb834 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducerWrapper; +import org.apache.lucene.index.SegmentReadState; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesProducer} instances + * based on the specified composite codec. + *
<p>
+ * In producers, we want to ensure compatibility with older codec versions during the segment reads. + * This approach allows for writing with only the latest codec while maintaining + * the ability to read data encoded with any codec version present in the segment. + *
<p>
+ * This design ensures backward compatibility for reads across different codec versions. + * + * @opensearch.experimental + */ +public class LuceneDocValuesProducerFactory { + + public static DocValuesProducer getDocValuesProducerForCompositeCodec( + String compositeCodec, + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + + switch (compositeCodec) { + case Composite99Codec.COMPOSITE_INDEX_CODEC_NAME: + try ( + Lucene90DocValuesProducerWrapper lucene90DocValuesProducerWrapper = new Lucene90DocValuesProducerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ) + ) { + return lucene90DocValuesProducerWrapper.getLucene90DocValuesProducer(); + } + default: + throw new IllegalStateException("Invalid composite codec " + "[" + compositeCodec + "]"); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java index de04944e67cd2..8422932e937c2 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java index 216ed4f68f333..e8c69b11b7c88 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java similarity index 91% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java index df5008a7f294e..e3bfe01cfa2d5 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.BinaryDocValues; @@ -17,6 +17,9 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java index 6ed1a8c42e380..da784e1232800 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; @@ -18,6 +18,9 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java new file mode 100644 index 0000000000000..3d6f130b9a7c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Responsible for handling all composite index codecs and operations associated with Composite99 codec + */ +package org.opensearch.index.codec.composite.composite99; diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index e030a813373c1..f51452c57f975 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1 +1 @@ -org.opensearch.index.codec.composite.Composite99Codec +org.opensearch.index.codec.composite.composite99.Composite99Codec diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 7146b7dc51753..bbf98b5c32918 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,7 +48,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java new file mode 100644 index 0000000000000..7fb8fe7f68f45 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +public class LuceneDocValuesConsumerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesConsumerForCompositeCodec() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumer", consumer.getClass().getName()); + assertEquals(CompositeCodecFactory.COMPOSITE_CODEC, Composite99Codec.COMPOSITE_INDEX_CODEC_NAME); + consumer.close(); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java new file mode 100644 index 0000000000000..55d637dfb9cae --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +import static org.mockito.Mockito.mock; + +public class LuceneDocValuesProducerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesProducerForCompositeCodec99() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + // open an consumer first in order for the producer to find the file + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + consumer.close(); + + SegmentReadState segmentReadState = new SegmentReadState( + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + newIOContext(random()) + ); + DocValuesProducer producer = LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + Composite99Codec.COMPOSITE_INDEX_CODEC_NAME, + segmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertNotNull(producer); + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducer", producer.getClass().getName()); + producer.close(); + } + + public void testGetDocValuesProducerForCompositeCodec_InvalidCodec() { + SegmentReadState mockSegmentReadState = mock(SegmentReadState.class); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + "invalid_codec", + mockSegmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + }); + + assertNotNull(exception); + assertTrue(exception.getMessage().contains("Invalid composite codec")); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java 
b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java new file mode 100644 index 0000000000000..54eead20ef354 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedNumericDocValuesWriterWrapper; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.util.Counter; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class SortedNumericDocValuesWriterWrapperTests extends OpenSearchTestCase { + + private SortedNumericDocValuesWriterWrapper wrapper; + private FieldInfo fieldInfo; + private Counter counter; + + @Override + public void setUp() throws Exception { + super.setUp(); + fieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + counter = Counter.newCounter(); + wrapper = new SortedNumericDocValuesWriterWrapper(fieldInfo, counter); + } + + public void testAddValue() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(1, 20); + wrapper.addValue(2, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(1, docValues.nextDoc()); + assertEquals(20, docValues.nextValue()); + assertEquals(2, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + } + + public void testGetDocValues() { + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + } + + public void testMultipleValues() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(0, 20); + wrapper.addValue(1, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(20, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + + assertEquals(1, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java index 6fa88215cad48..54a9cc035d7a9 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -29,7 +29,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; 
import org.opensearch.index.MapperTestUtils; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesModule; import org.junit.After; From dac646011f6c630f291fd10c1609dc3206480943 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Fri, 23 Aug 2024 19:48:16 +0530 Subject: [PATCH 03/21] [Star tree] Changes to handle derived metrics such as avg as part of star tree mapping (#15152) --------- Signed-off-by: Bharathwaj G --- .../index/mapper/StarTreeMapperIT.java | 20 +- .../compositeindex/datacube/MetricStat.java | 27 ++- .../startree/StarTreeIndexSettings.java | 13 +- .../startree/builder/BaseStarTreeBuilder.java | 3 + .../index/mapper/StarTreeMapper.java | 48 ++++- .../index/mapper/StarTreeMapperTests.java | 183 ++++++++++++++++-- 6 files changed, 247 insertions(+), 47 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 6f5b4bba481dd..c461f83657340 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -265,13 +265,9 @@ public void testValidCompositeIndex() { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( @@ -349,13 +345,9 @@ public void testUpdateIndexWhenMappingIsSame() { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java index df3b2229d2c5b..84eaaeb637962 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java @@ -10,6 +10,9 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Arrays; +import java.util.List; + /** * Supported metric types for composite index * @@ -18,21 +21,39 @@ @ExperimentalApi public enum MetricStat { VALUE_COUNT("value_count"), - AVG("avg"), SUM("sum"), MIN("min"), - MAX("max"); + MAX("max"), + 
AVG("avg", VALUE_COUNT, SUM); private final String typeName; + private final MetricStat[] baseMetrics; - MetricStat(String typeName) { + MetricStat(String typeName, MetricStat... baseMetrics) { this.typeName = typeName; + this.baseMetrics = baseMetrics; } public String getTypeName() { return typeName; } + /** + * Return the list of metrics that this metric is derived from + * For example, AVG is derived from COUNT and SUM + */ + public List getBaseMetrics() { + return Arrays.asList(baseMetrics); + } + + /** + * Return true if this metric is derived from other metrics + * For example, AVG is derived from COUNT and SUM + */ + public boolean isDerivedMetric() { + return baseMetrics != null && baseMetrics.length > 0; + } + public static MetricStat fromTypeName(String typeName) { for (MetricStat metric : MetricStat.values()) { if (metric.getTypeName().equalsIgnoreCase(typeName)) { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java index 6535f8ed11da3..ce389a99b3626 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java @@ -15,6 +15,7 @@ import java.util.Arrays; import java.util.List; +import java.util.function.Function; /** * Index settings for star tree fields. The settings are final as right now @@ -93,16 +94,10 @@ public class StarTreeIndexSettings { /** * Default metrics for metrics as part of star tree fields */ - public static final Setting> DEFAULT_METRICS_LIST = Setting.listSetting( + public static final Setting> DEFAULT_METRICS_LIST = Setting.listSetting( "index.composite_index.star_tree.field.default.metrics", - Arrays.asList( - MetricStat.AVG.toString(), - MetricStat.VALUE_COUNT.toString(), - MetricStat.SUM.toString(), - MetricStat.MAX.toString(), - MetricStat.MIN.toString() - ), - MetricStat::fromTypeName, + Arrays.asList(MetricStat.VALUE_COUNT.toString(), MetricStat.SUM.toString()), + Function.identity(), Setting.Property.IndexScope, Setting.Property.Final ); diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 90b2d0727d572..3fc8d24e6e0d2 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -118,6 +118,9 @@ public List generateMetricAggregatorInfos(MapperService ma List metricAggregatorInfos = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { for (MetricStat metricStat : metric.getMetrics()) { + if (metricStat.isDerivedMetric()) { + continue; + } IndexNumericFieldData.NumericType numericType; Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper(metric.getField()); if (fieldMapper instanceof NumberFieldMapper) { diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index d9539f9dc0c82..93764e93ae30d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ 
b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -28,6 +28,7 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -262,17 +263,50 @@ private Metric getMetric(String name, Map metric, Mapper.TypePar .collect(Collectors.toList()); metric.remove(STATS); if (metricStrings.isEmpty()) { - metricTypes = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); - } else { - Set metricSet = new LinkedHashSet<>(); - for (String metricString : metricStrings) { - metricSet.add(MetricStat.fromTypeName(metricString)); - } - metricTypes = new ArrayList<>(metricSet); + metricStrings = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); + } + // Add all required metrics initially + Set metricSet = new LinkedHashSet<>(); + for (String metricString : metricStrings) { + MetricStat metricStat = MetricStat.fromTypeName(metricString); + metricSet.add(metricStat); + addBaseMetrics(metricStat, metricSet); } + addEligibleDerivedMetrics(metricSet); + metricTypes = new ArrayList<>(metricSet); return new Metric(name, metricTypes); } + /** + * Add base metrics of derived metric to metric set + */ + private void addBaseMetrics(MetricStat metricStat, Set metricSet) { + if (metricStat.isDerivedMetric()) { + Queue metricQueue = new LinkedList<>(metricStat.getBaseMetrics()); + while (metricQueue.isEmpty() == false) { + MetricStat metric = metricQueue.poll(); + if (metric.isDerivedMetric() && !metricSet.contains(metric)) { + metricQueue.addAll(metric.getBaseMetrics()); + } + metricSet.add(metric); + } + } + } + + /** + * Add derived metrics if all associated base metrics are present + */ + private void addEligibleDerivedMetrics(Set metricStats) { + for (MetricStat metric : MetricStat.values()) { + if (metric.isDerivedMetric() && !metricStats.contains(metric)) { + List sourceMetrics = metric.getBaseMetrics(); + if (metricStats.containsAll(sourceMetrics)) { + metricStats.add(metric); + } + } + } + } + @Override protected List> getParameters() { return List.of(config); diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 3fa97825cdfc6..6b3b87da89915 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -52,7 +52,7 @@ public void teardown() { } public void testValidStarTree() throws IOException { - MapperService mapperService = createMapperService(getExpandedMapping("status", "size")); + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); for (CompositeMappedFieldType type : compositeFieldTypes) { StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; @@ -66,7 +66,65 @@ public void testValidStarTree() throws IOException { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList(MetricStat.SUM, MetricStat.AVG); + + // Assert COUNT and SUM gets added when AVG is defined + List expectedMetrics = Arrays.asList(MetricStat.AVG, MetricStat.VALUE_COUNT, MetricStat.SUM); + 
assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals( + new HashSet<>(Arrays.asList("@timestamp", "status")), + starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() + ); + } + } + + public void testMetricsWithJustSum() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithJustSum("status", "size")); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.DAY_OF_MONTH, + Rounding.DateTimeUnit.MONTH_OF_YEAR + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert AVG gets added when both of its base metrics is already present + List expectedMetrics = List.of(MetricStat.SUM); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals( + new HashSet<>(Arrays.asList("@timestamp", "status")), + starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() + ); + } + } + + public void testMetricsWithCountAndSum() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithSumAndCount("status", "size")); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.DAY_OF_MONTH, + Rounding.DateTimeUnit.MONTH_OF_YEAR + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert AVG gets added when both of its base metrics is already present + List expectedMetrics = List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); @@ -92,13 +150,7 @@ public void testValidStarTreeDefaults() throws IOException { assertEquals(expectedTimeUnits, dateDim.getIntervals()); 
assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("status", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); @@ -109,7 +161,7 @@ public void testValidStarTreeDefaults() throws IOException { public void testInvalidDim() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getExpandedMapping("invalid", "size")) + () -> createMapperService(getExpandedMappingWithJustAvg("invalid", "size")) ); assertEquals("Failed to parse mapping [_doc]: unknown dimension field [invalid]", ex.getMessage()); } @@ -117,7 +169,7 @@ public void testInvalidDim() { public void testInvalidMetric() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getExpandedMapping("status", "invalid")) + () -> createMapperService(getExpandedMappingWithJustAvg("status", "invalid")) ); assertEquals("Failed to parse mapping [_doc]: unknown metric field [invalid]", ex.getMessage()); } @@ -232,6 +284,9 @@ public void testMetric() { assertEquals(MetricStat.MIN, MetricStat.fromTypeName("min")); assertEquals(MetricStat.SUM, MetricStat.fromTypeName("sum")); assertEquals(MetricStat.AVG, MetricStat.fromTypeName("avg")); + + assertEquals(List.of(MetricStat.VALUE_COUNT, MetricStat.SUM), MetricStat.AVG.getBaseMetrics()); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> MetricStat.fromTypeName("invalid")); assertEquals("Invalid metric stat: invalid", ex.getMessage()); } @@ -310,7 +365,7 @@ public void testStarTreeField() { } public void testValidations() throws IOException { - MapperService mapperService = createMapperService(getExpandedMapping("status", "size")); + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Settings settings = Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), true).build(); CompositeIndexSettings enabledCompositeIndexSettings = new CompositeIndexSettings( settings, @@ -370,7 +425,7 @@ public void testValidations() throws IOException { ); } - private XContentBuilder getExpandedMapping(String dim, String metric) throws IOException { + private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) throws IOException { return topMapping(b -> { b.startObject("composite"); b.startObject("startree"); @@ -399,7 +454,6 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE b.startObject(); b.field("name", metric); b.startArray("stats"); - b.value("sum"); b.value("avg"); b.endArray(); b.endObject(); @@ -421,6 +475,107 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE }); } + private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + 
b.startArray("skip_star_node_creation_for_dimensions"); + { + b.value("@timestamp"); + b.value("status"); + } + b.endArray(); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("day"); + b.value("month"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", dim); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", metric); + b.startArray("stats"); + b.value("sum"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } + + private XContentBuilder getExpandedMappingWithSumAndCount(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + b.startArray("skip_star_node_creation_for_dimensions"); + { + b.value("@timestamp"); + b.value("status"); + } + b.endArray(); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("day"); + b.value("month"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", dim); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", metric); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } + private XContentBuilder getMinMapping() throws IOException { return getMinMapping(false, false, false, false); } From beda6164c09f39ceb4dd832e3debdbcb7f7c8f3e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Fri, 23 Aug 2024 10:42:05 -0700 Subject: [PATCH 04/21] [Bugfix] Fixes IRC NPE bug for timed-out cacheable queries (#15327) * Fix IRC timeout bug Signed-off-by: Peter Alfonsi * addressed Sagar's comments Signed-off-by: Peter Alfonsi * addressed Ankit's comments Signed-off-by: Peter Alfonsi * Add UT for test coverage Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * tweak imports in new UT Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- .../indices/IndicesRequestCacheIT.java | 63 +++++++++++++++++++ .../indices/IndicesRequestCache.java | 11 ++-- .../opensearch/indices/IndicesService.java | 4 +- .../indices/IndicesServiceTests.java | 35 +++++++++++ 4 files changed, 105 insertions(+), 8 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 09d5c208a8756..108ef14f0fcb4 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -56,7 +62,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -65,6 +74,7 @@ import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.ZoneId; @@ -768,6 +778,59 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } + public void testTimedOutQuery() throws Exception { + // A timed out query should be cached and then invalidated + Client client = client(); + String index = "index"; + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + forceMerge(client, index); + + QueryBuilder timeoutQueryBuilder = new TermQueryBuilder("k", "hello") { + @Override + protected Query doToQuery(QueryShardContext context) { + return new TermQuery(new Term("k", "hello")) { + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + // Create the weight before sleeping. Otherwise, TermStates.build() (in the call to super.createWeight()) will + // sometimes throw an exception on timeout, rather than timing out gracefully. 
+ Weight result = super.createWeight(searcher, scoreMode, boost); + try { + Thread.sleep(500); + } catch (InterruptedException ignored) {} + return result; + } + }; + } + }; + + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(timeoutQueryBuilder) + .setTimeout(TimeValue.ZERO) + .get(); + assertTrue(resp.isTimedOut()); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); + // The cache should be empty as the timed-out query was invalidated + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + } + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 93946fa11de13..71f8cf5a78ec5 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -310,12 +310,11 @@ BytesReference getOrCompute( * @param cacheKey the cache key to invalidate */ void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { - assert reader.getReaderCacheHelper() != null; - String readerCacheKeyId = null; - if (reader instanceof OpenSearchDirectoryReader) { - IndexReader.CacheHelper cacheHelper = ((OpenSearchDirectoryReader) reader).getDelegatingCacheHelper(); - readerCacheKeyId = ((OpenSearchDirectoryReader.DelegatingCacheHelper) cacheHelper).getDelegatingCacheKey().getId(); - } + assert reader.getReaderCacheHelper() instanceof OpenSearchDirectoryReader.DelegatingCacheHelper; + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + IndexShard indexShard = (IndexShard) cacheEntity.getCacheIdentity(); cache.invalidate(getICacheKey(new Key(indexShard.shardId(), cacheKey, readerCacheKeyId, System.identityHashCode(indexShard)))); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 902ca95643625..a78328e24c588 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -68,6 +68,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -1754,8 +1755,7 @@ public boolean canCache(ShardSearchRequest request, SearchContext context) { if (context.getQueryShardContext().isCacheable() == false) { return false; } - return true; - + return context.searcher().getDirectoryReader().getReaderCacheHelper() instanceof DelegatingCacheHelper; } /** diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 6757dbc184961..b5350a39e8599 100644 --- 
a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -31,12 +31,15 @@ package org.opensearch.indices; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.IndexShardStats; +import org.opensearch.action.search.SearchType; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexGraveyard; @@ -44,6 +47,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -76,8 +80,11 @@ import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.test.TestSearchContext; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -627,4 +634,32 @@ public void testClusterRemoteTranslogBufferIntervalDefault() { indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval() ); } + + public void testDirectoryReaderWithoutDelegatingCacheHelperNotCacheable() throws IOException { + IndicesService indicesService = getIndicesService(); + final IndexService indexService = createIndex("test"); + ShardSearchRequest request = mock(ShardSearchRequest.class); + when(request.requestCache()).thenReturn(true); + + TestSearchContext context = new TestSearchContext(indexService.getBigArrays(), indexService) { + @Override + public SearchType searchType() { + return SearchType.QUERY_THEN_FETCH; + } + }; + + ContextIndexSearcher searcher = mock(ContextIndexSearcher.class); + context.setSearcher(searcher); + DirectoryReader reader = mock(DirectoryReader.class); + when(searcher.getDirectoryReader()).thenReturn(reader); + when(searcher.getIndexReader()).thenReturn(reader); + IndexReader.CacheHelper notDelegatingCacheHelper = mock(IndexReader.CacheHelper.class); + DelegatingCacheHelper delegatingCacheHelper = mock(DelegatingCacheHelper.class); + + for (boolean useDelegatingCacheHelper : new boolean[] { true, false }) { + IndexReader.CacheHelper cacheHelper = useDelegatingCacheHelper ? 
delegatingCacheHelper : notDelegatingCacheHelper; + when(reader.getReaderCacheHelper()).thenReturn(cacheHelper); + assertEquals(useDelegatingCacheHelper, indicesService.canCache(request, context)); + } + } } From 0eb63bdab7a602a85d03601970e6a699a015211d Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 23 Aug 2024 14:09:37 -0400 Subject: [PATCH 05/21] Bump opentelemetry from 1.40.0 to 1.41.0, opentelemetry-semconv from 1.26.0-alpha to 1.27.0-alpha (#15361) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 ++ buildSrc/version.properties | 4 ++-- plugins/telemetry-otel/build.gradle | 4 +++- .../telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 | 1 + .../opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 | 1 - .../opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 | 1 + .../opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 | 1 - .../opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 | 1 + .../telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 | 1 - .../licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 | 1 + 31 files changed, 21 insertions(+), 17 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 delete mode 100644 
plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index cd02af4f625b9..021a459352ba7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-core-http-netty` from 1.15.1 to 1.15.3 ([#15300](https://github.com/opensearch-project/OpenSearch/pull/15300)) - Bump `com.gradle.develocity` from 3.17.6 to 3.18 ([#15297](https://github.com/opensearch-project/OpenSearch/pull/15297)) - Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) +- Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9d7bbf6f8f769..ccec8e2891a65 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.40.0 -opentelemetrysemconv = 1.26.0-alpha +opentelemetry = 1.41.0 +opentelemetrysemconv = 1.27.0-alpha diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 66d172e3dc7f3..872d928aa093f 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -86,7 +86,9 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 
'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 deleted file mode 100644 index 04ec81edf969c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ead8fb235fa12 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 @@ -0,0 +1 @@ +ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 deleted file mode 100644 index bcd7c886b5f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..b601a4fb5246f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 deleted file mode 100644 index 9716ec518c886..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..74b7cb25cdfe5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 @@ -0,0 +1 @@ +3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 deleted file mode 100644 index c0e79b05aa675..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..d8d8f75850cb6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 deleted file mode 100644 index 1df0ad183c475..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..3e1212943f894 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 @@ -0,0 +1 @@ +8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 deleted file mode 100644 index ebeb639a8459c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..21a29cc8445e5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 deleted file mode 100644 index b630c808d4763..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ae522ac698aa8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 deleted file mode 100644 index eda90dc825e6f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..a741d0a167d60 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 deleted file mode 100644 index cdd7dc6551b33..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..972e7de1c74be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 @@ -0,0 +1 @@ +c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 deleted file mode 100644 index 668291498bbae..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..c56ca0b9e8169 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 deleted file mode 100644 index 74f0786e21954..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..39db6cb73727f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 @@ -0,0 +1 @@ +f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 deleted file mode 100644 index 23ef1bf6e6b2c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..6dcd496e033d3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 @@ -0,0 +1 @@ +9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 deleted file mode 100644 index aea753f0df18b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..161e400f87077 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 7124dcb31da3f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e986b4b53388e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 @@ -0,0 +1 @@ +906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file From 2301adf9a0e2921d2fa359c51ee5c87309ca71ad Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Fri, 23 Aug 2024 17:13:17 -0700 Subject: [PATCH 06/21] Add SplitResponseProcessor to allowlist (#15393) Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + .../pipeline/common/SearchPipelineCommonModulePlugin.java | 2 ++ .../pipeline/common/SearchPipelineCommonModulePluginTests.java | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 021a459352ba7..387aaf94bbe5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) +- Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) ### Security diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 2a2de9debb9d9..488b9e632aa2a 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -97,6 +97,8 @@ public Map> getResponseProces new TruncateHitsResponseProcessor.Factory(), CollapseResponseProcessor.TYPE, new CollapseResponseProcessor.Factory(), + SplitResponseProcessor.TYPE, + new SplitResponseProcessor.Factory(), SortResponseProcessor.TYPE, new SortResponseProcessor.Factory() ) diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java index 404842742629c..e10f06da29ba0 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java @@ -47,6 +47,7 @@ public void testResponseProcessorAllowlist() throws IOException { 
List.of("rename_field", "truncate_hits", "collapse"), SearchPipelineCommonModulePlugin::getResponseProcessors ); + runAllowlistTest(key, List.of("split", "sort"), SearchPipelineCommonModulePlugin::getResponseProcessors); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -82,7 +83,7 @@ public void testAllowlistNotSpecified() throws IOException { try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet()); assertEquals( - Set.of("rename_field", "truncate_hits", "collapse", "sort"), + Set.of("rename_field", "truncate_hits", "collapse", "split", "sort"), plugin.getResponseProcessors(createParameters(settings)).keySet() ); assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet()); From 6152afeef3ceb3fe4f18f6be2d55ac60256bc2db Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Sat, 24 Aug 2024 09:54:11 -0700 Subject: [PATCH 07/21] Fix unchecked cast in dynamic action map getter (#15394) Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + .../src/main/java/org/opensearch/action/ActionModule.java | 7 +++++-- .../org/opensearch/action/DynamicActionRegistryTests.java | 5 +++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 387aaf94bbe5c..deea2778dedd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) - Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) +- Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394)) ### Security diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 574b7029a6501..c86e6580122d5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -1200,9 +1200,12 @@ public void unregisterDynamicRoute(NamedRoute route) { * @param route The {@link RestHandler.Route}. * @return the corresponding {@link RestSendToExtensionAction} if it is registered, null otherwise. 
*/ - @SuppressWarnings("unchecked") public RestSendToExtensionAction get(RestHandler.Route route) { - return routeRegistry.get(route); + if (route instanceof NamedRoute) { + return routeRegistry.get((NamedRoute) route); + } + // Only NamedRoutes are map keys so any other route is not in the map + return null; } } } diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index 20c2b1f17124c..1e2b29287acb3 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -20,6 +20,7 @@ import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -85,13 +86,17 @@ public void testDynamicActionRegistryWithNamedRoutes() { RestSendToExtensionAction action2 = mock(RestSendToExtensionAction.class); NamedRoute r1 = new NamedRoute.Builder().method(RestRequest.Method.GET).path("/foo").uniqueName("foo").build(); NamedRoute r2 = new NamedRoute.Builder().method(RestRequest.Method.PUT).path("/bar").uniqueName("bar").build(); + RestHandler.Route r3 = new RestHandler.Route(RestRequest.Method.DELETE, "/foo"); DynamicActionRegistry registry = new DynamicActionRegistry(); registry.registerDynamicRoute(r1, action); registry.registerDynamicRoute(r2, action2); assertTrue(registry.isActionRegistered("foo")); + assertEquals(action, registry.get(r1)); assertTrue(registry.isActionRegistered("bar")); + assertEquals(action2, registry.get(r2)); + assertNull(registry.get(r3)); registry.unregisterDynamicRoute(r2); From 689cfcac8367e289ad67cbcfe91ebd75d50f07d0 Mon Sep 17 00:00:00 2001 From: Shivansh Arora Date: Mon, 26 Aug 2024 00:27:51 +0530 Subject: [PATCH 08/21] [Remote State] Disable remote publication if remote state disabled (#15219) * Disable remote publication if remote state disabled Signed-off-by: Shivansh Arora --- .../remote/RemoteStatePublicationIT.java | 24 ++++++++++++++++--- .../coordination/CoordinationState.java | 3 ++- .../coordination/CoordinationStateTests.java | 14 ++++++++++- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java index 07d6e1379ced8..6a2e7ce4957ae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -50,15 +50,22 @@ public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { private static String INDEX_NAME = "test-index"; + private boolean isRemoteStateEnabled = true; + private String isRemotePublicationEnabled = "true"; @Before public void setup() { asyncUploadMockFsRepo = false; + isRemoteStateEnabled = true; + isRemotePublicationEnabled = "true"; } @Override protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + return Settings.builder() + .put(super.featureFlagSettings()) + 
.put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled) + .build(); } @Override @@ -76,7 +83,7 @@ protected Settings nodeSettings(int nodeOrdinal) { ); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled) .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) @@ -136,6 +143,18 @@ public void testPublication() throws Exception { } } + public void testRemotePublicationDisableIfRemoteStateDisabled() { + // only disable remote state + isRemoteStateEnabled = false; + // create cluster with multi node with in-consistent settings + prepareCluster(3, 2, INDEX_NAME, 1, 2); + // assert cluster is stable, ensuring publication falls back to legacy transport with inconsistent settings + ensureStableCluster(5); + ensureGreen(INDEX_NAME); + + assertNull(internalCluster().getCurrentClusterManagerNodeInstance(RemoteClusterStateService.class)); + } + private Map getMetadataFiles(BlobStoreRepository repository, String subDirectory) throws IOException { BlobPath metadataPath = repository.basePath() .add( @@ -151,5 +170,4 @@ private Map getMetadataFiles(BlobStoreRepository repository, St return fileName.split(DELIMITER)[0]; }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum)); } - } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 7fa63ae8abc62..c7820c2c9a365 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -105,7 +105,8 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); - this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isRemotePublicationEnabled = isRemoteStateEnabled + && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && localNode.isRemoteStatePublicationEnabled(); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index e74962dcbba1b..3ee2278aec546 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -66,6 +66,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -971,7 +973,7 @@ public void 
testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) .put(stateRepoTypeAttributeKey, FsRepository.TYPE) .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") - .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); @@ -1002,6 +1004,16 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep ); } + public void testIsRemotePublicationEnabled_WithInconsistentSettings() { + // create settings with remote state disabled but publication enabled + Settings settings = Settings.builder() + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) + .put(REMOTE_PUBLICATION_EXPERIMENTAL, true) + .build(); + CoordinationState coordinationState = createCoordinationState(psr1, node1, settings); + assertFalse(coordinationState.isRemotePublicationEnabled()); + } + public static CoordinationState createCoordinationState( PersistedStateRegistry persistedStateRegistry, DiscoveryNode localNode, From f19528573dc3a16e4417550d40374afd471f8997 Mon Sep 17 00:00:00 2001 From: Dmitry Kryukov Date: Mon, 26 Aug 2024 02:22:04 +0300 Subject: [PATCH 09/21] Compare numbers with equals() instead of == (#15366) Signed-off-by: Dmitry Kryukov --- .../java/org/opensearch/transport/nio/MockNioTransport.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java index cd6bf02efef6f..9956c651618d3 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java @@ -467,7 +467,7 @@ private void logLongRunningExecutions() { final Thread thread = entry.getKey(); final String stackTrace = Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n")); final Thread.State threadState = thread.getState(); - if (blockedSinceInNanos == registry.get(thread)) { + if (blockedSinceInNanos.equals(registry.get(thread))) { logger.warn( "Potentially blocked execution on network thread [{}] [{}] [{} milliseconds]: \n{}", thread.getName(), From 90148942a56fa6a4840ad2afed195071f2d3c8e6 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 26 Aug 2024 21:46:18 +0530 Subject: [PATCH 10/21] Add pinned timestamp utils and setting to enable/disable the feature (#15401) Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../RemoteStorePinnedTimestampsIT.java | 10 + .../common/settings/ClusterSettings.java | 1 + .../index/remote/RemoteStoreUtils.java | 176 ++++++ .../store/RemoteSegmentStoreDirectory.java | 5 + .../indices/RemoteStoreSettings.java | 15 + .../main/java/org/opensearch/node/Node.java | 3 +- .../index/remote/RemoteStoreUtilsTests.java | 545 ++++++++++++++++++ 7 files changed, 754 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java index 05ff738d2df0b..cb91c63e17245 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -9,8 +9,10 @@ package org.opensearch.remotestore; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.test.OpenSearchIntegTestCase; @@ -20,6 +22,14 @@ public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + } + ActionListener noOpActionListener = new ActionListener<>() { @Override public void onResponse(Void unused) {} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 49ef87838ed2e..8daf9125bb27e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -761,6 +761,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index a5e0c10f72301..b2bc8a0294a49 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -20,20 +20,27 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; @@ -373,4 +380,173 @@ public static boolean isSwitchToStrictCompatibilityMode(ClusterUpdateSettingsReq incomingSettings ) == RemoteStoreNodeService.CompatibilityMode.STRICT; } + + /** + * Determines and returns a set of metadata files that match provided pinned timestamps. + * + * This method is an overloaded version of getPinnedTimestampLockedFiles and do not use cached entries to find + * the metadata file + * + * @param metadataFiles A list of metadata file names. 
Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + */ + public static Set getPinnedTimestampLockedFiles( + List metadataFiles, + Set pinnedTimestampSet, + Function getTimestampFunction, + Function> prefixFunction + ) { + return getPinnedTimestampLockedFiles(metadataFiles, pinnedTimestampSet, new HashMap<>(), getTimestampFunction, prefixFunction); + } + + /** + * Determines and returns a set of metadata files that match provided pinned timestamps. If pinned timestamp + * feature is not enabled, this function is a no-op. + * + * This method identifies metadata files that are considered implicitly locked due to their timestamps + * matching or being the closest preceding timestamp to the pinned timestamps. It uses a caching mechanism + * to improve performance for previously processed timestamps. + * + * The method performs the following steps: + * 1. Validates input parameters. + * 2. Updates the cache (metadataFilePinnedTimestampMap) to remove outdated entries. + * 3. Processes cached entries and identifies new timestamps to process. + * 4. For new timestamps, iterates through metadata files to find matching or closest preceding files. + * 5. Updates the cache with newly processed timestamps and their corresponding metadata files. + * + * @param metadataFiles A list of metadata file names. Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param metadataFilePinnedTimestampMap A map used for caching processed timestamps and their corresponding metadata files. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + * + */ + public static Set getPinnedTimestampLockedFiles( + List metadataFiles, + Set pinnedTimestampSet, + Map metadataFilePinnedTimestampMap, + Function getTimestampFunction, + Function> prefixFunction + ) { + Set implicitLockedFiles = new HashSet<>(); + + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return implicitLockedFiles; + } + + if (metadataFiles == null || metadataFiles.isEmpty() || pinnedTimestampSet == null) { + return implicitLockedFiles; + } + + // Remove entries for timestamps that are no longer pinned + metadataFilePinnedTimestampMap.keySet().retainAll(pinnedTimestampSet); + + // Add cached entries and collect new timestamps + Set newPinnedTimestamps = new TreeSet<>(Collections.reverseOrder()); + for (Long pinnedTimestamp : pinnedTimestampSet) { + String cachedFile = metadataFilePinnedTimestampMap.get(pinnedTimestamp); + if (cachedFile != null) { + implicitLockedFiles.add(cachedFile); + } else { + newPinnedTimestamps.add(pinnedTimestamp); + } + } + + if (newPinnedTimestamps.isEmpty()) { + return implicitLockedFiles; + } + + // Sort metadata files in descending order of timestamp + // ToDo: Do we really need this? Files fetched from remote store are already lexicographically sorted. 
+ metadataFiles.sort(String::compareTo); + + // If we have metadata files from multiple writers, it can result in picking file generated by stale primary. + // To avoid this, we fail fast. + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, prefixFunction); + + Iterator timestampIterator = newPinnedTimestamps.iterator(); + Long currentPinnedTimestamp = timestampIterator.next(); + long prevMdTimestamp = Long.MAX_VALUE; + for (String metadataFileName : metadataFiles) { + long currentMdTimestamp = getTimestampFunction.apply(metadataFileName); + // We always prefer md file with higher values of prefix like primary term, generation etc. + if (currentMdTimestamp > prevMdTimestamp) { + continue; + } + while (currentMdTimestamp <= currentPinnedTimestamp && prevMdTimestamp > currentPinnedTimestamp) { + implicitLockedFiles.add(metadataFileName); + // Do not cache entry for latest metadata file as the next metadata can also match the same pinned timestamp + if (prevMdTimestamp != Long.MAX_VALUE) { + metadataFilePinnedTimestampMap.put(currentPinnedTimestamp, metadataFileName); + } + if (timestampIterator.hasNext() == false) { + return implicitLockedFiles; + } + currentPinnedTimestamp = timestampIterator.next(); + } + prevMdTimestamp = currentMdTimestamp; + } + + return implicitLockedFiles; + } + + /** + * Filters out metadata files based on their age and pinned timestamps settings. + * + * This method filters a list of metadata files, keeping only those that are older + * than a certain threshold determined by the last successful fetch of pinned timestamps + * and a configured lookback interval. + * + * @param metadataFiles A list of metadata file names to be filtered. + * @param getTimestampFunction A function that extracts a timestamp from a metadata file name. + * @param lastSuccessfulFetchOfPinnedTimestamps The timestamp of the last successful fetch of pinned timestamps. + * @return A new list containing only the metadata files that meet the age criteria. + * If pinned timestamps are not enabled, returns a copy of the input list. + */ + public static List filterOutMetadataFilesBasedOnAge( + List metadataFiles, + Function getTimestampFunction, + long lastSuccessfulFetchOfPinnedTimestamps + ) { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return new ArrayList<>(metadataFiles); + } + long maximumAllowedTimestamp = lastSuccessfulFetchOfPinnedTimestamps - RemoteStoreSettings.getPinnedTimestampsLookbackInterval() + .getMillis(); + List metadataFilesWithMinAge = new ArrayList<>(); + for (String metadataFileName : metadataFiles) { + long metadataTimestamp = getTimestampFunction.apply(metadataFileName); + if (metadataTimestamp < maximumAllowedTimestamp) { + metadataFilesWithMinAge.add(metadataFileName); + } + } + return metadataFilesWithMinAge; + } + + /** + * Determines if the pinned timestamp state is stale. + * + * This method checks whether the last successful fetch of pinned timestamps + * is considered stale based on the current time and configured intervals. + * The state is considered stale if the last successful fetch occurred before + * a certain threshold, which is calculated as three times the scheduler interval + * plus the lookback interval. + * + * @return true if the pinned timestamp state is stale, false otherwise. + * Always returns false if pinned timestamps are not enabled. 
+ */ + public static boolean isPinnedTimestampStateStale() { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return false; + } + long lastSuccessfulFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1(); + long staleBufferInMillis = (RemoteStoreSettings.getPinnedTimestampsSchedulerInterval().millis() * 3) + RemoteStoreSettings + .getPinnedTimestampsLookbackInterval() + .millis(); + return lastSuccessfulFetchTimestamp < (System.currentTimeMillis() - staleBufferInMillis); + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 8c0ecb4cc783a..9ff97f12015bd 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -349,6 +349,11 @@ static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + public static long getTimestamp(String filename) { + String[] filenameTokens = filename.split(SEPARATOR); + return RemoteStoreUtils.invertLong(filenameTokens[6]); + } + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { String[] tokens = filename.split(SEPARATOR); if (tokens.length < 8) { diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 495288626627b..55280ca5c96d6 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -134,6 +134,15 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * Controls pinned timestamp feature enablement + */ + public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED = Setting.boolSetting( + "cluster.remote_store.pinned_timestamps.enabled", + false, + Setting.Property.NodeScope + ); + /** * Controls pinned timestamp scheduler interval */ @@ -163,6 +172,7 @@ public class RemoteStoreSettings { private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; private volatile boolean isTranslogMetadataEnabled; + private static volatile boolean isPinnedTimestampsEnabled; private static volatile TimeValue pinnedTimestampsSchedulerInterval; private static volatile TimeValue pinnedTimestampsLookbackInterval; @@ -205,6 +215,7 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { pinnedTimestampsSchedulerInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL.get(settings); pinnedTimestampsLookbackInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL.get(settings); + isPinnedTimestampsEnabled = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -280,4 +291,8 @@ public static TimeValue getPinnedTimestampsSchedulerInterval() { public static TimeValue getPinnedTimestampsLookbackInterval() { return pinnedTimestampsLookbackInterval; } + + public static boolean isPinnedTimestampsEnabled() { + return isPinnedTimestampsEnabled; + } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 1a9b233b387b2..7e867d3966ff5 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -305,6 
+305,7 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; @@ -812,7 +813,7 @@ protected Node( remoteClusterStateCleanupManager = null; } final RemoteStorePinnedTimestampService remoteStorePinnedTimestampService; - if (isRemoteStoreAttributePresent(settings)) { + if (isRemoteStoreAttributePresent(settings) && CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings)) { remoteStorePinnedTimestampService = new RemoteStorePinnedTimestampService( repositoriesServiceReference::get, settings, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index ec48032df4a15..ceaee8337ae34 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -25,23 +25,29 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.OpenSearchTestCase; import java.math.BigInteger; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; @@ -60,6 +66,7 @@ import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; @@ -537,4 +544,542 @@ private Map getRemoteStoreNodeAttributes() { remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1"); return remoteStoreNodeAttributes; } + + private void 
setupRemotePinnedTimestampFeature(boolean enabled) { + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), enabled).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + } + + public void testGetPinnedTimestampLockedFilesFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + // Pinned timestamps 800, 900, 1000, 2000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + Map metadataFilePinnedTimestampCache = new HashMap<>(); + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(0, implicitLockedFiles.size()); + assertEquals(0, metadataFilePinnedTimestampCache.size()); + } + + public void testGetPinnedTimestampLockedFilesWithEmptyMetadataFiles() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Collections.emptyList(); + Set pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L)); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNoPinnedTimestamps() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt"); + Set pinnedTimestampSet = Collections.emptySet(); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNullMetadataFiles() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = null; + Set pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L)); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNullPinnedTimestampSet() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt"); + Set pinnedTimestampSet = null; + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + private Tuple, Set> testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List metadataFileTimestamps, + Set pinnedTimetamps, + Map metadataFilePinnedTimestampCache + ) { + 
String metadataPrefix = "metadata__1__2__3__4__5__"; + Map metadataFiles = new HashMap<>(); + for (Long metadataFileTimestamp : metadataFileTimestamps) { + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + } + return new Tuple<>( + metadataFiles, + RemoteStoreUtils.getPinnedTimestampLockedFiles( + new ArrayList<>(metadataFiles.values()), + pinnedTimetamps, + metadataFilePinnedTimestampCache, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ) + ); + } + + private Tuple, Set> testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map metadataFileTimestampsPrimaryTermMap, + Set pinnedTimetamps, + Map metadataFilePinnedTimestampCache + ) { + setupRemotePinnedTimestampFeature(true); + Map metadataFiles = new HashMap<>(); + for (Map.Entry metadataFileTimestampPrimaryTerm : metadataFileTimestampsPrimaryTermMap.entrySet()) { + String primaryTerm = RemoteStoreUtils.invertLong(metadataFileTimestampPrimaryTerm.getValue()); + String metadataPrefix = "metadata__" + primaryTerm + "__2__3__4__5__"; + long metadataFileTimestamp = metadataFileTimestampPrimaryTerm.getKey(); + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + } + return new Tuple<>( + metadataFiles, + RemoteStoreUtils.getPinnedTimestampLockedFiles( + new ArrayList<>(metadataFiles.values()), + pinnedTimetamps, + metadataFilePinnedTimestampCache, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ) + ); + } + + public void testGetPinnedTimestampLockedFilesWithPinnedTimestamps() { + setupRemotePinnedTimestampFeature(true); + + Map metadataFilePinnedTimestampCache = new HashMap<>(); + + // Pinned timestamps 800, 900 + // Metadata with timestamp 990 + // No metadata matches the timestamp + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L), + Set.of(800L, 900L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(0, implicitLockedFiles.size()); + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990 + // Metadata timestamp 990 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(990L))); + // This is still 0 as we don't cache the latest metadata file as it can change (explained in the next test case) + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990, 995 + // Metadata timestamp 995 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, 
implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(995L))); + // This is still 0 as we don't cache the latest metadata file as it can change + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990, 995, 1000 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + // This is still 0 as we don't cache the latest metadata file as it can change + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000, 2000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L)); + + // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + // Metadata timestamp 1001 <= Pinned Timestamp 3000 + // Metadata timestamp 1001 <= Pinned Timestamp 4000 + // Metadata timestamp 1001 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L))); + // Now we cache all the matches except the last one. 
+ assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L)); + + // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000 + // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1900 <= Pinned Timestamp 2000 + // Metadata timestamp 2300 <= Pinned Timestamp 3000 + // Metadata timestamp 2300 <= Pinned Timestamp 4000 + // Metadata timestamp 2300 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L), + Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(3, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L))); + // Now we cache all the matches except the last one. + assertEquals(2, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L)); + assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L)); + + // Pinned timestamps 2000, 3000, 4000, 5000 + // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300 + // Metadata timestamp 1900 <= Pinned Timestamp 2000 + // Metadata timestamp 2300 <= Pinned Timestamp 3000 + // Metadata timestamp 2300 <= Pinned Timestamp 4000 + // Metadata timestamp 2300 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L), + Set.of(2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L)); + + // Pinned timestamps 2000, 3000, 4000, 5000 + // Metadata with timestamp 1001, 1900, 2300, 3000, 3001, 5500, 6000 + // Metadata timestamp 1900 <= Pinned Timestamp 2000 + // Metadata timestamp 3000 <= Pinned Timestamp 3000 + // Metadata timestamp 3001 <= Pinned Timestamp 4000 + // Metadata timestamp 3001 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(1001L, 1900L, 2300L, 3000L, 3001L, 5500L, 6000L), + Set.of(2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(3, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(3000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L))); + // Now we cache all the matches except the last one. 
+ assertEquals(4, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L)); + assertEquals(metadataFiles.get(3000L), metadataFilePinnedTimestampCache.get(3000L)); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L)); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L)); + + // Pinned timestamps 4000, 5000, 6000, 7000 + // Metadata with timestamp 2300, 3000, 3001, 5500, 6000 + // Metadata timestamp 3001 <= Pinned Timestamp 4000 + // Metadata timestamp 3001 <= Pinned Timestamp 5000 + // Metadata timestamp 6000 <= Pinned Timestamp 6000 + // Metadata timestamp 6000 <= Pinned Timestamp 7000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(2300L, 3000L, 3001L, 5500L, 6000L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6000L))); + // Now we cache all the matches except the last one. + assertEquals(2, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L)); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L)); + } + + public void testGetPinnedTimestampLockedFilesWithPinnedTimestampsDifferentPrefix() { + setupRemotePinnedTimestampFeature(true); + + Map metadataFilePinnedTimestampCache = new HashMap<>(); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 6 - 6998 + // 5 - 6995 + // 5 - 6990 + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6998L, 6L, 6995L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 5 - 6998 + // 5 - 6995 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6998L, 5L, 6995L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L))); + // Now we cache all the matches except the last one. 
+ assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 6 - 6998 + // 5 - 7001 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6998L, 6L, 7001L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 5 - 7005 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 7005L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6990L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6990L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 6999 + // 5 - 7005 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(6999L, 6L, 7005L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6999L))); + // Now we cache all the matches except the last one. 
+ assertEquals(0, metadataFilePinnedTimestampCache.size()); + } + + public void testFilterOutMetadataFilesBasedOnAgeFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + List metadataFiles = new ArrayList<>(); + + for (int i = 0; i < randomIntBetween(5, 10); i++) { + metadataFiles.add((System.currentTimeMillis() - randomIntBetween(-150000, 150000)) + "_file" + i + ".txt"); + } + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertEquals(metadataFiles, result); + } + + public void testFilterOutMetadataFilesBasedOnAge_AllFilesOldEnough() { + setupRemotePinnedTimestampFeature(true); + + List metadataFiles = Arrays.asList( + (System.currentTimeMillis() - 150000) + "_file1.txt", + (System.currentTimeMillis() - 300000) + "_file2.txt", + (System.currentTimeMillis() - 450000) + "_file3.txt" + ); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertEquals(metadataFiles, result); + } + + public void testFilterOutMetadataFilesBasedOnAge_SomeFilesTooNew() { + setupRemotePinnedTimestampFeature(true); + + String file1 = (System.currentTimeMillis() - 150000) + "_file1.txt"; + String file2 = (System.currentTimeMillis() - 300000) + "_file2.txt"; + String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt"; + + List metadataFiles = Arrays.asList(file1, file2, file3); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + List expected = Arrays.asList(file1, file2); + assertEquals(expected, result); + } + + public void testFilterOutMetadataFilesBasedOnAge_AllFilesTooNew() { + setupRemotePinnedTimestampFeature(true); + + String file1 = (System.currentTimeMillis() + 150000) + "_file1.txt"; + String file2 = (System.currentTimeMillis() + 300000) + "_file2.txt"; + String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt"; + + List metadataFiles = Arrays.asList(file1, file2, file3); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertTrue(result.isEmpty()); + } + + public void testFilterOutMetadataFilesBasedOnAge_EmptyInputList() { + setupRemotePinnedTimestampFeature(true); + + List metadataFiles = Arrays.asList(); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertTrue(result.isEmpty()); + } + + public void testIsPinnedTimestampStateStaleFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + assertFalse(RemoteStoreUtils.isPinnedTimestampStateStale()); + } + + public void testIsPinnedTimestampStateStaleFeatureEnabled() { + setupRemotePinnedTimestampFeature(true); + assertTrue(RemoteStoreUtils.isPinnedTimestampStateStale()); + } + } From 6e701918d07b403d1b3d368c0fe0ce595f035380 Mon Sep 17 00:00:00 2001 From: Brandon Shien <44730413+bshien@users.noreply.github.com> Date: Mon, 26 Aug 2024 11:48:53 -0700 Subject: [PATCH 11/21] Add release notes for release 1.3.19 (#15392) Signed-off-by: Brandon Shien --- release-notes/opensearch.release-notes-1.3.19.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.19.md diff --git 
a/release-notes/opensearch.release-notes-1.3.19.md b/release-notes/opensearch.release-notes-1.3.19.md new file mode 100644 index 0000000000000..fe62624fc6362 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.19.md @@ -0,0 +1,5 @@ +## 2024-08-22 Version 1.3.19 Release Notes + +### Upgrades +- OpenJDK Update (July 2024 Patch releases) ([#15002](https://github.com/opensearch-project/OpenSearch/pull/15002)) +- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) From f247d8fe1b88487554373495bd5f8f8fcc1f0147 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 15:53:43 -0400 Subject: [PATCH 12/21] Bump tj-actions/changed-files from 44 to 45 (#15422) * Bump tj-actions/changed-files from 44 to 45 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 44 to 45. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v44...v45) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/gradle-check.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 89d894403ff1a..1b9b30625eb83 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v45 with: files_ignore: | release-notes/*.md diff --git a/CHANGELOG.md b/CHANGELOG.md index deea2778dedd2..2838437200db8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) - Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) - Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) From 46a269ef21e88e3cb1398474e4bf82af4c3b3b7f Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 26 Aug 2024 14:11:24 -0700 Subject: [PATCH 13/21] Do not throw exception when flat_object field is explicitly null (#15375) It is valid for a flat_object field to have an explicit value of null. (It's functionally the same as not specifying the field at all.) 
Prior to this fix, though, we would erroneously advance the context parser to the next token, violating the contract with DocumentParser (which says that a call to parseCreateField with a null value should complete with the parser still pointing at the null value -- it is DocumentParser's responsibility to advance). Signed-off-by: Michael Froh * Fix unit test Signed-off-by: Michael Froh * Add changelog entry Signed-off-by: Michael Froh --------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../test/index/100_partial_flat_object.yml | 13 +++++++++++-- .../index/mapper/FlatObjectFieldMapper.java | 6 +----- .../index/mapper/FlatObjectFieldMapperTests.java | 8 ++++---- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2838437200db8..c04ddb6724d28 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) +- Fix indexing error when flat_object field is explicitly null ([#15375](https://github.com/opensearch-project/OpenSearch/pull/15375)) - Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) - Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml index 91e4127da9c32..e1bc86f1c9f3d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml @@ -88,7 +88,16 @@ setup: } ] } } - + - do: + index: + index: test_partial_flat_object + id: 4 + body: { + "issue": { + "number": 999, + "labels": null + } + } - do: indices.refresh: index: test_partial_flat_object @@ -135,7 +144,7 @@ teardown: } } - - length: { hits.hits: 3 } + - length: { hits.hits: 4 } # Match Query with exact dot path. - do: diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index b82fa3999612a..bf8f83e1b95df 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -568,11 +568,7 @@ protected void parseCreateField(ParseContext context) throws IOException { if (context.externalValueSet()) { String value = context.externalValue().toString(); parseValueAddFields(context, value, fieldType().name()); - } else if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) { - context.parser().nextToken(); // This triggers an exception in DocumentParser. - // We could remove the above nextToken() call to skip the null value, but the existing - // behavior (since 2.7) throws the exception. 
- } else { + } else if (context.parser().currentToken() != XContentParser.Token.VALUE_NULL) { JsonToStringXContentParser jsonToStringParser = new JsonToStringXContentParser( NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java index 637072c8886c1..5b5ca378ee7ff 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -25,7 +25,6 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.IsEqual.equalTo; -import static org.hamcrest.core.StringContains.containsString; public class FlatObjectFieldMapperTests extends MapperTestCase { private static final String FIELD_TYPE = "flat_object"; @@ -133,9 +132,10 @@ public void testDefaults() throws Exception { public void testNullValue() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.nullField("field")))); - assertThat(e.getMessage(), containsString("object mapping for [_doc] tried to parse field [field] as object")); - + ParsedDocument parsedDocument = mapper.parse(source(b -> b.nullField("field"))); + assertEquals(1, parsedDocument.docs().size()); + IndexableField[] fields = parsedDocument.rootDoc().getFields("field"); + assertEquals(0, fields.length); } @Override From f6d9a86f0e2e8241fd58b7e8b6cdeaf931b5108f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 06:27:10 -0400 Subject: [PATCH 14/21] Bump com.netflix.nebula.ospackage-base from 11.9.1 to 11.10.0 in /distribution/packages (#15419) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.9.1 to 11.10.0. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c04ddb6724d28..7a4964dd4c528 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) - Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) - Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.1 to 11.10.0 ([#15419](https://github.com/opensearch-project/OpenSearch/pull/15419)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 621620eef9d71..25af649bb4aed 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.1" + id "com.netflix.nebula.ospackage-base" version "11.10.0" } void addProcessFilesTask(String type, boolean jdk) { From 771949dd5b5186679a3d1d16c2f2eb6c6c488d33 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 06:28:28 -0400 Subject: [PATCH 15/21] Bump com.microsoft.azure:msal4j from 1.16.2 to 1.17.0 in /plugins/repository-azure (#15420) * Bump com.microsoft.azure:msal4j in /plugins/repository-azure Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.16.2 to 1.17.0. - [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases) - [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt) - [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/commits) --- updated-dependencies: - dependency-name: com.microsoft.azure:msal4j dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 | 1 - plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 create mode 100644 plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a4964dd4c528..7d4ec6a635fde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,7 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.16.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861), [#15205](https://github.com/opensearch-project/OpenSearch/pull/15205)) - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) -- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) +- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.17.0 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995), [#15420](https://github.com/opensearch-project/OpenSearch/pull/15420)) - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 6844311927db0..e76556f24cc23 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -61,7 +61,7 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.16.2' + api 'com.microsoft.azure:msal4j:1.17.0' api 'com.nimbusds:oauth2-oidc-sdk:11.9.1' api 'com.nimbusds:nimbus-jose-jwt:9.40' api 'com.nimbusds:content-type:2.3' diff --git a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 deleted file mode 100644 index 1363e5a0793d2..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b43ec4dd657f8ed5922bc0a8ccbe49000968bd15 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 new file mode 100644 index 0000000000000..34101c989eecd --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 @@ -0,0 +1 @@ +7d37157da92b719f250b0023234ac9dda922a2a5 \ No newline at end of file From 091ab6fd4c90311015189e05f9a6ff242fd23d1f Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Tue, 27 Aug 2024 17:04:14 +0530 Subject: [PATCH 16/21] [Star tree] Doc count field support in star tree (#15282) 
--------- Signed-off-by: Bharathwaj G --- .../index/mapper/StarTreeMapperIT.java | 5 + .../Composite99DocValuesWriter.java | 39 +- .../compositeindex/datacube/MetricStat.java | 18 +- .../datacube/startree/StarTreeValidator.java | 3 +- .../aggregators/CountValueAggregator.java | 5 +- .../aggregators/DocCountAggregator.java | 70 +++ .../aggregators/ValueAggregatorFactory.java | 4 +- .../startree/builder/BaseStarTreeBuilder.java | 56 +- .../index/mapper/StarTreeMapper.java | 7 +- .../AbstractValueAggregatorTests.java | 15 +- .../CountValueAggregatorTests.java | 12 +- .../aggregators/DocCountAggregatorTests.java | 75 +++ .../builder/AbstractStarTreeBuilderTests.java | 504 ++++++++++++------ .../index/mapper/StarTreeMapperTests.java | 36 +- 14 files changed, 644 insertions(+), 205 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index c461f83657340..52c6c6801a3c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -264,11 +264,16 @@ public void testValidCompositeIndex() { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(2, starTreeFieldType.getMetrics().size()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); // Assert default metrics List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + assertEquals("_doc_count", starTreeFieldType.getMetrics().get(1).getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), starTreeFieldType.getMetrics().get(1).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java index da784e1232800..74ab7d423998e 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java @@ -15,6 +15,7 @@ import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; import org.opensearch.common.annotation.ExperimentalApi; @@ -25,6 +26,7 @@ import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MapperService; import 
java.io.IOException; @@ -63,21 +65,29 @@ public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState this.compositeMappedFieldTypes = mapperService.getCompositeFieldTypes(); compositeFieldSet = new HashSet<>(); segmentFieldSet = new HashSet<>(); + // TODO : add integ test for this for (FieldInfo fi : segmentWriteState.fieldInfos) { if (DocValuesType.SORTED_NUMERIC.equals(fi.getDocValuesType())) { segmentFieldSet.add(fi.name); + } else if (fi.name.equals(DocCountFieldMapper.NAME)) { + segmentFieldSet.add(fi.name); } } for (CompositeMappedFieldType type : compositeMappedFieldTypes) { compositeFieldSet.addAll(type.fields()); } // check if there are any composite fields which are part of the segment + // TODO : add integ test where there are no composite fields in a segment, test both flush and merge cases segmentHasCompositeFields = Collections.disjoint(segmentFieldSet, compositeFieldSet) == false; } @Override public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { delegate.addNumericField(field, valuesProducer); + // Perform this only during flush flow + if (mergeState.get() == null && segmentHasCompositeFields) { + createCompositeIndicesIfPossible(valuesProducer, field); + } } @Override @@ -119,13 +129,7 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, if (segmentFieldSet.isEmpty()) { Set compositeFieldSetCopy = new HashSet<>(compositeFieldSet); for (String compositeField : compositeFieldSetCopy) { - fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { - @Override - public SortedNumericDocValues getSortedNumeric(FieldInfo field) { - return DocValues.emptySortedNumeric(); - } - }); - compositeFieldSet.remove(compositeField); + addDocValuesForEmptyField(compositeField); } } // we have all the required fields to build composite fields @@ -138,7 +142,28 @@ public SortedNumericDocValues getSortedNumeric(FieldInfo field) { } } } + } + /** + * Add empty doc values for fields not present in segment + */ + private void addDocValuesForEmptyField(String compositeField) { + if (compositeField.equals(DocCountFieldMapper.NAME)) { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public NumericDocValues getNumeric(FieldInfo field) { + return DocValues.emptyNumeric(); + } + }); + } else { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) { + return DocValues.emptySortedNumeric(); + } + }); + } + compositeFieldSet.remove(compositeField); } @Override diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java index 84eaaeb637962..1522078024b64 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java @@ -24,13 +24,26 @@ public enum MetricStat { SUM("sum"), MIN("min"), MAX("max"), - AVG("avg", VALUE_COUNT, SUM); + AVG("avg", VALUE_COUNT, SUM), + DOC_COUNT("doc_count", true); private final String typeName; private final MetricStat[] baseMetrics; + // System field stats cannot be used as input for user metric types + private final boolean isSystemFieldStat; + + MetricStat(String typeName) { + this(typeName, false); + } + MetricStat(String typeName, MetricStat... 
baseMetrics) { + this(typeName, false, baseMetrics); + } + + MetricStat(String typeName, boolean isSystemFieldStat, MetricStat... baseMetrics) { this.typeName = typeName; + this.isSystemFieldStat = isSystemFieldStat; this.baseMetrics = baseMetrics; } @@ -56,7 +69,8 @@ public boolean isDerivedMetric() { public static MetricStat fromTypeName(String typeName) { for (MetricStat metric : MetricStat.values()) { - if (metric.getTypeName().equalsIgnoreCase(typeName)) { + // prevent system fields to be entered as user input + if (metric.getTypeName().equalsIgnoreCase(typeName) && metric.isSystemFieldStat == false) { return metric; } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java index cbed46604681d..203bca3f1c292 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java @@ -14,6 +14,7 @@ import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.StarTreeMapper; @@ -78,7 +79,7 @@ public static void validate(MapperService mapperService, CompositeIndexSettings String.format(Locale.ROOT, "unknown metric field [%s] as part of star tree field", metric.getField()) ); } - if (ft.isAggregatable() == false) { + if (ft.isAggregatable() == false && ft instanceof DocCountFieldMapper.DocCountFieldType == false) { throw new IllegalArgumentException( String.format( Locale.ROOT, diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java index 81807cd174a10..e79abe0f170b3 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java @@ -17,12 +17,9 @@ class CountValueAggregator implements ValueAggregator { public static final long DEFAULT_INITIAL_VALUE = 1L; - private final StarTreeNumericType starTreeNumericType; private static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; - public CountValueAggregator(StarTreeNumericType starTreeNumericType) { - this.starTreeNumericType = starTreeNumericType; - } + public CountValueAggregator() {} @Override public StarTreeNumericType getAggregatedValueType() { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java new file mode 100644 index 0000000000000..0896fa54e9f46 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * 
compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Aggregator to handle '_doc_count' field + * + * @opensearch.experimental + */ +public class DocCountAggregator implements ValueAggregator { + + private static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; + + public DocCountAggregator() {} + + @Override + public StarTreeNumericType getAggregatedValueType() { + return VALUE_AGGREGATOR_TYPE; + } + + /** + * If _doc_count field for a doc is missing, we increment the _doc_count by '1' for the associated doc + * otherwise take the actual value present in the field + */ + @Override + public Long getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue) { + if (segmentDocValue == null) { + return getIdentityMetricValue(); + } + return segmentDocValue; + } + + @Override + public Long mergeAggregatedValueAndSegmentValue(Long value, Long segmentDocValue) { + assert value != null; + return mergeAggregatedValues(value, segmentDocValue); + } + + @Override + public Long mergeAggregatedValues(Long value, Long aggregatedValue) { + if (value == null) { + value = getIdentityMetricValue(); + } + if (aggregatedValue == null) { + aggregatedValue = getIdentityMetricValue(); + } + return value + aggregatedValue; + } + + @Override + public Long toAggregatedValueType(Long rawValue) { + return rawValue; + } + + /** + * If _doc_count field for a doc is missing, we increment the _doc_count by '1' for the associated doc + */ + @Override + public Long getIdentityMetricValue() { + return 1L; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java index ef5b773d81d27..bdc381110365d 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java @@ -31,11 +31,13 @@ public static ValueAggregator getValueAggregator(MetricStat aggregationType, Sta case SUM: return new SumValueAggregator(starTreeNumericType); case VALUE_COUNT: - return new CountValueAggregator(starTreeNumericType); + return new CountValueAggregator(); case MIN: return new MinValueAggregator(starTreeNumericType); case MAX: return new MaxValueAggregator(starTreeNumericType); + case DOC_COUNT: + return new DocCountAggregator(); default: throw new IllegalStateException("Unsupported aggregation type: " + aggregationType); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 3fc8d24e6e0d2..ddcf02cc6291a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.DocValuesType; import 
org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexOptions; @@ -28,6 +29,7 @@ import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; @@ -117,6 +119,16 @@ protected BaseStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState sta public List generateMetricAggregatorInfos(MapperService mapperService) { List metricAggregatorInfos = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { + if (metric.getField().equals(DocCountFieldMapper.NAME)) { + MetricAggregatorInfo metricAggregatorInfo = new MetricAggregatorInfo( + MetricStat.DOC_COUNT, + metric.getField(), + starTreeField.getName(), + IndexNumericFieldData.NumericType.LONG + ); + metricAggregatorInfos.add(metricAggregatorInfo); + continue; + } for (MetricStat metricStat : metric.getMetrics()) { if (metricStat.isDerivedMetric()) { continue; @@ -429,7 +441,7 @@ public void build(Map fieldProducerMap) throws IOExce String dimension = dimensionsSplitOrder.get(i).getField(); FieldInfo dimensionFieldInfo = state.fieldInfos.fieldInfo(dimension); if (dimensionFieldInfo == null) { - dimensionFieldInfo = getFieldInfo(dimension); + dimensionFieldInfo = getFieldInfo(dimension, DocValuesType.SORTED_NUMERIC); } dimensionReaders[i] = new SequentialDocValuesIterator( fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo) @@ -441,15 +453,15 @@ public void build(Map fieldProducerMap) throws IOExce logger.debug("Finished Building star-tree in ms : {}", (System.currentTimeMillis() - startTime)); } - private static FieldInfo getFieldInfo(String field) { + private static FieldInfo getFieldInfo(String field, DocValuesType docValuesType) { return new FieldInfo( field, - 1, + 1, // This is filled as part of doc values creation and is not used otherwise false, false, false, IndexOptions.NONE, - DocValuesType.SORTED_NUMERIC, + docValuesType, -1, Collections.emptyMap(), 0, @@ -473,20 +485,44 @@ public List getMetricReaders(SegmentWriteState stat List metricReaders = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { for (MetricStat metricStat : metric.getMetrics()) { + SequentialDocValuesIterator metricReader = null; FieldInfo metricFieldInfo = state.fieldInfos.fieldInfo(metric.getField()); - if (metricFieldInfo == null) { - metricFieldInfo = getFieldInfo(metric.getField()); + if (metricStat.equals(MetricStat.DOC_COUNT)) { + // _doc_count is numeric field , so we convert to sortedNumericDocValues and get iterator + metricReader = getIteratorForNumericField(fieldProducerMap, metricFieldInfo, DocCountFieldMapper.NAME); + } else { + if (metricFieldInfo == null) { + metricFieldInfo = getFieldInfo(metric.getField(), DocValuesType.SORTED_NUMERIC); + } + metricReader = new SequentialDocValuesIterator( + fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) + ); } - - SequentialDocValuesIterator metricReader = new SequentialDocValuesIterator( - fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) - ); metricReaders.add(metricReader); } } return metricReaders; } + /** + * Converts numericDocValues to sortedNumericDocValues and returns 
SequentialDocValuesIterator + */ + private SequentialDocValuesIterator getIteratorForNumericField( + Map fieldProducerMap, + FieldInfo fieldInfo, + String name + ) throws IOException { + if (fieldInfo == null) { + fieldInfo = getFieldInfo(name, DocValuesType.NUMERIC); + } + SequentialDocValuesIterator sequentialDocValuesIterator; + assert fieldProducerMap.containsKey(fieldInfo.name); + sequentialDocValuesIterator = new SequentialDocValuesIterator( + DocValues.singleton(fieldProducerMap.get(fieldInfo.name).getNumeric(fieldInfo)) + ); + return sequentialDocValuesIterator; + } + /** * Builds the star tree using Star-Tree Document * diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index 93764e93ae30d..e52d6a621e4e8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -226,6 +226,10 @@ private List buildMetrics(String fieldName, Map map, Map for (Object metric : metricsList) { Map metricMap = (Map) metric; String name = (String) XContentMapValues.extractValue(CompositeDataCubeFieldType.NAME, metricMap); + // Handle _doc_count metric separately at the end + if (name.equals(DocCountFieldMapper.NAME)) { + continue; + } metricMap.remove(CompositeDataCubeFieldType.NAME); if (objbuilder == null || objbuilder.mappersBuilders == null) { metrics.add(getMetric(name, metricMap, context)); @@ -250,7 +254,8 @@ private List buildMetrics(String fieldName, Map map, Map } else { throw new MapperParsingException(String.format(Locale.ROOT, "unable to parse metrics for star tree field [%s]", this.name)); } - + Metric docCountMetric = new Metric(DocCountFieldMapper.NAME, List.of(MetricStat.DOC_COUNT)); + metrics.add(docCountMetric); return metrics; } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java index f5d3e197aa287..36f75834abba8 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java @@ -48,11 +48,7 @@ public void testGetInitialAggregatedValueForSegmentDocNullValue() { } public void testMergeAggregatedNullValueAndSegmentNullValue() { - if (aggregator instanceof CountValueAggregator) { - assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); - } else { - assertEquals(aggregator.getIdentityMetricValue(), aggregator.mergeAggregatedValueAndSegmentValue(null, null)); - } + assertEquals(aggregator.getIdentityMetricValue(), aggregator.mergeAggregatedValueAndSegmentValue(null, null)); } public void testMergeAggregatedNullValues() { @@ -65,13 +61,6 @@ public void testGetInitialAggregatedNullValue() { public void testGetInitialAggregatedValueForSegmentDocValue() { long randomLong = randomLong(); - if (aggregator instanceof CountValueAggregator) { - assertEquals(CountValueAggregator.DEFAULT_INITIAL_VALUE, aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong())); - } else { - assertEquals( - starTreeNumericType.getDoubleValue(randomLong), - aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong) - ); - } + 
assertEquals(starTreeNumericType.getDoubleValue(randomLong), aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java index 550a4fea1174a..b270c1b1bc26c 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java @@ -31,6 +31,11 @@ public void testMergeAggregatedValues() { assertEquals(randomLong2, aggregator.mergeAggregatedValues(null, randomLong2), 0.0); } + @Override + public void testMergeAggregatedNullValueAndSegmentNullValue() { + assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); + } + public void testGetInitialAggregatedValue() { long randomLong = randomLong(); assertEquals(randomLong, aggregator.getInitialAggregatedValue(randomLong), 0.0); @@ -48,8 +53,13 @@ public void testIdentityMetricValue() { @Override public ValueAggregator getValueAggregator(StarTreeNumericType starTreeNumericType) { - aggregator = new CountValueAggregator(starTreeNumericType); + aggregator = new CountValueAggregator(); return aggregator; } + @Override + public void testGetInitialAggregatedValueForSegmentDocValue() { + long randomLong = randomLong(); + assertEquals(CountValueAggregator.DEFAULT_INITIAL_VALUE, (long) aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); + } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java new file mode 100644 index 0000000000000..2765629aa5950 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Unit tests for {@link DocCountAggregator}. 
+ */ +public class DocCountAggregatorTests extends AbstractValueAggregatorTests { + + private DocCountAggregator aggregator; + + public DocCountAggregatorTests(StarTreeNumericType starTreeNumericType) { + super(starTreeNumericType); + } + + public void testMergeAggregatedValueAndSegmentValue() { + long randomLong = randomLong(); + assertEquals(randomLong + 3L, (long) aggregator.mergeAggregatedValueAndSegmentValue(randomLong, 3L)); + } + + public void testMergeAggregatedValues() { + long randomLong1 = randomLong(); + long randomLong2 = randomLong(); + assertEquals(randomLong1 + randomLong2, (long) aggregator.mergeAggregatedValues(randomLong1, randomLong2)); + assertEquals(randomLong1 + 1L, (long) aggregator.mergeAggregatedValues(randomLong1, null)); + assertEquals(randomLong2 + 1L, (long) aggregator.mergeAggregatedValues(null, randomLong2)); + } + + @Override + public void testMergeAggregatedNullValueAndSegmentNullValue() { + assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); + } + + @Override + public void testMergeAggregatedNullValues() { + assertEquals( + (aggregator.getIdentityMetricValue() + aggregator.getIdentityMetricValue()), + (long) aggregator.mergeAggregatedValues(null, null) + ); + } + + public void testGetInitialAggregatedValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.getInitialAggregatedValue(randomLong)); + } + + public void testToStarTreeNumericTypeValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.toAggregatedValueType(randomLong)); + } + + public void testIdentityMetricValue() { + assertEquals(1L, (long) aggregator.getIdentityMetricValue()); + } + + @Override + public ValueAggregator getValueAggregator(StarTreeNumericType starTreeNumericType) { + aggregator = new DocCountAggregator(); + return aggregator; + } + + @Override + public void testGetInitialAggregatedValueForSegmentDocValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java index 389b6cb34f085..e77f184ac0243 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java @@ -55,6 +55,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -94,7 +95,8 @@ public void setup() throws IOException { new Metric("field4", List.of(MetricStat.SUM)), new Metric("field6", List.of(MetricStat.VALUE_COUNT)), new Metric("field9", List.of(MetricStat.MIN)), - new Metric("field10", List.of(MetricStat.MAX)) + new Metric("field10", List.of(MetricStat.MAX)), + new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)) ); DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); @@ -187,11 +189,26 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new 
Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0, 10L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, 10L } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0, 10L } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, null } + ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { @@ -200,14 +217,15 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 11L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 21L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -233,6 +251,7 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); numOfAggregatedDocuments++; } @@ -280,15 +299,15 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 
3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 11.0, null, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -303,7 +322,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -326,6 +345,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -334,15 +354,30 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L 
}, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0, null } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, null, randomDouble(), 9.0, 12.0, 10L } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, null, randomDouble(), 8.0, 13.0, null } + ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L, 8.0, 20.0, 11L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -355,9 +390,10 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = starTreeDocuments[i].metrics[5] != null ? (Long) starTreeDocuments[i].metrics[5] : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -380,6 +416,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -390,18 +427,18 @@ public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionFie // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, null, 3L, 4L }, - new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0 } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { null, 4L, 2L, 1L }, - new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0 } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, 4L, 2L, 1L }, - new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0 } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] 
{ -1L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 9.0, null, randomDouble(), 9.0, 12.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Object[] { 11.0, null, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 21.0, 0.0, 2L }), @@ -443,6 +480,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionFie assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -452,14 +490,29 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null - starTreeDocuments[0] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 0L, null, null }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 0L, null, null, 5L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -482,7 +535,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -505,6 +558,7 @@ 
public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -521,21 +575,21 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { null, null, randomDouble(), null, maxValue } + new Object[] { null, null, randomDouble(), null, maxValue } ); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null }); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { null, null, null, minValue, null } + new Object[] { null, null, null, minValue, null } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null }); starTreeDocuments[4] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { sumValue, null, randomDouble(), null, null } + new Object[] { sumValue, null, randomDouble(), null, null } ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { sumValue, 0.0, 2L, minValue, maxValue }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { sumValue, 0.0, 2L, minValue, maxValue, 5L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -558,7 +612,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -581,6 +635,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -593,27 +648,27 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { null, null, null, null }, - new 
Double[] { 12.0, null, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0, 10L } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0, 10L } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0, 10L } ); starTreeDocuments[3] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 } + new Object[] { 9.0, null, randomDouble(), 9.0, 12.0, 10L } ); starTreeDocuments[4] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 } + new Object[] { 11.0, null, randomDouble(), 8.0, 13.0, 10L } ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L, 6.0, 24.0, 50L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -626,9 +681,10 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); Long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); Long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -651,6 +707,7 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -661,28 +718,28 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() starTreeDocuments[0] = new StarTreeDocument( new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, - new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, - new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, - new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 } ); starTreeDocuments[3] = new StarTreeDocument( new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, - new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 } + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 } ); starTreeDocuments[4] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, - new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 } + new 
Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 } ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -695,7 +752,7 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, null } ); } @@ -720,6 +777,7 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); numOfAggregatedDocuments++; } @@ -735,19 +793,28 @@ public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, 4L, 3L, 4L }, - new Double[] { Double.MAX_VALUE, 10.0, randomDouble(), 8.0, 20.0 } + new Object[] { Double.MAX_VALUE, 10.0, randomDouble(), 8.0, 20.0, 100L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, 100L } ); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); starTreeDocuments[2] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Double[] { 14.0, Double.MIN_VALUE, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, Double.MIN_VALUE, randomDouble(), 6.0, 24.0, 100L } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, 100L } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, 100L } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L, 8.0, 20.0, 200L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L, 6.0, 24.0, 300L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -758,9 +825,10 @@ public void 
test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } @@ -892,7 +960,7 @@ public void test_build_halfFloatMetrics() throws IOException { ); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, null } ); } @@ -943,20 +1011,23 @@ public void test_build_floatMetrics() throws IOException { starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, 4L, 3L, 4L }, - new Float[] { 12.0F, 10.0F, randomFloat(), 8.0F, 20.0F } + new Object[] { 12.0F, 10.0F, randomFloat(), 8.0F, 20.0F, null } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Float[] { 10.0F, 6.0F, randomFloat(), 12.0F, 10.0F } + new Object[] { 10.0F, 6.0F, randomFloat(), 12.0F, 10.0F, null } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Float[] { 14.0F, 12.0F, randomFloat(), 6.0F, 24.0F } + new Object[] { 14.0F, 12.0F, randomFloat(), 6.0F, 24.0F, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0F, 4.0F, randomFloat(), 9.0F, 12.0F, null } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 9.0F, 4.0F, randomFloat(), 9.0F, 12.0F }); starTreeDocuments[4] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Float[] { 11.0F, 16.0F, randomFloat(), 8.0F, 13.0F } + new Object[] { 11.0F, 16.0F, randomFloat(), 8.0F, 13.0F, null } ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; @@ -966,9 +1037,10 @@ public void test_build_floatMetrics() throws IOException { long metric3 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } @@ -1031,7 +1103,7 @@ public void test_build_longMetrics() throws IOException { long metric5 = (Long) starTreeDocuments[i].metrics[4]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, null } ); } @@ -1053,13 +1125,13 @@ public void test_build_longMetrics() throws IOException { private static Iterator getExpectedStarTreeDocumentIterator() { List expectedStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 
24.0 }), - new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }), - new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }), - new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L, 6.0, 24.0, 5L }) ); return expectedStarTreeDocuments.iterator(); } @@ -1069,11 +1141,26 @@ public void test_build() throws IOException { int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0, 1L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, null } + ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { @@ -1082,9 +1169,10 @@ public void test_build() throws IOException { long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, 
metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } @@ -1130,7 +1218,7 @@ public void test_build_starTreeDataset() throws IOException { fields = List.of("fieldC", "fieldB", "fieldL", "fieldI"); dimensionsOrder = List.of(new NumericDimension("fieldC"), new NumericDimension("fieldB"), new NumericDimension("fieldL")); - metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM))); + metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM)), new Metric("_doc_count", List.of(MetricStat.DOC_COUNT))); DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); @@ -1199,18 +1287,18 @@ public void test_build_starTreeDataset() throws IOException { int noOfStarTreeDocuments = 7; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Double[] { 400.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Double[] { 200.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Double[] { 300.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Double[] { 100.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Double[] { 600.0 }); - starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Double[] { 200.0 }); - starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Double[] { 400.0 }); + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0, null }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0, null }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0, null }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0, null }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0, null }); + starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0, null }); + starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0, null }); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1 }); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, null }); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -1250,6 +1338,7 @@ public void test_build_starTreeDataset() throws IOException { assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); } validateStarTree(builder.getRootNode(), 3, 1, builder.getStarTreeDocuments()); } @@ -1278,33 +1367,33 @@ private static Map> getExpectedDimToValueMap() { private Iterator expectedStarTreeDocuments() { List expectedStarTreeDocuments = List.of( - new StarTreeDocument(new 
Long[] { 1L, 11L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0 }), - new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0 }), - new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0 }), - new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0 }), - new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0 }), - new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0 }), - new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0 }), - new StarTreeDocument(new Long[] { null, 12L, null }, new Object[] { 800.0 }), - new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, null }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0 }), - new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0 }) + new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0, 1L }), + new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0, 1L }), + new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0, 1L }), + new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0, 2L }), + new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0, 1L }), + new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0, 1L }), + new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0, 4L }), + new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0, 2L }), + new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0, 7L }), + new StarTreeDocument(new Long[] { null, 12L, null 
}, new Object[] { 800.0, 3L }), + new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0, 2L }), + new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0, 2L }), + new StarTreeDocument(new Long[] { 2L, 13L, null }, new Object[] { 400.0, 2L }), + new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0, 2L }), + new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0, 3L }), + new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0, 2L }) ); return expectedStarTreeDocuments.iterator(); @@ -2209,8 +2298,14 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { metricsList.add(getLongFromDouble(i * 10.0)); metricsWithField.add(i); } + List docCountMetricsList = new ArrayList<>(100); + List docCountMetricsWithField = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + docCountMetricsList.add(i * 10L); + docCountMetricsWithField.add(i); + } - StarTreeField sf = getStarTreeField(1); + StarTreeField sf = getStarTreeFieldWithDocCount(1, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2222,6 +2317,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); @@ -2236,6 +2333,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); builder = getStarTreeBuilder(sf, writeState, mapperService); @@ -2246,23 +2345,26 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { double sum = 0; /** 401 docs get generated - [0, 0, 0, 0] | [200.0] - [1, 1, 1, 1] | [700.0] - [2, 2, 2, 2] | [1200.0] - [3, 3, 3, 3] | [1700.0] - [4, 4, 4, 4] | [2200.0] + [0, 0, 0, 0] | [200.0, 10] + [1, 1, 1, 1] | [700.0, 10] + [2, 2, 2, 2] | [1200.0, 10] + [3, 3, 3, 3] | [1700.0, 10] + [4, 4, 4, 4] | [2200.0, 10] ..... 
- [null, null, null, 99] | [49700.0] - [null, null, null, null] | [2495000.0] + [null, null, null, 99] | [49700.0, 10] + [null, null, null, null] | [2495000.0, 1000] */ for (StarTreeDocument starTreeDocument : starTreeDocuments) { if (starTreeDocument.dimensions[3] == null) { assertEquals(sum, starTreeDocument.metrics[0]); + assertEquals(2495000L, (long) starTreeDocument.metrics[1]); } else { if (starTreeDocument.dimensions[0] != null) { sum += (double) starTreeDocument.metrics[0]; } assertEquals(starTreeDocument.dimensions[3] * 500 + 200.0, starTreeDocument.metrics[0]); + assertEquals(starTreeDocument.dimensions[3] * 500 + 200L, (long) starTreeDocument.metrics[1]); + } count++; } @@ -2319,7 +2421,14 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { metricsWithField.add(i); } - StarTreeField sf = getStarTreeField(3); + List metricsList1 = new ArrayList<>(100); + List metricsWithField1 = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + metricsList1.add(1L); + metricsWithField1.add(i); + } + + StarTreeField sf = getStarTreeFieldWithDocCount(3, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2331,6 +2440,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); @@ -2345,6 +2456,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); @@ -2353,17 +2466,58 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { List starTreeDocuments = builder.getStarTreeDocuments(); /** 635 docs get generated - [0, 0, 0, 0] | [200.0] - [1, 1, 1, 1] | [700.0] - [2, 2, 2, 2] | [1200.0] - [3, 3, 3, 3] | [1700.0] - [4, 4, 4, 4] | [2200.0] + [0, 0, 0, 0] | [200.0, 10] + [0, 0, 1, 1] | [700.0, 10] + [0, 0, 2, 2] | [1200.0, 10] + [0, 0, 3, 3] | [1700.0, 10] + [1, 0, 4, 4] | [2200.0, 10] + [1, 0, 5, 5] | [2700.0, 10] + [1, 0, 6, 6] | [3200.0, 10] + [1, 0, 7, 7] | [3700.0, 10] + [2, 0, 8, 8] | [4200.0, 10] + [2, 0, 9, 9] | [4700.0, 10] + [2, 1, 10, 10] | [5200.0, 10] + [2, 1, 11, 11] | [5700.0, 10] ..... - [null, null, null, 99] | [49700.0] + [18, 7, null, null] | [147800.0, 40] + ... + [7, 2, null, null] | [28900.0, 20] + ... + [null, null, null, 99] | [49700.0, 10] ..... 
- [null, null, null, null] | [2495000.0] + [null, null, null, null] | [2495000.0, 1000] */ assertEquals(635, starTreeDocuments.size()); + for (StarTreeDocument starTreeDocument : starTreeDocuments) { + if (starTreeDocument.dimensions[0] != null + && starTreeDocument.dimensions[1] != null + && starTreeDocument.dimensions[2] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[1] != null + && starTreeDocument.dimensions[2] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null + && starTreeDocument.dimensions[2] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null + && starTreeDocument.dimensions[1] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[1] != null) { + assertTrue((long) starTreeDocument.metrics[1] == 20L || (long) starTreeDocument.metrics[1] == 40L); + } else if (starTreeDocument.dimensions[1] != null && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[1] != null) { + assertEquals(100L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null) { + assertEquals(40L, starTreeDocument.metrics[1]); + } + } validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } @@ -2378,6 +2532,8 @@ private StarTreeValues getStarTreeValues( List docsWithField4, List metricsList, List metricsWithField, + List metricsList1, + List metricsWithField1, StarTreeField sf ) { SortedNumericDocValues d1sndv = getSortedNumericMock(dimList1, docsWithField1); @@ -2385,8 +2541,11 @@ private StarTreeValues getStarTreeValues( SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3); SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4); SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList1, metricsWithField1); Map dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv); - Map metricDocIdSetIterators = Map.of("field2", m1sndv); + Map metricDocIdSetIterators = new LinkedHashMap<>(); + metricDocIdSetIterators.put("field2", m1sndv); + metricDocIdSetIterators.put("_doc_count", m2sndv); StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(500)); return starTreeValues; } @@ -2438,7 +2597,14 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE metricsWithField.add(i); } - StarTreeField sf = getStarTreeField(3); + List docCountMetricsList = new ArrayList<>(100); + List docCountMetricsWithField = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + metricsList.add(getLongFromDouble(i * 2)); + metricsWithField.add(i); + } + + StarTreeField sf = getStarTreeFieldWithDocCount(3, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2450,6 +2616,8 @@ public void 
testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); @@ -2464,6 +2632,8 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); builder = getStarTreeBuilder(sf, writeState, mapperService); @@ -2536,8 +2706,13 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc metricsList.add(getLongFromDouble(10.0)); metricsWithField.add(i); } - - StarTreeField sf = getStarTreeField(10); + List metricsList1 = new ArrayList<>(100); + List metricsWithField1 = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + metricsList.add(1L); + metricsWithField.add(i); + } + StarTreeField sf = getStarTreeFieldWithDocCount(10, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2549,6 +2724,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); @@ -2563,6 +2740,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); builder = getStarTreeBuilder(sf, writeState, mapperService); @@ -2584,14 +2763,18 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } - private static StarTreeField getStarTreeField(int maxLeafDocs) { + private static StarTreeField getStarTreeFieldWithDocCount(int maxLeafDocs, boolean includeDocCountMetric) { Dimension d1 = new NumericDimension("field1"); Dimension d2 = new NumericDimension("field3"); Dimension d3 = new NumericDimension("field5"); Dimension d4 = new NumericDimension("field8"); List dims = List.of(d1, d2, d3, d4); Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); - List metrics = List.of(m1); + Metric m2 = null; + if (includeDocCountMetric) { + m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)); + } + List metrics = m2 == null ? 
List.of(m1) : List.of(m1, m2); StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( maxLeafDocs, new HashSet<>(), @@ -2684,8 +2867,9 @@ public void testMergeFlow() throws IOException { Dimension d4 = new NumericDimension("field8"); // Dimension d5 = new NumericDimension("field5"); Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + Metric m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)); List dims = List.of(d1, d2, d3, d4); - List metrics = List.of(m1); + List metrics = List.of(m1, m2); StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( 1, new HashSet<>(), @@ -2697,8 +2881,9 @@ public void testMergeFlow() throws IOException { SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3); SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4); SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues m2sndv = DocValues.emptySortedNumeric(); Map dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv); - Map metricDocIdSetIterators = Map.of("field2", m1sndv); + Map metricDocIdSetIterators = Map.of("field2", m1sndv, "_doc_count", m2sndv); StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(1000)); SortedNumericDocValues f2d1sndv = getSortedNumericMock(dimList1, docsWithField1); @@ -2706,6 +2891,7 @@ public void testMergeFlow() throws IOException { SortedNumericDocValues f2d3sndv = getSortedNumericMock(dimList3, docsWithField3); SortedNumericDocValues f2d4sndv = getSortedNumericMock(dimList4, docsWithField4); SortedNumericDocValues f2m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues f2m2sndv = DocValues.emptySortedNumeric(); Map f2dimDocIdSetIterators = Map.of( "field1", f2d1sndv, @@ -2716,7 +2902,7 @@ public void testMergeFlow() throws IOException { "field8", f2d4sndv ); - Map f2metricDocIdSetIterators = Map.of("field2", f2m1sndv); + Map f2metricDocIdSetIterators = Map.of("field2", f2m1sndv, "_doc_count", f2m2sndv); StarTreeValues starTreeValues2 = new StarTreeValues( sf, null, @@ -2728,17 +2914,18 @@ public void testMergeFlow() throws IOException { builder = getStarTreeBuilder(sf, writeState, mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** - [0, 0, 0, 0] | [0.0] - [1, 1, 1, 1] | [20.0] - [2, 2, 2, 2] | [40.0] - [3, 3, 3, 3] | [60.0] - [4, 4, 4, 4] | [80.0] - [5, 5, 5, 5] | [100.0] + [0, 0, 0, 0] | [0.0, 2] + [1, 1, 1, 1] | [20.0, 2] + [2, 2, 2, 2] | [40.0, 2] + [3, 3, 3, 3] | [60.0, 2] + [4, 4, 4, 4] | [80.0, 2] + [5, 5, 5, 5] | [100.0, 2] ... 
[999, 999, 999, 999] | [19980.0] */ for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]); + assertEquals(2L, starTreeDocument.metrics[1]); } builder.build(starTreeDocumentIterator); @@ -2934,13 +3121,6 @@ private static StarTreeField getStarTreeField(MetricStat count) { return new StarTreeField("sf", dims, metrics, c); } - private Long getLongFromDouble(Double num) { - if (num == null) { - return null; - } - return NumericUtils.doubleToSortableLong(num); - } - SortedNumericDocValues getSortedNumericMock(List dimList, List docsWithField) { return new SortedNumericDocValues() { int index = -1; diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 6b3b87da89915..449b251dddca1 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -56,6 +56,7 @@ public void testValidStarTree() throws IOException { Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); for (CompositeMappedFieldType type : compositeFieldTypes) { StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals(2, starTreeFieldType.getDimensions().size()); assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); @@ -65,6 +66,7 @@ public void testValidStarTree() throws IOException { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(2, starTreeFieldType.getMetrics().size()); assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); // Assert COUNT and SUM gets added when AVG is defined @@ -126,6 +128,11 @@ public void testMetricsWithCountAndSum() throws IOException { // Assert AVG gets added when both of its base metrics is already present List expectedMetrics = List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + Metric metric = starTreeFieldType.getMetrics().get(1); + assertEquals("_doc_count", metric.getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), metric.getMetrics()); + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); assertEquals( @@ -149,9 +156,17 @@ public void testValidStarTreeDefaults() throws IOException { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(3, starTreeFieldType.getMetrics().size()); assertEquals("status", starTreeFieldType.getMetrics().get(0).getField()); List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + assertEquals("metric_field", starTreeFieldType.getMetrics().get(1).getField()); + expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, 
starTreeFieldType.getMetrics().get(1).getMetrics()); + Metric metric = starTreeFieldType.getMetrics().get(2); + assertEquals("_doc_count", metric.getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), metric.getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); @@ -188,7 +203,7 @@ public void testNoMetrics() { public void testInvalidParam() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getInvalidMapping(false, false, false, false, true)) + () -> createMapperService(getInvalidMapping(false, false, false, false, true, false)) ); assertEquals( "Failed to parse mapping [_doc]: Star tree mapping definition has unsupported parameters: [invalid : {invalid=invalid}]", @@ -234,6 +249,14 @@ public void testInvalidMetricType() { ); } + public void testInvalidMetricTypeWithDocCount() { + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> createMapperService(getInvalidMapping(false, false, false, false, false, true)) + ); + assertEquals("Failed to parse mapping [_doc]: Invalid metric stat: _doc_count", ex.getMessage()); + } + public void testInvalidDimType() { MapperParsingException ex = expectThrows( MapperParsingException.class, @@ -701,7 +724,8 @@ private XContentBuilder getInvalidMapping( boolean invalidSkipDims, boolean invalidDimType, boolean invalidMetricType, - boolean invalidParam + boolean invalidParam, + boolean invalidDocCountMetricType ) throws IOException { return topMapping(b -> { b.startObject("composite"); @@ -738,6 +762,12 @@ private XContentBuilder getInvalidMapping( b.endObject(); b.startObject(); b.field("name", "metric_field"); + if (invalidDocCountMetricType) { + b.startArray("stats"); + b.value("_doc_count"); + b.value("avg"); + b.endArray(); + } b.endObject(); b.endArray(); b.endObject(); @@ -836,7 +866,7 @@ private XContentBuilder getInvalidMappingWithDv( private XContentBuilder getInvalidMapping(boolean singleDim, boolean invalidSkipDims, boolean invalidDimType, boolean invalidMetricType) throws IOException { - return getInvalidMapping(singleDim, invalidSkipDims, invalidDimType, invalidMetricType, false); + return getInvalidMapping(singleDim, invalidSkipDims, invalidDimType, invalidMetricType, false, false); } protected boolean supportsOrIgnoresBoost() { From 5e449764f521e42ff4d587d69b339064354b980f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 08:32:48 -0400 Subject: [PATCH 17/21] Bump org.roaringbitmap:RoaringBitmap from 1.1.0 to 1.2.1 in /server (#15423) * Bump org.roaringbitmap:RoaringBitmap from 1.1.0 to 1.2.1 in /server Bumps [org.roaringbitmap:RoaringBitmap](https://github.com/RoaringBitmap/RoaringBitmap) from 1.1.0 to 1.2.1. - [Release notes](https://github.com/RoaringBitmap/RoaringBitmap/releases) - [Commits](https://github.com/RoaringBitmap/RoaringBitmap/compare/1.1.0...1.2.1) --- updated-dependencies: - dependency-name: org.roaringbitmap:RoaringBitmap dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + server/build.gradle | 2 +- server/licenses/RoaringBitmap-1.1.0.jar.sha1 | 1 - server/licenses/RoaringBitmap-1.2.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 server/licenses/RoaringBitmap-1.1.0.jar.sha1 create mode 100644 server/licenses/RoaringBitmap-1.2.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d4ec6a635fde..a9469846b1648 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) - Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) - Bump `com.netflix.nebula.ospackage-base` from 11.9.1 to 11.10.0 ([#15419](https://github.com/opensearch-project/OpenSearch/pull/15419)) +- Bump `org.roaringbitmap:RoaringBitmap` from 1.1.0 to 1.2.1 ([#15423](https://github.com/opensearch-project/OpenSearch/pull/15423)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) diff --git a/server/build.gradle b/server/build.gradle index d655796674001..0cc42ad690eab 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -127,7 +127,7 @@ dependencies { api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap - implementation 'org.roaringbitmap:RoaringBitmap:1.1.0' + implementation 'org.roaringbitmap:RoaringBitmap:1.2.1' testImplementation(project(":test:framework")) { // tests use the locally compiled version of server diff --git a/server/licenses/RoaringBitmap-1.1.0.jar.sha1 b/server/licenses/RoaringBitmap-1.1.0.jar.sha1 deleted file mode 100644 index bf34e11b92710..0000000000000 --- a/server/licenses/RoaringBitmap-1.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9607213861158ae7060234d93ee9c9cb19f494d1 \ No newline at end of file diff --git a/server/licenses/RoaringBitmap-1.2.1.jar.sha1 b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 new file mode 100644 index 0000000000000..ef8cd48c7a388 --- /dev/null +++ b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 @@ -0,0 +1 @@ +828eb489b5e8c8762f2471010e9c7f20c7de596d \ No newline at end of file From eb5035398967510165fcab4ff4664fd3e80e2cce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:36:06 -0400 Subject: [PATCH 18/21] Bump dnsjava:dnsjava from 3.6.0 to 3.6.1 in /test/fixtures/hdfs-fixture (#15418) * Bump dnsjava:dnsjava from 3.6.0 to 3.6.1 in /test/fixtures/hdfs-fixture Bumps [dnsjava:dnsjava](https://github.com/dnsjava/dnsjava) from 3.6.0 to 3.6.1. - [Release notes](https://github.com/dnsjava/dnsjava/releases) - [Changelog](https://github.com/dnsjava/dnsjava/blob/master/Changelog) - [Commits](https://github.com/dnsjava/dnsjava/compare/v3.6.0...v3.6.1) --- updated-dependencies: - dependency-name: dnsjava:dnsjava dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Daniel (dB.) Doubrovkine Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Daniel (dB.) Doubrovkine --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9469846b1648..e8117ea05f80c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) - Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) - Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) +- Bump `dnsjava:dnsjava` from 3.6.0 to 3.6.1 ([#15418](https://github.com/opensearch-project/OpenSearch/pull/15418)) - Bump `com.netflix.nebula.ospackage-base` from 11.9.1 to 11.10.0 ([#15419](https://github.com/opensearch-project/OpenSearch/pull/15419)) - Bump `org.roaringbitmap:RoaringBitmap` from 1.1.0 to 1.2.1 ([#15423](https://github.com/opensearch-project/OpenSearch/pull/15423)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 411509dfe5acc..b5cd12ef0c11f 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -55,7 +55,7 @@ dependencies { exclude group: 'com.nimbusds' exclude module: "commons-configuration2" } - api "dnsjava:dnsjava:3.6.0" + api "dnsjava:dnsjava:3.6.1" api "org.codehaus.jettison:jettison:${versions.jettison}" api "org.apache.commons:commons-compress:${versions.commonscompress}" api "commons-codec:commons-codec:${versions.commonscodec}" From 46a7bb6d8c1bce571ee6201a29035473945c3092 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 27 Aug 2024 22:34:43 +0530 Subject: [PATCH 19/21] Add support to skip pinned timestamp in remote segment garbage collector (#15017) --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../opensearch/remotestore/RemoteStoreIT.java | 20 +- .../snapshots/DeleteSnapshotIT.java | 11 +- .../store/RemoteSegmentStoreDirectory.java | 101 +++++- .../RemoteStorePinnedTimestampService.java | 17 +- .../index/remote/RemoteStoreUtilsTests.java | 1 - .../BaseRemoteSegmentStoreDirectoryTests.java | 38 +-- ...toreDirectoryWithPinnedTimestampTests.java | 292 ++++++++++++++++++ .../test/OpenSearchIntegTestCase.java | 1 + 8 files changed, 430 insertions(+), 51 deletions(-) create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 194dce5f4a57a..a327b683874f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -217,10 +217,15 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { } else { // As delete is async its possible that the file gets created before the deletion or after // deletion. 
- MatcherAssert.assertThat( - actualFileCount, - is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) - ); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= lastNMetadataFilesToKeep); + } else { + MatcherAssert.assertThat( + actualFileCount, + is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) + ); + } } }, 30, TimeUnit.SECONDS); } @@ -249,7 +254,12 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. - MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= 4); + } else { + assertEquals(4, actualFileCount); + } } public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e688a4491b1a7..2331d52c3a1bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -19,6 +19,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -287,8 +288,14 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); - internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); + // Disabling pinned timestamp as this test is specifically for shallow snapshot. 
+ settings = Settings.builder() + .put(settings) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), false) + .build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNode(settings); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 9ff97f12015bd..26871429e41d6 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -40,6 +40,7 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; @@ -91,6 +92,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final RemoteStoreLockManager mdLockManager; + private final Map metadataFilePinnedTimestampMap; + private final ThreadPool threadPool; /** @@ -132,6 +135,7 @@ public RemoteSegmentStoreDirectory( this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.metadataFilePinnedTimestampMap = new HashMap<>(); this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -176,6 +180,42 @@ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long c return remoteSegmentMetadata; } + /** + * Initializes the remote segment metadata to a specific timestamp. + * + * @param timestamp The timestamp to initialize the remote segment metadata to. + * @return The RemoteSegmentMetadata object corresponding to the specified timestamp, or null if no metadata file is found for that timestamp. + * @throws IOException If an I/O error occurs while reading the metadata file. + */ + public RemoteSegmentMetadata initializeToSpecificTimestamp(long timestamp) throws IOException { + List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ); + Set lockedMetadataFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + Set.of(timestamp), + MetadataFilenameUtils::getTimestamp, + MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + if (lockedMetadataFiles.isEmpty()) { + return null; + } + if (lockedMetadataFiles.size() > 1) { + throw new IOException( + "Expected exactly one metadata file matching timestamp: " + timestamp + " but got " + lockedMetadataFiles + ); + } + String metadataFile = lockedMetadataFiles.iterator().next(); + RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); + if (remoteSegmentMetadata != null) { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); + } else { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); + } + return remoteSegmentMetadata; + } + /** * Read the latest metadata file to get the list of segments uploaded to the remote segment store. * We upload a metadata file per refresh, but it is not unique per refresh. 
Refresh metadata file is unique for a given commit. @@ -324,7 +364,8 @@ public static String getMetadataFilename( long translogGeneration, long uploadCounter, int metadataVersion, - String nodeId + String nodeId, + long creationTimestamp ) { return String.join( SEPARATOR, @@ -334,11 +375,30 @@ public static String getMetadataFilename( RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), String.valueOf(Objects.hash(nodeId)), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), + RemoteStoreUtils.invertLong(creationTimestamp), String.valueOf(metadataVersion) ); } + public static String getMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion, + String nodeId + ) { + return getMetadataFilename( + primaryTerm, + generation, + translogGeneration, + uploadCounter, + metadataVersion, + nodeId, + System.currentTimeMillis() + ); + } + // Visible for testing static long getPrimaryTerm(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[1]); @@ -778,6 +838,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException ); return; } + List sortedMetadataFileList = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, Integer.MAX_VALUE @@ -791,16 +852,44 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - List metadataFilesEligibleToDelete = new ArrayList<>( - sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) + // Check last fetch status of pinned timestamps. If stale, return. + if (RemoteStoreUtils.isPinnedTimestampStateStale()) { + logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + return; + } + + Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + sortedMetadataFileList, + pinnedTimestampsState.v2(), + metadataFilePinnedTimestampMap, + MetadataFilenameUtils::getTimestamp, + MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen ); - Set allLockFiles; + final Set allLockFiles = new HashSet<>(implicitLockedFiles); + try { - allLockFiles = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX); + allLockFiles.addAll( + ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX) + ); } catch (Exception e) { logger.error("Exception while fetching segment metadata lock files, skipping deleteStaleSegments", e); return; } + + List metadataFilesEligibleToDelete = new ArrayList<>( + sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) + ); + + // Along with last N files, we need to keep files since last successful run of scheduler + long lastSuccessfulFetchOfPinnedTimestamps = pinnedTimestampsState.v1(); + metadataFilesEligibleToDelete = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFilesEligibleToDelete, + MetadataFilenameUtils::getTimestamp, + lastSuccessfulFetchOfPinnedTimestamps + ); + List metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) .collect(Collectors.toList()); diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java 
b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java index c37db618c2522..f7b262664d147 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java @@ -219,9 +219,9 @@ private ActionListener getListenerForWriteCallResponse( private PinnedTimestamps readExistingPinnedTimestamps(String blobFilename, RemotePinnedTimestamps remotePinnedTimestamps) { remotePinnedTimestamps.setBlobFileName(blobFilename); - remotePinnedTimestamps.setFullBlobName(pinnedTimestampsBlobStore.getBlobPathForUpload(remotePinnedTimestamps)); + remotePinnedTimestamps.setFullBlobName(pinnedTimestampsBlobStore().getBlobPathForUpload(remotePinnedTimestamps)); try { - return pinnedTimestampsBlobStore.read(remotePinnedTimestamps); + return pinnedTimestampsBlobStore().read(remotePinnedTimestamps); } catch (IOException e) { throw new RuntimeException("Failed to read existing pinned timestamps", e); } @@ -245,6 +245,14 @@ public static Tuple> getPinnedTimestamps() { return pinnedTimestampsSet; } + public RemoteStorePinnedTimestampsBlobStore pinnedTimestampsBlobStore() { + return pinnedTimestampsBlobStore; + } + + public BlobStoreTransferService blobStoreTransferService() { + return blobStoreTransferService; + } + /** * Inner class for asynchronously updating the pinned timestamp set. */ @@ -266,11 +274,12 @@ protected void runInternal() { clusterService.state().metadata().clusterUUID(), blobStoreRepository.getCompressor() ); - BlobPath path = pinnedTimestampsBlobStore.getBlobPathForUpload(remotePinnedTimestamps); - blobStoreTransferService.listAllInSortedOrder(path, remotePinnedTimestamps.getType(), 1, new ActionListener<>() { + BlobPath path = pinnedTimestampsBlobStore().getBlobPathForUpload(remotePinnedTimestamps); + blobStoreTransferService().listAllInSortedOrder(path, remotePinnedTimestamps.getType(), 1, new ActionListener<>() { @Override public void onResponse(List blobMetadata) { if (blobMetadata.isEmpty()) { + pinnedTimestampsSet = new Tuple<>(triggerTimestamp, Set.of()); return; } PinnedTimestamps pinnedTimestamps = readExistingPinnedTimestamps(blobMetadata.get(0).name(), remotePinnedTimestamps); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index ceaee8337ae34..a6db37285fe6f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -1081,5 +1081,4 @@ public void testIsPinnedTimestampStateStaleFeatureEnabled() { setupRemotePinnedTimestampFeature(true); assertTrue(RemoteStoreUtils.isPinnedTimestampStateStale()); } - } diff --git a/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java index ff9b62a341deb..2c55d26261fe0 100644 --- a/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java @@ -43,16 +43,9 @@ public class BaseRemoteSegmentStoreDirectoryTests extends IndexShardTestCase { protected SegmentInfos segmentInfos; protected ThreadPool threadPool; - protected final String metadataFilename = 
RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 12, - 23, - 34, - 1, - 1, - "node-1" - ); + protected String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1, "node-1"); - protected final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + protected String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, 34, @@ -60,30 +53,9 @@ public class BaseRemoteSegmentStoreDirectoryTests extends IndexShardTestCase { 1, "node-2" ); - protected final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 12, - 13, - 34, - 1, - 1, - "node-1" - ); - protected final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 10, - 38, - 34, - 1, - 1, - "node-1" - ); - protected final String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 10, - 36, - 34, - 1, - 1, - "node-1" - ); + protected String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1, "node-1"); + protected String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1, "node-1"); + protected String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 36, 34, 1, 1, "node-1"); public void setupRemoteSegmentStoreDirectory() throws IOException { remoteDataDirectory = mock(RemoteDirectory.class); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java new file mode 100644 index 0000000000000..b4f93d706bb1e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.util.Version; +import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.gateway.remote.model.RemotePinnedTimestamps; +import org.opensearch.gateway.remote.model.RemoteStorePinnedTimestampsBlobStore; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.mockito.Mockito; + +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; +import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; +import static org.hamcrest.CoreMatchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteSegmentStoreDirectoryWithPinnedTimestampTests extends RemoteSegmentStoreDirectoryTests { + + Runnable updatePinnedTimstampTask; + BlobStoreTransferService blobStoreTransferService; + RemoteStorePinnedTimestampsBlobStore remoteStorePinnedTimestampsBlobStore; + RemoteStorePinnedTimestampService remoteStorePinnedTimestampServiceSpy; + + @Before + public void setupPinnedTimestamp() throws IOException { + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + Supplier repositoriesServiceSupplier = mock(Supplier.class); + Settings settings = Settings.builder() + .put(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote-repo") + .build(); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + BlobStoreRepository blobStoreRepository = mock(BlobStoreRepository.class); + when(repositoriesService.repository("remote-repo")).thenReturn(blobStoreRepository); + + when(threadPool.schedule(any(), any(), any())).then(invocationOnMock -> { + updatePinnedTimstampTask = invocationOnMock.getArgument(0); + updatePinnedTimstampTask.run(); + return null; + }).then(subsequentInvocationsOnMock -> null); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = new 
RemoteStorePinnedTimestampService( + repositoriesServiceSupplier, + settings, + threadPool, + clusterService + ); + remoteStorePinnedTimestampServiceSpy = Mockito.spy(remoteStorePinnedTimestampService); + + remoteStorePinnedTimestampsBlobStore = mock(RemoteStorePinnedTimestampsBlobStore.class); + blobStoreTransferService = mock(BlobStoreTransferService.class); + when(remoteStorePinnedTimestampServiceSpy.pinnedTimestampsBlobStore()).thenReturn(remoteStorePinnedTimestampsBlobStore); + when(remoteStorePinnedTimestampServiceSpy.blobStoreTransferService()).thenReturn(blobStoreTransferService); + + doAnswer(invocationOnMock -> { + ActionListener> actionListener = invocationOnMock.getArgument(3); + actionListener.onResponse(new ArrayList<>()); + return null; + }).when(blobStoreTransferService).listAllInSortedOrder(any(), any(), eq(1), any()); + + remoteStorePinnedTimestampServiceSpy.start(); + + metadataWithOlderTimestamp(); + } + + private void metadataWithOlderTimestamp() { + metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 300000 + ); + metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 400000 + ); + metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 500000 + ); + metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 36, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 600000 + ); + } + + public void testInitializeToSpecificTimestampNoMetadataFiles() throws IOException { + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(new ArrayList<>()); + assertNull(remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L)); + } + + public void testInitializeToSpecificTimestampNoMdMatchingTimestamp() throws IOException { + String metadataPrefix = "metadata__1__2__3__4__5__"; + List metadataFiles = new ArrayList<>(); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(4000)); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(metadataFiles); + assertNull(remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L)); + } + + public void testInitializeToSpecificTimestampMatchingMdFile() throws IOException { + String metadataPrefix = "metadata__1__2__3__4__5__"; + List metadataFiles = new ArrayList<>(); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(1000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000)); + + Map metadata = new HashMap<>(); + metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major); + metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024::" + Version.LATEST.major); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + 
RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(metadataFiles); + when(remoteMetadataDirectory.getBlobStream(metadataPrefix + RemoteStoreUtils.invertLong(1000))).thenReturn( + createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint(), segmentInfos) + ); + + RemoteSegmentMetadata remoteSegmentMetadata = remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L); + assertNotNull(remoteSegmentMetadata); + Map uploadedSegments = remoteSegmentStoreDirectory + .getSegmentsUploadedToRemoteStore(); + assertEquals(2, uploadedSegments.size()); + assertTrue(uploadedSegments.containsKey("_0.cfe")); + assertTrue(uploadedSegments.containsKey("_0.cfs")); + } + + public void testDeleteStaleCommitsNoPinnedTimestampMdFilesLatest() throws Exception { + metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() + ); + metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() + ); + metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() + ); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), + anyInt() + ) + ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3)); + + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + // But as the oldest metadata file's timestamp is within time threshold since last successful fetch, + // GC will skip deleting any data or metadata files. 
+ remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteDataDirectory, times(0)).deleteFile(any()); + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } + + public void testDeleteStaleCommitsPinnedTimestampMdFile() throws Exception { + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), + anyInt() + ) + ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3)); + + doAnswer(invocationOnMock -> { + ActionListener> actionListener = invocationOnMock.getArgument(3); + actionListener.onResponse(List.of(new PlainBlobMetadata("pinned_timestamp_123", 1000))); + return null; + }).when(blobStoreTransferService).listAllInSortedOrder(any(), any(), eq(1), any()); + + long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2) + 10; + when(remoteStorePinnedTimestampsBlobStore.read(any())).thenReturn(new RemotePinnedTimestamps.PinnedTimestamps(Map.of(pinnedTimestampMatchingMetadataFilename2, List.of("xyz")))); + when(remoteStorePinnedTimestampsBlobStore.getBlobPathForUpload(any())).thenReturn(new BlobPath()); + + final Map> metadataFilenameContentMapping = populateMetadata(); + final List filesToBeDeleted = metadataFilenameContentMapping.get(metadataFilename3) + .values() + .stream() + .map(metadata -> metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]) + .collect(Collectors.toList()); + + updatePinnedTimstampTask.run(); + + remoteSegmentStoreDirectory.init(); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + for (final String file : filesToBeDeleted) { + verify(remoteDataDirectory).deleteFile(file); + } + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename2); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index b86cce682c68e..911aa92340de6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2792,6 +2792,7 @@ private static Settings buildRemoteStoreNodeAttributes( } settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), randomBoolean()); return settings.build(); } From 20ebe6e0c03eaa167a082a3e0522fdb0a8d54d0b Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 27 Aug 2024 14:35:36 -0500 Subject: [PATCH 20/21] Throw UnsupportedOperationException in unused methods (#15446) These methods infinitely recurse as currently implemented. This change makes them throw UnsupportedOperationException similar to many other methods in this class. 
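For context, the recursion bug being fixed here looks like the following minimal sketch (class and method names are illustrative, not the actual TranslogLeafReader): a stub that delegates to itself never returns, while the replacement fails fast in the same way as the other unsupported methods in this class.

    import java.io.IOException;

    import org.apache.lucene.index.FloatVectorValues;

    // Illustrative sketch only: the broken self-delegating stub vs. the fail-fast fix.
    class RecursiveStubExample {
        // Before: the method "delegates" to itself, so any call recurses until StackOverflowError.
        FloatVectorValues broken(String field) throws IOException {
            return broken(field);
        }

        // After: throw explicitly, matching the other unsupported methods in the reader.
        FloatVectorValues fixed(String field) {
            throw new UnsupportedOperationException();
        }
    }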
Signed-off-by: Andrew Ross --- .../org/opensearch/index/engine/TranslogLeafReader.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index dea389bb6a0ff..94b8c6181de4e 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -264,13 +264,13 @@ public CacheHelper getReaderCacheHelper() { } @Override - public FloatVectorValues getFloatVectorValues(String field) throws IOException { - return getFloatVectorValues(field); + public FloatVectorValues getFloatVectorValues(String field) { + throw new UnsupportedOperationException(); } @Override - public ByteVectorValues getByteVectorValues(String field) throws IOException { - return getByteVectorValues(field); + public ByteVectorValues getByteVectorValues(String field) { + throw new UnsupportedOperationException(); } @Override From c771bdd34e403856d5ce10719d160b06da602821 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 27 Aug 2024 15:06:32 -0700 Subject: [PATCH 21/21] Fix DerivedFieldQuery to support concurrent search. (#15326) * Fix DerivedFieldQuery to support concurrent search. This change updates DerivedFieldQuery to create a separate ValueFetcher instance per thread. The DerivedFieldValueFetcher is not thread safe in that it holds a ref to a compiled DerivedFieldScript that is created per thread. Each script also holds a SourceLookup object that is not thread safe. Signed-off-by: Marc Handalian * Fix broken cases relying on ObjectDerivedFieldValueFetcher. DerivedFieldQuery will accept a supplier for a valueFetcher rather than constructing it. This ensures that the DerivedFieldType creating the query (obj or regular) passes the correct supplier func. 
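A rough sketch of the supplier pattern described above (assuming DerivedFieldValueFetcher lives in org.opensearch.index.mapper and exposes setNextReader as shown in the diff below; the real DerivedFieldQuery additionally carries the SearchLookup, analyzer, and indexable-field generator): the query holds a Supplier and materializes a fresh fetcher when scoring a leaf, so each search thread gets its own DerivedFieldScript and SourceLookup state.

    import java.util.function.Supplier;

    import org.apache.lucene.index.LeafReaderContext;
    import org.opensearch.index.mapper.DerivedFieldValueFetcher;

    // Simplified illustration of the per-thread ValueFetcher pattern used by DerivedFieldQuery.
    final class PerLeafFetcherExample {
        private final Supplier<DerivedFieldValueFetcher> valueFetcherSupplier;

        PerLeafFetcherExample(Supplier<DerivedFieldValueFetcher> valueFetcherSupplier) {
            this.valueFetcherSupplier = valueFetcherSupplier;
        }

        void scoreLeaf(LeafReaderContext context) {
            // A new fetcher per leaf/thread: the compiled script and SourceLookup it holds are not thread safe.
            DerivedFieldValueFetcher valueFetcher = valueFetcherSupplier.get();
            valueFetcher.setNextReader(context);
            // ... evaluate the wrapped query against the values the fetcher emits for this leaf ...
        }
    }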
Signed-off-by: Marc Handalian * remove unused clone method Signed-off-by: Marc Handalian * Add changelog entry Signed-off-by: Marc Handalian * add an extra test for DerivedFieldType multiPhraseQuery Signed-off-by: Marc Handalian * more coverage Signed-off-by: Marc Handalian * add tests for normalizedWildcard and phrase prefix Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + .../opensearch/painless/SimplePainlessIT.java | 8 ---- .../index/mapper/DerivedFieldType.java | 45 +++++++------------ .../index/query/DerivedFieldQuery.java | 25 ++++++++--- .../mapper/DerivedFieldMapperQueryTests.java | 22 ++++++++- .../index/mapper/DerivedFieldTypeTests.java | 18 ++++++++ .../index/query/DerivedFieldQueryTests.java | 6 +-- 7 files changed, 77 insertions(+), 48 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8117ea05f80c..3dff44ed96dfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325)) - Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895)) - Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336)) +- Add concurrent search support for Derived Fields ([#15326](https://github.com/opensearch-project/OpenSearch/pull/15326)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java index df327bf4871c6..c9078fdeeea28 100644 --- a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java +++ b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java @@ -188,10 +188,6 @@ public void testTermsValuesSource() throws Exception { } public void testSimpleDerivedFieldsQuery() { - assumeFalse( - "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); SearchRequest searchRequest = new SearchRequest("test-df").source( SearchSourceBuilder.searchSource() .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) @@ -204,10 +200,6 @@ public void testSimpleDerivedFieldsQuery() { } public void testSimpleDerivedFieldsAgg() { - assumeFalse( - "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); SearchRequest searchRequest = new SearchRequest("test-df").source( SearchSourceBuilder.searchSource() .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java index e230e37e6d826..fe81f19d74b21 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java +++ 
b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -159,10 +159,9 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S @Override public Query termQuery(Object value, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -176,10 +175,9 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termQueryCaseInsensitive(value, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -195,10 +193,9 @@ public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext @Override public Query termsQuery(List values, @Nullable QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termsQuery(values, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -230,10 +227,9 @@ public Query rangeQuery( parser, context ); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); return new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -251,10 +247,9 @@ public Query fuzzyQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -289,10 +284,9 @@ public Query fuzzyQuery( method, context ); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -316,10 +310,9 @@ public Query prefixQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.prefixQuery(value, method, caseInsensitive, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -343,10 +336,9 @@ public Query wildcardQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.wildcardQuery(value, method, caseInsensitive, 
context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -365,10 +357,9 @@ public Query wildcardQuery( @Override public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.normalizedWildcardQuery(value, method, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -394,10 +385,9 @@ public Query regexpQuery( QueryShardContext context ) { Query query = typeFieldMapper.mappedFieldType.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -416,10 +406,9 @@ public Query regexpQuery( @Override public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { Query query = typeFieldMapper.mappedFieldType.phraseQuery(stream, slop, enablePositionIncrements, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -441,10 +430,9 @@ public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionInc public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { Query query = typeFieldMapper.mappedFieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -465,10 +453,9 @@ public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositi @Override public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { Query query = typeFieldMapper.mappedFieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -493,10 +480,9 @@ public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRew @Override public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.distanceFeatureQuery(origin, pivot, boost, 
context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); return new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, @@ -507,10 +493,9 @@ public Query distanceFeatureQuery(Object origin, String pivot, float boost, Quer @Override public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { Query query = ((GeoShapeQueryable) (typeFieldMapper.mappedFieldType)).geoShapeQuery(shape, fieldName, relation, context); - DerivedFieldValueFetcher valueFetcher = valueFetcher(context, context.lookup(), null); return new DerivedFieldQuery( query, - valueFetcher, + () -> valueFetcher(context, context.lookup(), null), context.lookup(), getIndexAnalyzer(), indexableFieldGenerator, diff --git a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java index db943bdef0a12..dcc02726cb0ef 100644 --- a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java +++ b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Objects; import java.util.function.Function; +import java.util.function.Supplier; /** * DerivedFieldQuery used for querying derived fields. It contains the logic to execute an input lucene query against @@ -37,7 +38,7 @@ */ public final class DerivedFieldQuery extends Query { private final Query query; - private final DerivedFieldValueFetcher valueFetcher; + private final Supplier valueFetcherSupplier; private final SearchLookup searchLookup; private final Analyzer indexAnalyzer; private final boolean ignoreMalformed; @@ -46,20 +47,19 @@ public final class DerivedFieldQuery extends Query { /** * @param query lucene query to be executed against the derived field - * @param valueFetcher DerivedFieldValueFetcher ValueFetcher to fetch the value of a derived field from _source - * using LeafSearchLookup + * @param valueFetcherSupplier Supplier of a DerivedFieldValueFetcher that will be reconstructed per leaf * @param searchLookup SearchLookup to get the LeafSearchLookup look used by valueFetcher to fetch the _source */ public DerivedFieldQuery( Query query, - DerivedFieldValueFetcher valueFetcher, + Supplier valueFetcherSupplier, SearchLookup searchLookup, Analyzer indexAnalyzer, Function indexableFieldGenerator, boolean ignoreMalformed ) { this.query = query; - this.valueFetcher = valueFetcher; + this.valueFetcherSupplier = valueFetcherSupplier; this.searchLookup = searchLookup; this.indexAnalyzer = indexAnalyzer; this.indexableFieldGenerator = indexableFieldGenerator; @@ -77,7 +77,15 @@ public Query rewrite(IndexSearcher indexSearcher) throws IOException { if (rewritten == query) { return this; } - return new DerivedFieldQuery(rewritten, valueFetcher, searchLookup, indexAnalyzer, indexableFieldGenerator, ignoreMalformed); + ; + return new DerivedFieldQuery( + rewritten, + valueFetcherSupplier, + searchLookup, + indexAnalyzer, + indexableFieldGenerator, + ignoreMalformed + ); } @Override @@ -88,6 +96,11 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo public Scorer scorer(LeafReaderContext context) { DocIdSetIterator approximation; approximation = DocIdSetIterator.all(context.reader().maxDoc()); + + // Create a new ValueFetcher per thread. 
+ // ValueFetcher.setNextReader creates a DerivedFieldScript and internally SourceLookup and these objects are not + // thread safe. + final DerivedFieldValueFetcher valueFetcher = valueFetcherSupplier.get(); valueFetcher.setNextReader(context); LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(context); TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java index b9bdfca3509e3..c744f2592e24f 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java @@ -15,6 +15,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.queryparser.classic.ParseException; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; @@ -24,9 +25,12 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.core.index.Index; import org.opensearch.geometry.Rectangle; +import org.opensearch.index.query.MatchPhrasePrefixQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.search.QueryStringQueryParser; import org.opensearch.script.DerivedFieldScript; import java.io.IOException; @@ -435,7 +439,7 @@ public void execute() { } } - public void testObjectDerivedFields() throws IOException { + public void testObjectDerivedFields() throws IOException, ParseException { MapperService mapperService = createMapperService(topMapping(b -> { b.startObject("properties"); { @@ -545,6 +549,17 @@ public void execute() { topDocs = searcher.search(query, 10); assertEquals(0, topDocs.totalHits.value); + query = new MatchPhrasePrefixQueryBuilder("object_field.text_field", "document number").toQuery(queryShardContext); + topDocs = searcher.search(query, 10); + assertEquals(0, topDocs.totalHits.value); + + // Multi Phrase Query + query = QueryBuilders.multiMatchQuery("GET", "object_field.nested_field.sub_field_1", "object_field.keyword_field") + .type(MultiMatchQueryBuilder.Type.PHRASE) + .toQuery(queryShardContext); + topDocs = searcher.search(query, 10); + assertEquals(7, topDocs.totalHits.value); + // Range queries of types - date, long and double query = QueryBuilders.rangeQuery("object_field.date_field").from("2024-03-20T14:20:50").toQuery(queryShardContext); topDocs = searcher.search(query, 10); @@ -567,6 +582,11 @@ public void execute() { topDocs = searcher.search(query, 10); assertEquals(7, topDocs.totalHits.value); + QueryStringQueryParser queryParser = new QueryStringQueryParser(queryShardContext, "object_field.keyword_field"); + queryParser.parse("GE?"); + topDocs = searcher.search(query, 10); + assertEquals(7, topDocs.totalHits.value); + // Regexp Query query = QueryBuilders.regexpQuery("object_field.keyword_field", ".*let.*").toQuery(queryShardContext); topDocs = searcher.search(query, 10); diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java index fe9db24f494ad..7da8c9eb1efa0 100644 --- 
a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java @@ -17,6 +17,7 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; import org.opensearch.common.collect.Tuple; @@ -59,6 +60,7 @@ public void testBooleanType() { assertTrue(dft.getFieldMapper() instanceof BooleanFieldMapper); assertTrue(dft.getIndexableFieldGenerator().apply(true) instanceof Field); assertTrue(dft.getIndexableFieldGenerator().apply(false) instanceof Field); + assertEquals("derived", dft.typeName()); } public void testDateType() { @@ -159,6 +161,22 @@ public void testGetAggregationScript_ip() throws IOException { assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString((String) expected.get(0)))), result.get(0)); } + public void testDerivedFieldValueFetcherDoesNotSupportCustomFormats() { + DerivedFieldType dft = createDerivedFieldType("boolean"); + expectThrows( + IllegalArgumentException.class, + () -> dft.valueFetcher(mock(QueryShardContext.class), mock(SearchLookup.class), "yyyy-MM-dd") + ); + } + + public void testSpanPrefixQueryNotSupported() { + DerivedFieldType dft = createDerivedFieldType("boolean"); + expectThrows( + IllegalArgumentException.class, + () -> dft.spanPrefixQuery("value", mock(SpanMultiTermQueryWrapper.SpanRewriteMethod.class), mock(QueryShardContext.class)) + ); + } + private static LeafSearchLookup mockValueFetcherForAggs(QueryShardContext mockContext, DerivedFieldType dft, List expected) { SearchLookup searchLookup = mock(SearchLookup.class); LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); diff --git a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java index ecad1291bed19..bed2d22125810 100644 --- a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java +++ b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java @@ -88,7 +88,7 @@ public void execute() { // Create DerivedFieldQuery DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), - valueFetcher, + () -> valueFetcher, searchLookup, Lucene.STANDARD_ANALYZER, indexableFieldFunction, @@ -157,7 +157,7 @@ public void execute() { // Create DerivedFieldQuery DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), - valueFetcher, + () -> valueFetcher, searchLookup, Lucene.STANDARD_ANALYZER, badIndexableFieldFunction, @@ -169,7 +169,7 @@ public void execute() { // set ignore_malformed as true, query should pass derivedFieldQuery = new DerivedFieldQuery( new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), - valueFetcher, + () -> valueFetcher, searchLookup, Lucene.STANDARD_ANALYZER, badIndexableFieldFunction,